// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <asm/unaligned.h>

#include "ctree.h"

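/*
 * asm/unaligned.h provides get/put_unaligned_le16/32/64 but nothing for
 * single bytes, so supply the 8-bit case here to let
 * DEFINE_BTRFS_SETGET_BITS(8) below expand the same way as the wider
 * sizes.  A lone byte needs no byte swapping or alignment handling.
 */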
static inline u8 get_unaligned_le8(const void *p)
{
	return *(u8 *)p;
}

static inline void put_unaligned_le8(u8 val, void *p)
{
	*(u8 *)p = val;
}

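/*
 * Check that the requested member (@ptr + @off, @size bytes) lies fully
 * inside the extent buffer; warn and return false otherwise.  All
 * callers below wrap this in ASSERT(), so the cost is debug-only.
 */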
static bool check_setget_bounds(const struct extent_buffer *eb,
				const void *ptr, unsigned off, int size)
{
	const unsigned long member_offset = (unsigned long)ptr + off;

	if (member_offset > eb->len) {
		btrfs_warn(eb->fs_info,
			"bad eb member start: ptr 0x%lx start %llu member offset %lu size %d",
			(unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}
	if (member_offset + size > eb->len) {
		btrfs_warn(eb->fs_info,
			"bad eb member end: ptr 0x%lx start %llu member offset %lu size %d",
			(unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}

	return true;
}

/*
 * This is some deeply nasty code.
 *
 * The end result is that anyone who #includes ctree.h gets a
 * declaration for the btrfs_set_foo functions and btrfs_foo functions,
 * which are wrappers of the btrfs_set_token_#bits functions and
 * btrfs_get_token_#bits functions, which are defined in this file.
 *
 * These setget functions do all the extent_buffer related mapping
 * required to efficiently read and write specific fields in the extent
 * buffers.  Every pointer to metadata items in btrfs is really just an
 * unsigned long offset into the extent buffer which has been cast to a
 * specific type.  This gives us all the gcc type checking.
 *
 * The extent buffer API is used to do the page spanning work required
 * to have a metadata blocksize different from the page size.
 *
 * There are two variants defined, one with a token pointer and one
 * without.
 */

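/*
 * Illustrative use of the token variants (a sketch, not code in this
 * file): the token caches the last mapped page so that a run of
 * accesses to nearby fields of the same extent buffer can skip the
 * page_address() lookup.  Assumes btrfs_init_map_token() from ctree.h
 * and the btrfs_*_token_32 helpers generated below:
 *
 *	struct btrfs_map_token token;
 *	u32 val;
 *
 *	btrfs_init_map_token(&token, eb);
 *	val = btrfs_get_token_32(&token, ptr, off);
 *	btrfs_set_token_32(&token, ptr, off, val + 1);
 */
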
#define DEFINE_BTRFS_SETGET_BITS(bits)					\
u##bits btrfs_get_token_##bits(struct btrfs_map_token *token,		\
			       const void *ptr, unsigned long off)	\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	const unsigned long oip = offset_in_page(member_offset);	\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = PAGE_SIZE - oip;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + PAGE_SIZE) {	\
		return get_unaligned_le##bits(token->kaddr + oip);	\
	}								\
	token->kaddr = page_address(token->eb->pages[idx]);		\
	token->offset = idx << PAGE_SHIFT;				\
	if (oip + size <= PAGE_SIZE)					\
		return get_unaligned_le##bits(token->kaddr + oip);	\
									\
	memcpy(lebytes, token->kaddr + oip, part);			\
	token->kaddr = page_address(token->eb->pages[idx + 1]);	\
	token->offset = (idx + 1) << PAGE_SHIFT;			\
	memcpy(lebytes + part, token->kaddr, size - part);		\
	return get_unaligned_le##bits(lebytes);				\
}									\
u##bits btrfs_get_##bits(const struct extent_buffer *eb,		\
			 const void *ptr, unsigned long off)		\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long oip = offset_in_page(member_offset);	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	char *kaddr = page_address(eb->pages[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = PAGE_SIZE - oip;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (oip + size <= PAGE_SIZE)					\
		return get_unaligned_le##bits(kaddr + oip);		\
									\
	memcpy(lebytes, kaddr + oip, part);				\
	kaddr = page_address(eb->pages[idx + 1]);			\
	memcpy(lebytes + part, kaddr, size - part);			\
	return get_unaligned_le##bits(lebytes);				\
}									\
void btrfs_set_token_##bits(struct btrfs_map_token *token,		\
			    const void *ptr, unsigned long off,		\
			    u##bits val)				\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	const unsigned long oip = offset_in_page(member_offset);	\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = PAGE_SIZE - oip;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + PAGE_SIZE) {	\
		put_unaligned_le##bits(val, token->kaddr + oip);	\
		return;							\
	}								\
	token->kaddr = page_address(token->eb->pages[idx]);		\
	token->offset = idx << PAGE_SHIFT;				\
	if (oip + size <= PAGE_SIZE) {					\
		put_unaligned_le##bits(val, token->kaddr + oip);	\
		return;							\
	}								\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(token->kaddr + oip, lebytes, part);			\
	token->kaddr = page_address(token->eb->pages[idx + 1]);	\
	token->offset = (idx + 1) << PAGE_SHIFT;			\
	memcpy(token->kaddr, lebytes + part, size - part);		\
}									\
void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr,	\
		      unsigned long off, u##bits val)			\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long oip = offset_in_page(member_offset);	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	char *kaddr = page_address(eb->pages[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = PAGE_SIZE - oip;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (oip + size <= PAGE_SIZE) {					\
		put_unaligned_le##bits(val, kaddr + oip);		\
		return;							\
	}								\
									\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(kaddr + oip, lebytes, part);				\
	kaddr = page_address(eb->pages[idx + 1]);			\
	memcpy(kaddr, lebytes + part, size - part);			\
}

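/*
 * Instantiate the helpers for every field width used in the on-disk
 * format.  Each expansion emits btrfs_get_##bits, btrfs_set_##bits and
 * their token counterparts, declared in ctree.h.
 */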
DEFINE_BTRFS_SETGET_BITS(8)
DEFINE_BTRFS_SETGET_BITS(16)
DEFINE_BTRFS_SETGET_BITS(32)
DEFINE_BTRFS_SETGET_BITS(64)

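/*
 * Copy the disk key at node pointer slot @nr of @eb into @disk_key.
 */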
void btrfs_node_key(const struct extent_buffer *eb,
		    struct btrfs_disk_key *disk_key, int nr)
{
	unsigned long ptr = btrfs_node_key_ptr_offset(nr);
	read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
		       struct btrfs_key_ptr, key, disk_key);
}