// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <asm/unaligned.h>

#include "ctree.h"

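/*
 * The generic unaligned helpers only provide 16/32/64 bit accessors, so
 * define trivial 8 bit variants here to keep DEFINE_BTRFS_SETGET_BITS(8)
 * symmetric with the wider sizes.
 */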
static inline u8 get_unaligned_le8(const void *p)
{
	return *(u8 *)p;
}

static inline void put_unaligned_le8(u8 val, void *p)
{
	*(u8 *)p = val;
}

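/*
 * Verify that the member described by @ptr + @off with the given @size lies
 * entirely within the extent buffer, warn and return false if it does not.
 */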
static bool check_setget_bounds(const struct extent_buffer *eb,
				const void *ptr, unsigned off, int size)
{
	const unsigned long member_offset = (unsigned long)ptr + off;

	if (member_offset > eb->len) {
		btrfs_warn(eb->fs_info,
			"bad eb member start: ptr 0x%lx start %llu member offset %lu size %d",
			(unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}
	if (member_offset + size > eb->len) {
		btrfs_warn(eb->fs_info,
			"bad eb member end: ptr 0x%lx start %llu member offset %lu size %d",
			(unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}

	return true;
}

/*
 * This is some deeply nasty code.
 *
 * The end result is that anyone who #includes ctree.h gets a
 * declaration for the btrfs_set_foo functions and btrfs_foo functions,
 * which are wrappers of btrfs_set_token_#bits functions and
 * btrfs_get_token_#bits functions, which are defined in this file.
 *
 * These setget functions do all the extent_buffer related mapping
 * required to efficiently read and write specific fields in the extent
 * buffers. Every pointer to metadata items in btrfs is really just
 * an unsigned long offset into the extent buffer which has been
 * cast to a specific type. This gives us all the gcc type checking.
 *
 * The extent buffer API is used to do the page spanning work required to
 * have a metadata blocksize different from the page size.
 *
 * There are 2 variants defined, one with a token pointer and one without.
 */
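
/*
 * Illustrative sketch, not part of this file: ctree.h is expected to build
 * the per-field accessors on top of the helpers below, roughly along the
 * lines of the (simplified, assumed) BTRFS_SETGET_FUNCS pattern shown here.
 * The real macro may carry extra sanity checks, but the shape is the same:
 *
 *	#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
 *	static inline u##bits btrfs_##name(const struct extent_buffer *eb, \
 *					   const type *s) \
 *	{ \
 *		return btrfs_get_##bits(eb, s, offsetof(type, member)); \
 *	} \
 *	static inline void btrfs_set_##name(struct extent_buffer *eb, \
 *					    type *s, u##bits val) \
 *	{ \
 *		btrfs_set_##bits(eb, s, offsetof(type, member), val); \
 *	}
 */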

#define DEFINE_BTRFS_SETGET_BITS(bits) \
u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
			       const void *ptr, unsigned long off) \
{ \
	unsigned long part_offset = (unsigned long)ptr; \
	unsigned long offset = part_offset + off; \
	void *p; \
	int err; \
	char *kaddr; \
	unsigned long map_start; \
	unsigned long map_len; \
	int size = sizeof(u##bits); \
	u##bits res; \
	\
	ASSERT(token); \
	ASSERT(token->kaddr); \
	ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
	if (token->offset <= offset && \
	    (token->offset + PAGE_SIZE >= offset + size)) { \
		kaddr = token->kaddr; \
		p = kaddr + part_offset - token->offset; \
		res = get_unaligned_le##bits(p + off); \
		return res; \
	} \
	err = map_private_extent_buffer(token->eb, offset, size, \
					&kaddr, &map_start, &map_len); \
	if (err) { \
		__le##bits leres; \
		\
		read_extent_buffer(token->eb, &leres, offset, size); \
		return le##bits##_to_cpu(leres); \
	} \
	p = kaddr + part_offset - map_start; \
	res = get_unaligned_le##bits(p + off); \
	token->kaddr = kaddr; \
	token->offset = map_start; \
	return res; \
} \
u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
			 const void *ptr, unsigned long off) \
{ \
	const unsigned long member_offset = (unsigned long)ptr + off; \
	const unsigned long oip = offset_in_page(member_offset); \
	const int size = sizeof(u##bits); \
	__le##bits leres; \
	\
	ASSERT(check_setget_bounds(eb, ptr, off, size)); \
	if (oip + size <= PAGE_SIZE) { \
		const unsigned long idx = member_offset >> PAGE_SHIFT; \
		const char *kaddr = page_address(eb->pages[idx]); \
		return get_unaligned_le##bits(kaddr + oip); \
	} \
	read_extent_buffer(eb, &leres, member_offset, size); \
	return le##bits##_to_cpu(leres); \
} \
void btrfs_set_token_##bits(struct btrfs_map_token *token, \
			    const void *ptr, unsigned long off, \
			    u##bits val) \
{ \
	unsigned long part_offset = (unsigned long)ptr; \
	unsigned long offset = part_offset + off; \
	void *p; \
	int err; \
	char *kaddr; \
	unsigned long map_start; \
	unsigned long map_len; \
	int size = sizeof(u##bits); \
	\
	ASSERT(token); \
	ASSERT(token->kaddr); \
	ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
	if (token->offset <= offset && \
	    (token->offset + PAGE_SIZE >= offset + size)) { \
		kaddr = token->kaddr; \
		p = kaddr + part_offset - token->offset; \
		put_unaligned_le##bits(val, p + off); \
		return; \
	} \
	err = map_private_extent_buffer(token->eb, offset, size, \
					&kaddr, &map_start, &map_len); \
	if (err) { \
		__le##bits val2; \
		\
		val2 = cpu_to_le##bits(val); \
		write_extent_buffer(token->eb, &val2, offset, size); \
		return; \
	} \
	p = kaddr + part_offset - map_start; \
	put_unaligned_le##bits(val, p + off); \
	token->kaddr = kaddr; \
	token->offset = map_start; \
} \
void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
		      unsigned long off, u##bits val) \
{ \
	unsigned long part_offset = (unsigned long)ptr; \
	unsigned long offset = part_offset + off; \
	void *p; \
	int err; \
	char *kaddr; \
	unsigned long map_start; \
	unsigned long map_len; \
	int size = sizeof(u##bits); \
	\
	ASSERT(check_setget_bounds(eb, ptr, off, size)); \
	err = map_private_extent_buffer(eb, offset, size, \
					&kaddr, &map_start, &map_len); \
	if (err) { \
		__le##bits val2; \
		\
		val2 = cpu_to_le##bits(val); \
		write_extent_buffer(eb, &val2, offset, size); \
		return; \
	} \
	p = kaddr + part_offset - map_start; \
	put_unaligned_le##bits(val, p + off); \
}
Chris Mason0f827312007-10-15 16:18:56 -0400178
Li Zefan18077bb2012-07-09 20:22:35 -0600179DEFINE_BTRFS_SETGET_BITS(8)
180DEFINE_BTRFS_SETGET_BITS(16)
181DEFINE_BTRFS_SETGET_BITS(32)
182DEFINE_BTRFS_SETGET_BITS(64)
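
/*
 * Minimal usage sketch, not taken from this file and assuming the
 * btrfs_init_map_token() helper declared in ctree.h: callers that update
 * several fields of the same leaf go through the token variants so the
 * cached page mapping can be reused across calls. The variables leaf, item,
 * data_offset and data_size are placeholders:
 *
 *	struct btrfs_map_token token;
 *
 *	btrfs_init_map_token(&token, leaf);
 *	btrfs_set_token_32(&token, item, offsetof(struct btrfs_item, offset),
 *			   data_offset);
 *	btrfs_set_token_32(&token, item, offsetof(struct btrfs_item, size),
 *			   data_size);
 */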
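/*
 * Read the disk key of the key pointer in slot @nr of the node @eb into
 * @disk_key.
 */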
void btrfs_node_key(const struct extent_buffer *eb,
		    struct btrfs_disk_key *disk_key, int nr)
{
	unsigned long ptr = btrfs_node_key_ptr_offset(nr);
	read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
		       struct btrfs_key_ptr, key, disk_key);
}