// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <asm/unaligned.h>

#include "ctree.h"

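/*
 * asm/unaligned.h provides get/put_unaligned_le16/32/64 but no 8-bit
 * variants, so define trivial ones here.  This lets the
 * DEFINE_BTRFS_SETGET_BITS template below use get/put_unaligned_le##bits
 * uniformly for all supported sizes.
 */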
static inline u8 get_unaligned_le8(const void *p)
{
	return *(u8 *)p;
}

static inline void put_unaligned_le8(u8 val, void *p)
{
	*(u8 *)p = val;
}

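/*
 * Sanity check of an access to a structure member: warn and return false if
 * either the start or the end of the member falls outside the extent buffer
 * (eb->len).  Called only inside ASSERT, so with CONFIG_BTRFS_ASSERT
 * disabled the check compiles away.
 */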
static bool check_setget_bounds(const struct extent_buffer *eb,
				const void *ptr, unsigned off, int size)
{
	const unsigned long member_offset = (unsigned long)ptr + off;

	if (member_offset > eb->len) {
		btrfs_warn(eb->fs_info,
	"bad eb member start: ptr 0x%lx start %llu member offset %lu size %d",
			(unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}
	if (member_offset + size > eb->len) {
		btrfs_warn(eb->fs_info,
	"bad eb member end: ptr 0x%lx start %llu member offset %lu size %d",
			(unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}

	return true;
}

/*
 * Macro templates that define helpers to read/write extent buffer data of a
 * given size; they are also used via ctree.h by the specialized helpers that
 * access item members.
 *
 * Generic helpers:
 * - btrfs_set_8 (for 8/16/32/64)
 * - btrfs_get_8 (for 8/16/32/64)
 *
 * Generic helpers with a token (cached address of the most recently accessed
 * page):
 * - btrfs_set_token_8 (for 8/16/32/64)
 * - btrfs_get_token_8 (for 8/16/32/64)
 *
 * The set/get functions handle data spanning two pages transparently, in
 * case the metadata block size is larger than one page.  Every pointer to
 * metadata items is an offset into the extent buffer page array, cast to a
 * specific type.  This gives us all the type checking.
 *
 * The extent buffer pages stored in the pages array do not form a contiguous
 * physical range, but the API functions assume a linear offset into the
 * range from 0 to the metadata node size.
 */

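/*
 * Illustrative usage sketch (assuming the BTRFS_SETGET_FUNCS wrappers
 * declared in ctree.h): the helpers generated below are usually reached
 * through those specialized wrappers rather than called directly, e.g. for
 * a btrfs_inode_item in a leaf:
 *
 *	struct btrfs_inode_item *ii;
 *	struct btrfs_map_token token;
 *	u64 gen;
 *
 *	ii = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
 *	gen = btrfs_inode_generation(leaf, ii);
 *
 *	btrfs_init_map_token(&token, leaf);
 *	gen = btrfs_token_inode_generation(&token, ii);
 */
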
#define DEFINE_BTRFS_SETGET_BITS(bits)					\
u##bits btrfs_get_token_##bits(struct btrfs_map_token *token,		\
			       const void *ptr, unsigned long off)	\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	const unsigned long oip = offset_in_page(member_offset);	\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = PAGE_SIZE - oip;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));	\
	/* Fast path: the cached page still covers the member. */	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + PAGE_SIZE) {	\
		return get_unaligned_le##bits(token->kaddr + oip);	\
	}								\
	/* Refresh the cache to the page containing the member. */	\
	token->kaddr = page_address(token->eb->pages[idx]);		\
	token->offset = idx << PAGE_SHIFT;				\
	if (oip + size <= PAGE_SIZE)					\
		return get_unaligned_le##bits(token->kaddr + oip);	\
									\
	/* The member spans two pages, assemble the LE bytes. */	\
	memcpy(lebytes, token->kaddr + oip, part);			\
	token->kaddr = page_address(token->eb->pages[idx + 1]);	\
	token->offset = (idx + 1) << PAGE_SHIFT;			\
	memcpy(lebytes + part, token->kaddr, size - part);		\
	return get_unaligned_le##bits(lebytes);				\
}									\
u##bits btrfs_get_##bits(const struct extent_buffer *eb,		\
			 const void *ptr, unsigned long off)		\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long oip = offset_in_page(member_offset);	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	char *kaddr = page_address(eb->pages[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = PAGE_SIZE - oip;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (oip + size <= PAGE_SIZE)					\
		return get_unaligned_le##bits(kaddr + oip);		\
									\
	/* The member spans two pages, assemble the LE bytes. */	\
	memcpy(lebytes, kaddr + oip, part);				\
	kaddr = page_address(eb->pages[idx + 1]);			\
	memcpy(lebytes + part, kaddr, size - part);			\
	return get_unaligned_le##bits(lebytes);				\
}									\
void btrfs_set_token_##bits(struct btrfs_map_token *token,		\
			    const void *ptr, unsigned long off,		\
			    u##bits val)				\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	const unsigned long oip = offset_in_page(member_offset);	\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = PAGE_SIZE - oip;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));	\
	/* Fast path: the cached page still covers the member. */	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + PAGE_SIZE) {	\
		put_unaligned_le##bits(val, token->kaddr + oip);	\
		return;							\
	}								\
	/* Refresh the cache to the page containing the member. */	\
	token->kaddr = page_address(token->eb->pages[idx]);		\
	token->offset = idx << PAGE_SHIFT;				\
	if (oip + size <= PAGE_SIZE) {					\
		put_unaligned_le##bits(val, token->kaddr + oip);	\
		return;							\
	}								\
	/* Write the LE bytes split across the page boundary. */	\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(token->kaddr + oip, lebytes, part);			\
	token->kaddr = page_address(token->eb->pages[idx + 1]);	\
	token->offset = (idx + 1) << PAGE_SHIFT;			\
	memcpy(token->kaddr, lebytes + part, size - part);		\
}									\
void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr,	\
		      unsigned long off, u##bits val)			\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long oip = offset_in_page(member_offset);	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	char *kaddr = page_address(eb->pages[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = PAGE_SIZE - oip;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (oip + size <= PAGE_SIZE) {					\
		put_unaligned_le##bits(val, kaddr + oip);		\
		return;							\
	}								\
									\
	/* Write the LE bytes split across the page boundary. */	\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(kaddr + oip, lebytes, part);				\
	kaddr = page_address(eb->pages[idx + 1]);			\
	memcpy(kaddr, lebytes + part, size - part);			\
}

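/* Instantiate the helpers for all member sizes used by the on-disk format. */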
DEFINE_BTRFS_SETGET_BITS(8)
DEFINE_BTRFS_SETGET_BITS(16)
DEFINE_BTRFS_SETGET_BITS(32)
DEFINE_BTRFS_SETGET_BITS(64)

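/* Read the disk key of the nr-th key pointer of a node into @disk_key. */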
void btrfs_node_key(const struct extent_buffer *eb,
		    struct btrfs_disk_key *disk_key, int nr)
{
	unsigned long ptr = btrfs_node_key_ptr_offset(nr);
	read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
		       struct btrfs_key_ptr, key, disk_key);
}
179}