blob: c46be27be700e7e95d2422c662141e249eea4240 [file] [log] [blame]
David Sterbac1d7c512018-04-03 19:23:33 +02001// SPDX-License-Identifier: GPL-2.0
Chris Mason0f827312007-10-15 16:18:56 -04002/*
3 * Copyright (C) 2007 Oracle. All rights reserved.
Chris Mason0f827312007-10-15 16:18:56 -04004 */
5
Li Zefan18077bb2012-07-09 20:22:35 -06006#include <asm/unaligned.h>
Chris Masond352ac62008-09-29 15:18:18 -04007
Li Zefan18077bb2012-07-09 20:22:35 -06008#include "ctree.h"
9
David Sterba5e394682020-04-30 23:38:11 +020010static bool check_setget_bounds(const struct extent_buffer *eb,
11 const void *ptr, unsigned off, int size)
12{
13 const unsigned long member_offset = (unsigned long)ptr + off;
14
15 if (member_offset > eb->len) {
16 btrfs_warn(eb->fs_info,
17 "bad eb member start: ptr 0x%lx start %llu member offset %lu size %d",
18 (unsigned long)ptr, eb->start, member_offset, size);
19 return false;
20 }
21 if (member_offset + size > eb->len) {
22 btrfs_warn(eb->fs_info,
23 "bad eb member end: ptr 0x%lx start %llu member offset %lu size %d",
24 (unsigned long)ptr, eb->start, member_offset, size);
25 return false;
26 }
27
28 return true;
29}
30
Li Zefan18077bb2012-07-09 20:22:35 -060031/*
David Sterba583e4a22020-05-06 20:54:13 +020032 * Macro templates that define helpers to read/write extent buffer data of a
33 * given size, that are also used via ctree.h for access to item members by
34 * specialized helpers.
Chris Masond352ac62008-09-29 15:18:18 -040035 *
David Sterba583e4a22020-05-06 20:54:13 +020036 * Generic helpers:
37 * - btrfs_set_8 (for 8/16/32/64)
38 * - btrfs_get_8 (for 8/16/32/64)
Chris Masond352ac62008-09-29 15:18:18 -040039 *
David Sterba583e4a22020-05-06 20:54:13 +020040 * Generic helpers with a token (cached address of the most recently accessed
41 * page):
42 * - btrfs_set_token_8 (for 8/16/32/64)
43 * - btrfs_get_token_8 (for 8/16/32/64)
Chris Masond352ac62008-09-29 15:18:18 -040044 *
David Sterba583e4a22020-05-06 20:54:13 +020045 * The set/get functions handle data spanning two pages transparently, in case
46 * metadata block size is larger than page. Every pointer to metadata items is
47 * an offset into the extent buffer page array, cast to a specific type. This
48 * gives us all the type checking.
David Sterbacb495112019-08-09 17:12:38 +020049 *
 * The extent buffer pages stored in the pages array do not form a contiguous
 * physical range, but the API functions assume a linear offset into the range
 * from 0 to metadata node size.
Chris Masond352ac62008-09-29 15:18:18 -040053 */
54
/*
 * Generate the set/get helpers for a little-endian on-disk integer member
 * of width 'bits'.  Each instantiation emits four functions: plain get/set
 * operating directly on an extent buffer, and token get/set variants that
 * cache the kernel address of the most recently touched page in the token
 * to avoid repeated page lookups for consecutive member accesses.
 */
#define DEFINE_BTRFS_SETGET_BITS(bits)					\
/* Read a u'bits' member at ptr + off using the token's page cache. */	\
u##bits btrfs_get_token_##bits(struct btrfs_map_token *token,		\
			       const void *ptr, unsigned long off)	\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	const unsigned long oip = offset_in_page(member_offset);	\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	/* Number of member bytes that fit in the first page */		\
	const int part = PAGE_SIZE - oip;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));		\
	/* Fast path: member lies fully inside the cached page */	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + PAGE_SIZE) {	\
		return get_unaligned_le##bits(token->kaddr + oip);	\
	}								\
	/* Cache miss: point the token at the member's first page */	\
	token->kaddr = page_address(token->eb->pages[idx]);		\
	token->offset = idx << PAGE_SHIFT;				\
	if (oip + size <= PAGE_SIZE)					\
		return get_unaligned_le##bits(token->kaddr + oip);	\
									\
	/* Member straddles two pages: assemble the LE bytes */		\
	memcpy(lebytes, token->kaddr + oip, part);			\
	token->kaddr = page_address(token->eb->pages[idx + 1]);		\
	token->offset = (idx + 1) << PAGE_SHIFT;			\
	memcpy(lebytes + part, token->kaddr, size - part);		\
	return get_unaligned_le##bits(lebytes);				\
}									\
/* Read a u'bits' member at ptr + off, no token caching. */		\
u##bits btrfs_get_##bits(const struct extent_buffer *eb,		\
			 const void *ptr, unsigned long off)		\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long oip = offset_in_page(member_offset);	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	char *kaddr = page_address(eb->pages[idx]);			\
	const int size = sizeof(u##bits);				\
	/* Number of member bytes that fit in the first page */		\
	const int part = PAGE_SIZE - oip;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (oip + size <= PAGE_SIZE)					\
		return get_unaligned_le##bits(kaddr + oip);		\
									\
	/* Member straddles two pages: assemble the LE bytes */		\
	memcpy(lebytes, kaddr + oip, part);				\
	kaddr = page_address(eb->pages[idx + 1]);			\
	memcpy(lebytes + part, kaddr, size - part);			\
	return get_unaligned_le##bits(lebytes);				\
}									\
/* Write a u'bits' member at ptr + off using the token's page cache. */	\
void btrfs_set_token_##bits(struct btrfs_map_token *token,		\
			    const void *ptr, unsigned long off,		\
			    u##bits val)				\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	const unsigned long oip = offset_in_page(member_offset);	\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	/* Number of member bytes that fit in the first page */		\
	const int part = PAGE_SIZE - oip;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));		\
	/* Fast path: member lies fully inside the cached page */	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + PAGE_SIZE) {	\
		put_unaligned_le##bits(val, token->kaddr + oip);	\
		return;							\
	}								\
	/* Cache miss: point the token at the member's first page */	\
	token->kaddr = page_address(token->eb->pages[idx]);		\
	token->offset = idx << PAGE_SHIFT;				\
	if (oip + size <= PAGE_SIZE) {					\
		put_unaligned_le##bits(val, token->kaddr + oip);	\
		return;							\
	}								\
	/* Member straddles two pages: split the LE bytes across them */\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(token->kaddr + oip, lebytes, part);			\
	token->kaddr = page_address(token->eb->pages[idx + 1]);		\
	token->offset = (idx + 1) << PAGE_SHIFT;			\
	memcpy(token->kaddr, lebytes + part, size - part);		\
}									\
/* Write a u'bits' member at ptr + off, no token caching. */		\
void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr,	\
		      unsigned long off, u##bits val)			\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long oip = offset_in_page(member_offset);	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	char *kaddr = page_address(eb->pages[idx]);			\
	const int size = sizeof(u##bits);				\
	/* Number of member bytes that fit in the first page */		\
	const int part = PAGE_SIZE - oip;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (oip + size <= PAGE_SIZE) {					\
		put_unaligned_le##bits(val, kaddr + oip);		\
		return;							\
	}								\
									\
	/* Member straddles two pages: split the LE bytes across them */\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(kaddr + oip, lebytes, part);				\
	kaddr = page_address(eb->pages[idx + 1]);			\
	memcpy(kaddr, lebytes + part, size - part);			\
}

DEFINE_BTRFS_SETGET_BITS(8)
DEFINE_BTRFS_SETGET_BITS(16)
DEFINE_BTRFS_SETGET_BITS(32)
DEFINE_BTRFS_SETGET_BITS(64)
Chris Mason0f827312007-10-15 16:18:56 -0400162
Jeff Mahoney1cbb1f42017-06-28 21:56:53 -0600163void btrfs_node_key(const struct extent_buffer *eb,
Chris Masone644d022007-11-06 15:09:29 -0500164 struct btrfs_disk_key *disk_key, int nr)
165{
166 unsigned long ptr = btrfs_node_key_ptr_offset(nr);
Chris Masone644d022007-11-06 15:09:29 -0500167 read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
168 struct btrfs_key_ptr, key, disk_key);
169}