blob: dc1b77b92fc1756d7941f1757429e6e85a314dfc [file] [log] [blame]
Dave Chinner0b61f8a2018-06-05 19:42:14 -07001// SPDX-License-Identifier: GPL-2.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
Nathan Scott7b718762005-11-02 14:58:39 +11003 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 */
6#ifndef __XFS_LOG_H__
7#define __XFS_LOG_H__
8
struct xfs_cil_ctx;

/*
 * In-memory log vector: describes the formatted buffer holding the log
 * regions for a single log item.  Vectors are chained through lv_next
 * while a list is being built.
 *
 * Note the two distinct length fields: lv_buf_len is the aligned buffer
 * offset (includes padding between regions), while lv_bytes is the space
 * actually accounted for log space purposes.
 */
struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

/*
 * Negative sentinel for a log vec length/count field; presumably marks an
 * "ordered" vector carrying no formatted data — confirm against the CIL
 * code that consumes it (NOTE(review): not derivable from this header).
 */
#define XFS_LOG_VEC_ORDERED	(-1)
23
Christoph Hellwig12343512013-12-13 11:00:43 +110024static inline void *
Christoph Hellwigbde7cff2013-12-13 11:34:02 +110025xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
26 uint type)
Christoph Hellwig12343512013-12-13 11:00:43 +110027{
28 struct xfs_log_iovec *vec = *vecp;
29
Christoph Hellwigbde7cff2013-12-13 11:34:02 +110030 if (vec) {
31 ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
32 vec++;
33 } else {
34 vec = &lv->lv_iovecp[0];
35 }
Christoph Hellwig12343512013-12-13 11:00:43 +110036
Christoph Hellwigbde7cff2013-12-13 11:34:02 +110037 vec->i_type = type;
38 vec->i_addr = lv->lv_buf + lv->lv_buf_len;
39
40 ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));
41
42 *vecp = vec;
Christoph Hellwig12343512013-12-13 11:00:43 +110043 return vec->i_addr;
44}
45
Dave Chinner110dc242014-05-20 08:18:09 +100046/*
47 * We need to make sure the next buffer is naturally aligned for the biggest
48 * basic data type we put into it. We already accounted for this padding when
49 * sizing the buffer.
50 *
51 * However, this padding does not get written into the log, and hence we have to
52 * track the space used by the log vectors separately to prevent log space hangs
53 * due to inaccurate accounting (i.e. a leak) of the used log space through the
54 * CIL context ticket.
55 */
Christoph Hellwigbde7cff2013-12-13 11:34:02 +110056static inline void
57xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
58{
Christoph Hellwigbde7cff2013-12-13 11:34:02 +110059 lv->lv_buf_len += round_up(len, sizeof(uint64_t));
Dave Chinner110dc242014-05-20 08:18:09 +100060 lv->lv_bytes += len;
Christoph Hellwigbde7cff2013-12-13 11:34:02 +110061 vec->i_len = len;
62}
63
64static inline void *
65xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
66 uint type, void *data, int len)
67{
68 void *buf;
69
70 buf = xlog_prepare_iovec(lv, vecp, type);
71 memcpy(buf, data, len);
72 xlog_finish_iovec(lv, *vecp, len);
73 return buf;
74}
75
Dave Chinnerfc06c6d2013-08-12 20:49:22 +100076/*
Nathan Scottc41564b2006-03-29 08:55:14 +100077 * By comparing each component, we don't have to worry about extra
Linus Torvalds1da177e2005-04-16 15:20:36 -070078 * endian issues in treating two 32 bit numbers as one 64 bit number
79 */
Andrew Mortona1365642006-01-08 01:04:09 -080080static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
Linus Torvalds1da177e2005-04-16 15:20:36 -070081{
82 if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
83 return (CYCLE_LSN(lsn1)<CYCLE_LSN(lsn2))? -999 : 999;
84
85 if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
86 return (BLOCK_LSN(lsn1)<BLOCK_LSN(lsn2))? -999 : 999;
87
88 return 0;
89}
90
91#define XFS_LSN_CMP(x,y) _lsn_cmp(x,y)
92
/*
 * Flags to xfs_log_force()
 *
 * XFS_LOG_SYNC: Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1

/*
 * Log manager interfaces.
 *
 * Only pointers to these types appear in the prototypes below, so forward
 * declarations suffice; the full definitions live in other headers.
 */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;
struct xlog;

/* Log forcing. */
int	  xfs_log_force(struct xfs_mount *mp, uint flags);
int	  xfs_log_force_seq(struct xfs_mount *mp, xfs_csn_t seq, uint flags,
		int *log_forced);

/* Log mount/unmount lifecycle. */
int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
			int		 	num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
void	xfs_log_mount_cancel(struct xfs_mount *);
void	xfs_log_unmount(struct xfs_mount *mp);
bool	xfs_log_writable(struct xfs_mount *mp);

/* Tail LSN assignment and log space accounting. */
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	  xfs_log_space_wake(struct xfs_mount *mp);

/* Log space reservation via tickets. */
int	  xfs_log_reserve(struct xfs_mount *mp,
			  int		   length,
			  int		   count,
			  struct xlog_ticket **ticket,
			  uint8_t	   clientid,
			  bool		   permanent);
int	  xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);

/* Ticket reference counting. */
struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void      xfs_log_ticket_put(struct xlog_ticket *ticket);

/* CIL checkpoint interfaces. */
void	xlog_cil_process_committed(struct list_head *list);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
int	xfs_log_quiesce(struct xfs_mount *mp);
void	xfs_log_clean(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);

xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
bool	xlog_force_shutdown(struct xlog *log, int shutdown_flags);

/* Incompatible log feature reference counting. */
void xlog_use_incompat_feat(struct xlog *log);
void xlog_drop_incompat_feat(struct xlog *log);

#endif	/* __XFS_LOG_H__ */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148#endif /* __XFS_LOG_H__ */