/*
 * High-level sync()-related operations
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
			SYNC_FILE_RANGE_WAIT_AFTER)

/*
 * Do the filesystem syncing work. For simple filesystems
 * writeback_inodes_sb(sb) just dirties buffers with inodes, so we have to
 * submit I/O for these buffers via __sync_blockdev(). This also speeds up
 * the wait == 1 case since in that case write_inode() methods call
 * sync_dirty_buffer() and thus effectively write one block at a time.
 */
static int __sync_filesystem(struct super_block *sb, int wait)
{
	/* Avoid syncing and pruning the cache twice for quota sync */
	if (!wait) {
		writeout_quota_sb(sb, -1);
		writeback_inodes_sb(sb);
	} else {
		sync_quota_sb(sb, -1);
		sync_inodes_sb(sb);
	}
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, wait);
	return __sync_blockdev(sb->s_bdev, wait);
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock -- filesystem data as well as the underlying block
 * device. The caller must hold sb->s_umount; see the WARN_ON below.
 */
int sync_filesystem(struct super_block *sb)
{
	int ret;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	/*
	 * No point in syncing out anything if the filesystem is read-only.
	 */
	if (sb->s_flags & MS_RDONLY)
		return 0;

	ret = __sync_filesystem(sb, 0);
	if (ret < 0)
		return ret;
	return __sync_filesystem(sb, 1);
}
EXPORT_SYMBOL_GPL(sync_filesystem);

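/*
 * Usage sketch (hypothetical caller, not part of this file): since
 * sync_filesystem() only asserts that s_umount is held, a caller is
 * expected to take it itself, e.g.:
 *
 *	down_read(&sb->s_umount);
 *	error = sync_filesystem(sb);
 *	up_read(&sb->s_umount);
 */
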
/*
 * Sync all the data for all the filesystems (called by sys_sync() and
 * emergency sync).
 *
 * This operation is careful to avoid the livelock which could easily happen
 * if two or more filesystems are being continuously dirtied. s_need_sync
 * is used only here. We set it against all filesystems and then clear it as
 * we sync them. So redirtied filesystems are skipped.
 *
 * But if process A is currently running sync_filesystems and then process B
 * calls sync_filesystems as well, process B will set all the s_need_sync
 * flags again, which will cause process A to resync everything. Fix that with
 * a local mutex.
 */
static void sync_filesystems(int wait)
{
	struct super_block *sb;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);		/* Could be down_interruptible */
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list)
		sb->s_need_sync = 1;

restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (!sb->s_need_sync)
			continue;
		sb->s_need_sync = 0;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY) && sb->s_root)
			__sync_filesystem(sb, wait);
		up_read(&sb->s_umount);

		/* restart only when sb is no longer on the list */
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
	mutex_unlock(&mutex);
}

/*
 * Sync everything. Start out by waking the flusher threads, because they
 * write back all the queues in parallel.
 */
SYSCALL_DEFINE0(sync)
{
	wakeup_flusher_threads(0);
	sync_filesystems(0);
	sync_filesystems(1);
	if (unlikely(laptop_mode))
		laptop_sync_completion();
	return 0;
}

static void do_sync_work(struct work_struct *work)
{
	/*
	 * Sync twice to reduce the possibility we skipped some inodes / pages
	 * because they were temporarily locked
	 */
	sync_filesystems(0);
	sync_filesystems(0);
	printk(KERN_INFO "Emergency Sync complete\n");
	kfree(work);
}

void emergency_sync(void)
{
	struct work_struct *work;

	/* May be called from atomic context (e.g. sysrq), hence GFP_ATOMIC */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_sync_work);
		schedule_work(work);
	}
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}

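/*
 * Usage sketch (hypothetical filesystem, for illustration only): a simple
 * filesystem that keeps its metadata in buffers can point its fsync method
 * at this helper instead of rolling its own:
 *
 *	const struct file_operations examplefs_file_operations = {
 *		.read	= do_sync_read,
 *		.write	= do_sync_write,
 *		.fsync	= file_fsync,
 *	};
 *
 * (examplefs_file_operations is made up; do_sync_read/do_sync_write are
 * the usual generic helpers of this era.)
 */
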
/**
 * vfs_fsync - perform an fsync or fdatasync on a file
 * @file:	file to sync
 * @dentry:	dentry of @file
 * @datasync:	only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk. If @datasync is
 * set, only the metadata needed to access modified file data is written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set. This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	const struct file_operations *fop;
	struct address_space *mapping;
	int err, ret;

	/*
	 * Get mapping and operations from the file in case we have a
	 * file, or get the default values for them in case we don't
	 * have a struct file available. Damn nfsd..
	 */
	if (file) {
		mapping = file->f_mapping;
		fop = file->f_op;
	} else {
		mapping = dentry->d_inode->i_mapping;
		fop = dentry->d_inode->i_fop;
	}

	if (!fop || !fop->fsync) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = fop->fsync(file, dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}
EXPORT_SYMBOL(vfs_fsync);

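/*
 * Usage sketch: with a struct file in hand this is simply
 *
 *	err = vfs_fsync(file, file->f_path.dentry, 0);
 *
 * (exactly what do_fsync() below does), while an nfsd-style caller
 * without a file passes vfs_fsync(NULL, dentry, 0).
 */
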
static int do_fsync(unsigned int fd, int datasync)
{
	struct file *file;
	int ret = -EBADF;

	file = fget(fd);
	if (file) {
		ret = vfs_fsync(file, file->f_path.dentry, datasync);
		fput(file);
	}
	return ret;
}

SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
	return do_fsync(fd, 0);
}

SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
	return do_fsync(fd, 1);
}

/*
 * sys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
 * zero then sys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback. Note that this may block for
 * significant periods due to exhaustion of disk request structures.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to sys_sync_file_range() are placed
 * under writeout. This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout. This is an asynchronous flush-to-disk
 * operation. Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range. This will be used after an
 * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
 * for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
 * a traditional sync() operation. This is a write-for-data-integrity operation
 * which will ensure that all pages in the range which were dirty on entry to
 * sys_sync_file_range() are committed to disk.
 *
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata. So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the data
 * will be available after a crash.
 */
SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
				unsigned int flags)
{
	int ret;
	struct file *file;
	loff_t endbyte;			/* inclusive */
	int fput_needed;
	umode_t i_mode;

	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;

	endbyte = offset + nbytes;

	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;

	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities. Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */

	ret = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto out;

	i_mode = file->f_path.dentry->d_inode->i_mode;
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out_put;

	ret = do_sync_mapping_range(file->f_mapping, offset, endbyte, flags);
out_put:
	fput_light(file, fput_needed);
out:
	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes,
				    long flags)
{
	return SYSC_sync_file_range((int) fd, offset, nbytes,
				    (unsigned int) flags);
}
SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range);
#endif
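
/*
 * Userspace usage sketch (not part of this file's code): a
 * write-for-data-integrity pass over the first megabyte of an
 * already-allocated file, using the flag combination documented above:
 *
 *	ret = sync_file_range(fd, 0, 1024 * 1024,
 *			SYNC_FILE_RANGE_WAIT_BEFORE |
 *			SYNC_FILE_RANGE_WRITE |
 *			SYNC_FILE_RANGE_WAIT_AFTER);
 */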

/*
 * It would be nice if people remembered that not all the world's an i386
 * when they introduce new system calls. On ABIs where 64-bit arguments
 * must sit in aligned register pairs (e.g. ARM EABI), the original
 * argument order wastes registers, so this variant reorders them.
 */
SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags,
				 loff_t offset, loff_t nbytes)
{
	return sys_sync_file_range(fd, offset, nbytes, flags);
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_sync_file_range2(long fd, long flags,
				     loff_t offset, loff_t nbytes)
{
	return SYSC_sync_file_range2((int) fd, (unsigned int) flags,
				     offset, nbytes);
}
SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2);
#endif

/*
 * `endbyte' is inclusive
 */
int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
			  loff_t endbyte, unsigned int flags)
{
	int ret;

	if (!mapping) {
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = wait_on_page_writeback_range(mapping,
					offset >> PAGE_CACHE_SHIFT,
					endbyte >> PAGE_CACHE_SHIFT);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WRITE) {
		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
						 WB_SYNC_ALL);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WAIT_AFTER) {
		ret = wait_on_page_writeback_range(mapping,
					offset >> PAGE_CACHE_SHIFT,
					endbyte >> PAGE_CACHE_SHIFT);
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(do_sync_mapping_range);
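
/*
 * In-kernel usage sketch (hypothetical caller): flush and wait on the byte
 * range [pos, pos + len) of a mapping for data integrity. Note that the
 * endbyte argument is inclusive, hence the - 1:
 *
 *	err = do_sync_mapping_range(inode->i_mapping, pos, pos + len - 1,
 *			SYNC_FILE_RANGE_WAIT_BEFORE |
 *			SYNC_FILE_RANGE_WRITE |
 *			SYNC_FILE_RANGE_WAIT_AFTER);
 */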