// SPDX-License-Identifier: GPL-2.0
/* Watch queue and general notification mechanism, built on pipes
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/core-api/watch_queue.rst
 */

#define pr_fmt(fmt) "watchq: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/sched/signal.h>
#include <linux/watch_queue.h>
#include <linux/pipe_fs_i.h>

MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");

#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)

/*
 * This must be called under the RCU read-lock, which makes
 * sure that the wqueue still exists. It can then take the lock,
 * and check that the wqueue hasn't been destroyed, which in
 * turn makes sure that the notification pipe still exists.
 */
static inline bool lock_wqueue(struct watch_queue *wqueue)
{
	spin_lock_bh(&wqueue->lock);
	if (unlikely(!wqueue->pipe)) {
		spin_unlock_bh(&wqueue->lock);
		return false;
	}
	return true;
}

static inline void unlock_wqueue(struct watch_queue *wqueue)
{
	spin_unlock_bh(&wqueue->lock);
}

static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
					 struct pipe_buffer *buf)
{
	struct watch_queue *wqueue = (struct watch_queue *)buf->private;
	struct page *page;
	unsigned int bit;

	/* We need to work out which note within the page this refers to, but
	 * the note might have been maximum size, so merely ANDing the offset
	 * off doesn't work.  OTOH, the note must've been more than zero size.
	 */
	bit = buf->offset + buf->len;
	if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)
		bit -= WATCH_QUEUE_NOTE_SIZE;
	bit /= WATCH_QUEUE_NOTE_SIZE;

	page = buf->page;
	bit += page->index;

	set_bit(bit, wqueue->notes_bitmap);
	generic_pipe_buf_release(pipe, buf);
}
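
/*
 * Editorial worked example for the note-index calculation above (assuming
 * PAGE_SIZE is 4096, i.e. 32 notes of 128 bytes per page):
 *
 *	buf->offset = 256, buf->len = 96  -> bit = 352; 352 isn't a multiple
 *					     of 128, so 352 / 128 = note 2.
 *	buf->offset = 256, buf->len = 128 -> bit = 384; that is a multiple of
 *					     128, so back off by one note size
 *					     first and again land on note 2.
 *
 * Adding page->index (set to the number of the first note in the page by
 * watch_queue_set_size()) converts the in-page note number into the bit to
 * set free again in the queue-wide notes_bitmap.
 */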

// No try_steal function => no stealing
#define watch_queue_pipe_buf_try_steal NULL

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
	.release	= watch_queue_pipe_buf_release,
	.try_steal	= watch_queue_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/*
 * Post a notification to a watch queue.
 *
 * Must be called with the RCU lock for reading, and the
 * watch_queue lock held, which guarantees that the pipe
 * hasn't been released.
 */
static bool post_one_notification(struct watch_queue *wqueue,
				  struct watch_notification *n)
{
	void *p;
	struct pipe_inode_info *pipe = wqueue->pipe;
	struct pipe_buffer *buf;
	struct page *page;
	unsigned int head, tail, mask, note, offset, len;
	bool done = false;

	spin_lock_irq(&pipe->rd_wait.lock);

	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	if (pipe_full(head, tail, pipe->ring_size))
		goto lost;

	note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes);
	if (note >= wqueue->nr_notes)
		goto lost;

	page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];
	offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE;
	get_page(page);
	len = n->info & WATCH_INFO_LENGTH;
	p = kmap_atomic(page);
	memcpy(p + offset, n, len);
	kunmap_atomic(p);

	buf = &pipe->bufs[head & mask];
	buf->page = page;
	buf->private = (unsigned long)wqueue;
	buf->ops = &watch_queue_pipe_buf_ops;
	buf->offset = offset;
	buf->len = len;
	buf->flags = PIPE_BUF_FLAG_WHOLE;
	smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */

	if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		BUG();
	}
	wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	done = true;

out:
	spin_unlock_irq(&pipe->rd_wait.lock);
	if (done)
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	return done;

lost:
	buf = &pipe->bufs[(head - 1) & mask];
	buf->flags |= PIPE_BUF_FLAG_LOSS;
	goto out;
}
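
/*
 * Editorial sketch (userspace, not compiled here) of how a watcher might
 * drain the pipe.  Each record is queued as a whole buffer
 * (PIPE_BUF_FLAG_WHOLE), so a large enough read() returns one or more
 * complete notifications that can be walked via the length in the info
 * field.  "watch_fd" and "handle_notification()" are placeholders.
 *
 *	char buf[4096];
 *	ssize_t n = read(watch_fd, buf, sizeof(buf));
 *	size_t p = 0;
 *
 *	while (n > 0 && p + sizeof(struct watch_notification) <= (size_t)n) {
 *		struct watch_notification *wn = (void *)(buf + p);
 *		size_t len = wn->info & WATCH_INFO_LENGTH;
 *
 *		if (len == 0 || p + len > (size_t)n)
 *			break;			// malformed record, stop
 *		handle_notification(wn);	// consumer-supplied
 *		p += len;
 *	}
 */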

/*
 * Apply filter rules to a notification.
 */
static bool filter_watch_notification(const struct watch_filter *wf,
				      const struct watch_notification *n)
{
	const struct watch_type_filter *wt;
	unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8;
	unsigned int st_index = n->subtype / st_bits;
	unsigned int st_bit = 1U << (n->subtype % st_bits);
	int i;

	if (!test_bit(n->type, wf->type_filter))
		return false;

	for (i = 0; i < wf->nr_filters; i++) {
		wt = &wf->filters[i];
		if (n->type == wt->type &&
		    (wt->subtype_filter[st_index] & st_bit) &&
		    (n->info & wt->info_mask) == wt->info_filter)
			return true;
	}

	return false; /* If there is a filter, the default is to reject. */
}
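
/*
 * Editorial note on the subtype test above: the subtype_filter[] words are
 * 32 bits wide, so st_bits is 32 and a notification with subtype 5 is
 * checked against bit (1 << 5) of subtype_filter[0].  Since
 * watch_queue_set_filter() below only fills in subtype_filter[0], subtypes
 * 0-31 of each type are what can usefully be filtered on.
 */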

/**
 * __post_watch_notification - Post an event notification
 * @wlist: The watch list to post the event to.
 * @n: The notification record to post.
 * @cred: The creds of the process that triggered the notification.
 * @id: The ID to match on the watch.
 *
 * Post a notification of an event into a set of watch queues and let the users
 * know.
 *
 * The size of the notification should be set in n->info & WATCH_INFO_LENGTH and
 * should be in units of sizeof(*n).
 */
void __post_watch_notification(struct watch_list *wlist,
			       struct watch_notification *n,
			       const struct cred *cred,
			       u64 id)
{
	const struct watch_filter *wf;
	struct watch_queue *wqueue;
	struct watch *watch;

	if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) {
		WARN_ON(1);
		return;
	}

	rcu_read_lock();

	hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) {
		if (watch->id != id)
			continue;
		n->info &= ~WATCH_INFO_ID;
		n->info |= watch->info_id;

		wqueue = rcu_dereference(watch->queue);
		wf = rcu_dereference(wqueue->filter);
		if (wf && !filter_watch_notification(wf, n))
			continue;

		if (security_post_notification(watch->cred, cred, n) < 0)
			continue;

		if (lock_wqueue(wqueue)) {
			post_one_notification(wqueue, n);
			unlock_wqueue(wqueue);
		}
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(__post_watch_notification);
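
/*
 * Editorial sketch of a kernel-side caller (hypothetical subsystem; real
 * users normally go through the post_watch_notification() wrapper in
 * <linux/watch_queue.h>, which skips the work when no watch list exists).
 * The type, subtype and cookie field names are assumptions for illustration.
 *
 *	struct my_notification {
 *		struct watch_notification watch;
 *		__u64 cookie;
 *	} n;
 *
 *	n.watch.type	= WATCH_TYPE_MY_SUBSYSTEM;
 *	n.watch.subtype	= NOTIFY_MY_EVENT;
 *	n.watch.info	= watch_sizeof(n);
 *	n.cookie	= cookie;
 *
 *	post_watch_notification(wlist, &n.watch, current_cred(), object_id);
 */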

/*
 * Preallocate sufficient pages for the requested number of notifications.
 */
long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
{
	struct watch_queue *wqueue = pipe->watch_queue;
	struct page **pages;
	unsigned long *bitmap;
	unsigned long user_bufs;
	int ret, i, nr_pages;

	if (!wqueue)
		return -ENODEV;
	if (wqueue->notes)
		return -EBUSY;

	if (nr_notes < 1 ||
	    nr_notes > 512) /* TODO: choose a better hard limit */
		return -EINVAL;

	nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1);
	nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE;
	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);

	if (nr_pages > pipe->max_usage &&
	    (too_many_pipe_buffers_hard(user_bufs) ||
	     too_many_pipe_buffers_soft(user_bufs)) &&
	    pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto error;
	}

	nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
	ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
	if (ret < 0)
		goto error;

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto error;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto error_p;
		pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
	}

	bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
	if (!bitmap)
		goto error_p;

	bitmap_fill(bitmap, nr_notes);
	wqueue->notes = pages;
	wqueue->notes_bitmap = bitmap;
	wqueue->nr_pages = nr_pages;
	wqueue->nr_notes = nr_notes;
	return 0;

error_p:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
error:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
	return ret;
}
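
/*
 * Editorial sketch (userspace): this function is reached via
 * ioctl(IOC_WATCH_QUEUE_SET_SIZE) on a pipe opened with O_NOTIFICATION_PIPE.
 * Error handling is omitted and 256 is just an arbitrary value within the
 * 1..512 limit enforced above.
 *
 *	int fds[2];
 *
 *	pipe2(fds, O_NOTIFICATION_PIPE);
 *	ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256);
 *	// fds[0] can now be handed to a watch source,
 *	// e.g. keyctl(KEYCTL_WATCH_KEY, key, fds[0], 0x01);
 */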

/*
 * Set the filter on a watch queue.
 */
long watch_queue_set_filter(struct pipe_inode_info *pipe,
			    struct watch_notification_filter __user *_filter)
{
	struct watch_notification_type_filter *tf;
	struct watch_notification_filter filter;
	struct watch_type_filter *q;
	struct watch_filter *wfilter;
	struct watch_queue *wqueue = pipe->watch_queue;
	int ret, nr_filter = 0, i;

	if (!wqueue)
		return -ENODEV;

	if (!_filter) {
		/* Remove the old filter */
		wfilter = NULL;
		goto set;
	}

	/* Grab the user's filter specification */
	if (copy_from_user(&filter, _filter, sizeof(filter)) != 0)
		return -EFAULT;
	if (filter.nr_filters == 0 ||
	    filter.nr_filters > 16 ||
	    filter.__reserved != 0)
		return -EINVAL;

	tf = memdup_array_user(_filter->filters, filter.nr_filters, sizeof(*tf));
	if (IS_ERR(tf))
		return PTR_ERR(tf);

	ret = -EINVAL;
	for (i = 0; i < filter.nr_filters; i++) {
		if ((tf[i].info_filter & ~tf[i].info_mask) ||
		    tf[i].info_mask & WATCH_INFO_LENGTH)
			goto err_filter;
		/* Ignore any unknown types */
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;
		nr_filter++;
	}

	/* Now we need to build the internal filter from only the relevant
	 * user-specified filters.
	 */
	ret = -ENOMEM;
	wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);
	if (!wfilter)
		goto err_filter;
	wfilter->nr_filters = nr_filter;

	q = wfilter->filters;
	for (i = 0; i < filter.nr_filters; i++) {
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;

		q->type			= tf[i].type;
		q->info_filter		= tf[i].info_filter;
		q->info_mask		= tf[i].info_mask;
		q->subtype_filter[0]	= tf[i].subtype_filter[0];
		__set_bit(q->type, wfilter->type_filter);
		q++;
	}

	kfree(tf);
set:
	pipe_lock(pipe);
	wfilter = rcu_replace_pointer(wqueue->filter, wfilter,
				      lockdep_is_held(&pipe->mutex));
	pipe_unlock(pipe);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	return 0;

err_filter:
	kfree(tf);
	return ret;
}
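
/*
 * Editorial sketch (userspace) of installing a filter via
 * ioctl(IOC_WATCH_QUEUE_SET_FILTER).  The layout follows the UAPI structs in
 * <linux/watch_queue.h>; the choice of WATCH_TYPE_KEY_NOTIFY and "accept
 * every subtype" is only an example, and error handling is omitted.
 *
 *	struct watch_notification_filter *f;
 *	size_t size = sizeof(*f) + sizeof(struct watch_notification_type_filter);
 *
 *	f = calloc(1, size);
 *	f->nr_filters = 1;
 *	f->filters[0].type = WATCH_TYPE_KEY_NOTIFY;
 *	f->filters[0].subtype_filter[0] = ~0U;	// pass all subtypes 0-31
 *	ioctl(watch_fd, IOC_WATCH_QUEUE_SET_FILTER, f);
 *	free(f);
 */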

static void __put_watch_queue(struct kref *kref)
{
	struct watch_queue *wqueue =
		container_of(kref, struct watch_queue, usage);
	struct watch_filter *wfilter;
	int i;

	for (i = 0; i < wqueue->nr_pages; i++)
		__free_page(wqueue->notes[i]);
	kfree(wqueue->notes);
	bitmap_free(wqueue->notes_bitmap);

	wfilter = rcu_access_pointer(wqueue->filter);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	kfree_rcu(wqueue, rcu);
}

/**
 * put_watch_queue - Dispose of a ref on a watchqueue.
 * @wqueue: The watch queue to unref.
 */
void put_watch_queue(struct watch_queue *wqueue)
{
	kref_put(&wqueue->usage, __put_watch_queue);
}
EXPORT_SYMBOL(put_watch_queue);

static void free_watch(struct rcu_head *rcu)
{
	struct watch *watch = container_of(rcu, struct watch, rcu);

	put_watch_queue(rcu_access_pointer(watch->queue));
	atomic_dec(&watch->cred->user->nr_watches);
	put_cred(watch->cred);
	kfree(watch);
}

static void __put_watch(struct kref *kref)
{
	struct watch *watch = container_of(kref, struct watch, usage);

	call_rcu(&watch->rcu, free_watch);
}

/*
 * Discard a watch.
 */
static void put_watch(struct watch *watch)
{
	kref_put(&watch->usage, __put_watch);
}

/**
 * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *
 * Initialise a watch and set the watch queue.
 */
void init_watch(struct watch *watch, struct watch_queue *wqueue)
{
	kref_init(&watch->usage);
	INIT_HLIST_NODE(&watch->list_node);
	INIT_HLIST_NODE(&watch->queue_node);
	rcu_assign_pointer(watch->queue, wqueue);
}

static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
{
	const struct cred *cred;
	struct watch *w;

	hlist_for_each_entry(w, &wlist->watchers, list_node) {
		struct watch_queue *wq = rcu_access_pointer(w->queue);
		if (wqueue == wq && watch->id == w->id)
			return -EBUSY;
	}

	cred = current_cred();
	if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
		atomic_dec(&cred->user->nr_watches);
		return -EAGAIN;
	}

	watch->cred = get_cred(cred);
	rcu_assign_pointer(watch->watch_list, wlist);

	kref_get(&wqueue->usage);
	kref_get(&watch->usage);
	hlist_add_head(&watch->queue_node, &wqueue->watches);
	hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
	return 0;
}

/**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
 * @wlist: The watch list to add to
 *
 * @watch->queue must have been set to point to the queue to post notifications
 * to and the watch list of the object to be watched. @watch->cred must also
 * have been set to the appropriate credentials and a ref taken on them.
 *
 * The caller must pin the queue and the list both and must hold the list
 * locked against racing watch additions/removals.
 */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
	struct watch_queue *wqueue;
	int ret = -ENOENT;

	rcu_read_lock();

	wqueue = rcu_access_pointer(watch->queue);
	if (lock_wqueue(wqueue)) {
		spin_lock(&wlist->lock);
		ret = add_one_watch(watch, wlist, wqueue);
		spin_unlock(&wlist->lock);
		unlock_wqueue(wqueue);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(add_watch_to_object);
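
/*
 * Editorial sketch of kernel-side use, modelled loosely on the
 * KEYCTL_WATCH_KEY path; "obj", its embedded watchers list and "tag" are
 * hypothetical names.
 *
 *	struct watch *watch = kzalloc(sizeof(*watch), GFP_KERNEL);
 *
 *	if (!watch)
 *		return -ENOMEM;
 *	init_watch(watch, wqueue);	// wqueue from get_watch_queue()
 *	watch->id	= obj->id;	// matched against the id given to
 *					// __post_watch_notification()
 *	watch->info_id	= (u32)tag << WATCH_INFO_ID__SHIFT; // echoed in n->info
 *
 *	ret = add_watch_to_object(watch, &obj->watchers);
 *	if (ret < 0)
 *		kfree(watch);
 */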

/**
 * remove_watch_from_object - Remove a watch or all watches from an object.
 * @wlist: The watch list to remove from
 * @wq: The watch queue of interest (ignored if @all is true)
 * @id: The ID of the watch to remove (ignored if @all is true)
 * @all: True to remove all watches
 *
 * Remove a specific watch or all watches from an object. A notification is
 * sent to the watcher to tell them that this happened.
 */
int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
			     u64 id, bool all)
{
	struct watch_notification_removal n;
	struct watch_queue *wqueue;
	struct watch *watch;
	int ret = -EBADSLT;

	rcu_read_lock();

again:
	spin_lock(&wlist->lock);
	hlist_for_each_entry(watch, &wlist->watchers, list_node) {
		if (all ||
		    (watch->id == id && rcu_access_pointer(watch->queue) == wq))
			goto found;
	}
	spin_unlock(&wlist->lock);
	goto out;

found:
	ret = 0;
	hlist_del_init_rcu(&watch->list_node);
	rcu_assign_pointer(watch->watch_list, NULL);
	spin_unlock(&wlist->lock);

	/* We now own the reference on watch that used to belong to wlist. */

	n.watch.type = WATCH_TYPE_META;
	n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION;
	n.watch.info = watch->info_id | watch_sizeof(n.watch);
	n.id = id;
	if (id != 0)
		n.watch.info = watch->info_id | watch_sizeof(n);

	wqueue = rcu_dereference(watch->queue);

	if (lock_wqueue(wqueue)) {
		post_one_notification(wqueue, &n.watch);

		if (!hlist_unhashed(&watch->queue_node)) {
			hlist_del_init_rcu(&watch->queue_node);
			put_watch(watch);
		}

		unlock_wqueue(wqueue);
	}

	if (wlist->release_watch) {
		void (*release_watch)(struct watch *);

		release_watch = wlist->release_watch;
		rcu_read_unlock();
		(*release_watch)(watch);
		rcu_read_lock();
	}
	put_watch(watch);

	if (all && !hlist_empty(&wlist->watchers))
		goto again;
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(remove_watch_from_object);
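
/*
 * Editorial note: the message posted above is WATCH_TYPE_META /
 * WATCH_META_REMOVAL_NOTIFICATION.  When a non-zero @id is supplied, the
 * larger watch_notification_removal layout (including n.id) is advertised
 * in the length so the watcher can see which object the removed watch
 * referred to; otherwise only the bare watch_notification header is sent.
 */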

/*
 * Remove all the watches that are contributory to a queue.  This has the
 * potential to race with removal of the watches by the destruction of the
 * objects being watched or with the distribution of notifications.
 */
void watch_queue_clear(struct watch_queue *wqueue)
{
	struct watch_list *wlist;
	struct watch *watch;
	bool release;

	rcu_read_lock();
	spin_lock_bh(&wqueue->lock);

	/*
	 * This pipe can be freed by callers like free_pipe_info().
	 * Removing this reference also prevents new notifications.
	 */
	wqueue->pipe = NULL;

	while (!hlist_empty(&wqueue->watches)) {
		watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
		hlist_del_init_rcu(&watch->queue_node);
		/* We now own a ref on the watch. */
		spin_unlock_bh(&wqueue->lock);

		/* We can't do the next bit under the queue lock as we need to
		 * get the list lock - which would cause a deadlock if someone
		 * was removing from the opposite direction at the same time or
		 * posting a notification.
		 */
		wlist = rcu_dereference(watch->watch_list);
		if (wlist) {
			void (*release_watch)(struct watch *);

			spin_lock(&wlist->lock);

			release = !hlist_unhashed(&watch->list_node);
			if (release) {
				hlist_del_init_rcu(&watch->list_node);
				rcu_assign_pointer(watch->watch_list, NULL);

				/* We now own a second ref on the watch. */
			}

			release_watch = wlist->release_watch;
			spin_unlock(&wlist->lock);

			if (release) {
				if (release_watch) {
					rcu_read_unlock();
					/* This might need to call dput(), so
					 * we have to drop all the locks.
					 */
					(*release_watch)(watch);
					rcu_read_lock();
				}
				put_watch(watch);
			}
		}

		put_watch(watch);
		spin_lock_bh(&wqueue->lock);
	}

	spin_unlock_bh(&wqueue->lock);
	rcu_read_unlock();
}

/**
 * get_watch_queue - Get a watch queue from its file descriptor.
 * @fd: The fd to query.
 */
struct watch_queue *get_watch_queue(int fd)
{
	struct pipe_inode_info *pipe;
	struct watch_queue *wqueue = ERR_PTR(-EINVAL);
	struct fd f;

	f = fdget(fd);
	if (fd_file(f)) {
		pipe = get_pipe_info(fd_file(f), false);
		if (pipe && pipe->watch_queue) {
			wqueue = pipe->watch_queue;
			kref_get(&wqueue->usage);
		}
		fdput(f);
	}

	return wqueue;
}
EXPORT_SYMBOL(get_watch_queue);

/*
 * Initialise a watch queue
 */
int watch_queue_init(struct pipe_inode_info *pipe)
{
	struct watch_queue *wqueue;

	wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL);
	if (!wqueue)
		return -ENOMEM;

	wqueue->pipe = pipe;
	kref_init(&wqueue->usage);
	spin_lock_init(&wqueue->lock);
	INIT_HLIST_HEAD(&wqueue->watches);

	pipe->watch_queue = wqueue;
	return 0;
}