1 // SPDX-License-Identifier: LGPL-2.1 1 // SPDX-License-Identifier: LGPL-2.1 2 /* 2 /* 3 * 3 * 4 * vfs operations that deal with files 4 * vfs operations that deal with files 5 * 5 * 6 * Copyright (C) International Business Mach 6 * Copyright (C) International Business Machines Corp., 2002,2010 7 * Author(s): Steve French (sfrench@us.ibm.c 7 * Author(s): Steve French (sfrench@us.ibm.com) 8 * Jeremy Allison (jra@samba.org) 8 * Jeremy Allison (jra@samba.org) 9 * 9 * 10 */ 10 */ 11 #include <linux/fs.h> 11 #include <linux/fs.h> 12 #include <linux/filelock.h> 12 #include <linux/filelock.h> 13 #include <linux/backing-dev.h> 13 #include <linux/backing-dev.h> 14 #include <linux/stat.h> 14 #include <linux/stat.h> 15 #include <linux/fcntl.h> 15 #include <linux/fcntl.h> 16 #include <linux/pagemap.h> 16 #include <linux/pagemap.h> 17 #include <linux/pagevec.h> 17 #include <linux/pagevec.h> 18 #include <linux/writeback.h> 18 #include <linux/writeback.h> 19 #include <linux/task_io_accounting_ops.h> 19 #include <linux/task_io_accounting_ops.h> 20 #include <linux/delay.h> 20 #include <linux/delay.h> 21 #include <linux/mount.h> 21 #include <linux/mount.h> 22 #include <linux/slab.h> 22 #include <linux/slab.h> 23 #include <linux/swap.h> 23 #include <linux/swap.h> 24 #include <linux/mm.h> 24 #include <linux/mm.h> 25 #include <asm/div64.h> 25 #include <asm/div64.h> 26 #include "cifsfs.h" 26 #include "cifsfs.h" 27 #include "cifspdu.h" 27 #include "cifspdu.h" 28 #include "cifsglob.h" 28 #include "cifsglob.h" 29 #include "cifsproto.h" 29 #include "cifsproto.h" 30 #include "smb2proto.h" 30 #include "smb2proto.h" 31 #include "cifs_unicode.h" 31 #include "cifs_unicode.h" 32 #include "cifs_debug.h" 32 #include "cifs_debug.h" 33 #include "cifs_fs_sb.h" 33 #include "cifs_fs_sb.h" 34 #include "fscache.h" 34 #include "fscache.h" 35 #include "smbdirect.h" 35 #include "smbdirect.h" 36 #include "fs_context.h" 36 #include "fs_context.h" 37 #include "cifs_ioctl.h" 37 #include "cifs_ioctl.h" 38 #include "cached_dir.h" 38 #include "cached_dir.h" 39 #include <trace/events/netfs.h> 39 #include <trace/events/netfs.h> 40 40 41 static int cifs_reopen_file(struct cifsFileInf 41 static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush); 42 42 43 /* 43 /* 44 * Prepare a subrequest to upload to the serve 44 * Prepare a subrequest to upload to the server. We need to allocate credits 45 * so that we know the maximum amount of data 45 * so that we know the maximum amount of data that we can include in it. 46 */ 46 */ 47 static void cifs_prepare_write(struct netfs_io 47 static void cifs_prepare_write(struct netfs_io_subrequest *subreq) 48 { 48 { 49 struct cifs_io_subrequest *wdata = 49 struct cifs_io_subrequest *wdata = 50 container_of(subreq, struct ci 50 container_of(subreq, struct cifs_io_subrequest, subreq); 51 struct cifs_io_request *req = wdata->r 51 struct cifs_io_request *req = wdata->req; 52 struct netfs_io_stream *stream = &req- 52 struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr]; 53 struct TCP_Server_Info *server; 53 struct TCP_Server_Info *server; 54 struct cifsFileInfo *open_file = req-> 54 struct cifsFileInfo *open_file = req->cfile; 55 size_t wsize = req->rreq.wsize; 55 size_t wsize = req->rreq.wsize; 56 int rc; 56 int rc; 57 57 58 if (!wdata->have_xid) { 58 if (!wdata->have_xid) { 59 wdata->xid = get_xid(); 59 wdata->xid = get_xid(); 60 wdata->have_xid = true; 60 wdata->have_xid = true; 61 } 61 } 62 62 63 server = cifs_pick_channel(tlink_tcon( 63 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses); 64 wdata->server = server; 64 wdata->server = server; 65 65 66 retry: 66 retry: 67 if (open_file->invalidHandle) { 67 if (open_file->invalidHandle) { 68 rc = cifs_reopen_file(open_fil 68 rc = cifs_reopen_file(open_file, false); 69 if (rc < 0) { 69 if (rc < 0) { 70 if (rc == -EAGAIN) 70 if (rc == -EAGAIN) 71 goto retry; 71 goto retry; 72 subreq->error = rc; 72 subreq->error = rc; 73 return netfs_prepare_w 73 return netfs_prepare_write_failed(subreq); 74 } 74 } 75 } 75 } 76 76 77 rc = server->ops->wait_mtu_credits(ser 77 rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len, 78 &wd 78 &wdata->credits); 79 if (rc < 0) { 79 if (rc < 0) { 80 subreq->error = rc; 80 subreq->error = rc; 81 return netfs_prepare_write_fai 81 return netfs_prepare_write_failed(subreq); 82 } 82 } 83 83 84 wdata->credits.rreq_debug_id = subreq- 84 wdata->credits.rreq_debug_id = subreq->rreq->debug_id; 85 wdata->credits.rreq_debug_index = subr 85 wdata->credits.rreq_debug_index = subreq->debug_index; 86 wdata->credits.in_flight_check = 1; 86 wdata->credits.in_flight_check = 1; 87 trace_smb3_rw_credits(wdata->rreq->deb 87 trace_smb3_rw_credits(wdata->rreq->debug_id, 88 wdata->subreq.de 88 wdata->subreq.debug_index, 89 wdata->credits.v 89 wdata->credits.value, 90 server->credits, 90 server->credits, server->in_flight, 91 wdata->credits.v 91 wdata->credits.value, 92 cifs_trace_rw_cr 92 cifs_trace_rw_credits_write_prepare); 93 93 94 #ifdef CONFIG_CIFS_SMB_DIRECT 94 #ifdef CONFIG_CIFS_SMB_DIRECT 95 if (server->smbd_conn) 95 if (server->smbd_conn) 96 stream->sreq_max_segs = server 96 stream->sreq_max_segs = server->smbd_conn->max_frmr_depth; 97 #endif 97 #endif 98 } 98 } 99 99 100 /* 100 /* 101 * Issue a subrequest to upload to the server. 101 * Issue a subrequest to upload to the server. 102 */ 102 */ 103 static void cifs_issue_write(struct netfs_io_s 103 static void cifs_issue_write(struct netfs_io_subrequest *subreq) 104 { 104 { 105 struct cifs_io_subrequest *wdata = 105 struct cifs_io_subrequest *wdata = 106 container_of(subreq, struct ci 106 container_of(subreq, struct cifs_io_subrequest, subreq); 107 struct cifs_sb_info *sbi = CIFS_SB(sub 107 struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb); 108 int rc; 108 int rc; 109 109 110 if (cifs_forced_shutdown(sbi)) { 110 if (cifs_forced_shutdown(sbi)) { 111 rc = -EIO; 111 rc = -EIO; 112 goto fail; 112 goto fail; 113 } 113 } 114 114 115 rc = adjust_credits(wdata->server, wda 115 rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust); 116 if (rc) 116 if (rc) 117 goto fail; 117 goto fail; 118 118 119 rc = -EAGAIN; 119 rc = -EAGAIN; 120 if (wdata->req->cfile->invalidHandle) 120 if (wdata->req->cfile->invalidHandle) 121 goto fail; 121 goto fail; 122 122 123 wdata->server->ops->async_writev(wdata 123 wdata->server->ops->async_writev(wdata); 124 out: 124 out: 125 return; 125 return; 126 126 127 fail: 127 fail: 128 if (rc == -EAGAIN) 128 if (rc == -EAGAIN) 129 trace_netfs_sreq(subreq, netfs 129 trace_netfs_sreq(subreq, netfs_sreq_trace_retry); 130 else 130 else 131 trace_netfs_sreq(subreq, netfs 131 trace_netfs_sreq(subreq, netfs_sreq_trace_fail); 132 add_credits_and_wake_if(wdata->server, 132 add_credits_and_wake_if(wdata->server, &wdata->credits, 0); 133 cifs_write_subrequest_terminated(wdata 133 cifs_write_subrequest_terminated(wdata, rc, false); 134 goto out; 134 goto out; 135 } 135 } 136 136 137 static void cifs_netfs_invalidate_cache(struct 137 static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq) 138 { 138 { 139 cifs_invalidate_cache(wreq->inode, 0); 139 cifs_invalidate_cache(wreq->inode, 0); 140 } 140 } 141 141 142 /* 142 /* 143 * Negotiate the size of a read operation on b 143 * Negotiate the size of a read operation on behalf of the netfs library. 144 */ 144 */ 145 static int cifs_prepare_read(struct netfs_io_s 145 static int cifs_prepare_read(struct netfs_io_subrequest *subreq) 146 { 146 { 147 struct netfs_io_request *rreq = subreq 147 struct netfs_io_request *rreq = subreq->rreq; 148 struct cifs_io_subrequest *rdata = con 148 struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq); 149 struct cifs_io_request *req = containe 149 struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); 150 struct TCP_Server_Info *server = req-> 150 struct TCP_Server_Info *server = req->server; 151 struct cifs_sb_info *cifs_sb = CIFS_SB 151 struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); 152 size_t size; 152 size_t size; 153 int rc = 0; 153 int rc = 0; 154 154 155 if (!rdata->have_xid) { 155 if (!rdata->have_xid) { 156 rdata->xid = get_xid(); 156 rdata->xid = get_xid(); 157 rdata->have_xid = true; 157 rdata->have_xid = true; 158 } 158 } 159 rdata->server = server; 159 rdata->server = server; 160 160 161 if (cifs_sb->ctx->rsize == 0) 161 if (cifs_sb->ctx->rsize == 0) 162 cifs_sb->ctx->rsize = 162 cifs_sb->ctx->rsize = 163 server->ops->negotiate 163 server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink), 164 164 cifs_sb->ctx); 165 165 166 rc = server->ops->wait_mtu_credits(ser 166 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, 167 &si 167 &size, &rdata->credits); 168 if (rc) 168 if (rc) 169 return rc; 169 return rc; 170 170 171 rreq->io_streams[0].sreq_max_len = siz 171 rreq->io_streams[0].sreq_max_len = size; 172 172 173 rdata->credits.in_flight_check = 1; 173 rdata->credits.in_flight_check = 1; 174 rdata->credits.rreq_debug_id = rreq->d 174 rdata->credits.rreq_debug_id = rreq->debug_id; 175 rdata->credits.rreq_debug_index = subr 175 rdata->credits.rreq_debug_index = subreq->debug_index; 176 176 177 trace_smb3_rw_credits(rdata->rreq->deb 177 trace_smb3_rw_credits(rdata->rreq->debug_id, 178 rdata->subreq.de 178 rdata->subreq.debug_index, 179 rdata->credits.v 179 rdata->credits.value, 180 server->credits, 180 server->credits, server->in_flight, 0, 181 cifs_trace_rw_cr 181 cifs_trace_rw_credits_read_submit); 182 182 183 #ifdef CONFIG_CIFS_SMB_DIRECT 183 #ifdef CONFIG_CIFS_SMB_DIRECT 184 if (server->smbd_conn) 184 if (server->smbd_conn) 185 rreq->io_streams[0].sreq_max_s 185 rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth; 186 #endif 186 #endif 187 return 0; 187 return 0; 188 } 188 } 189 189 190 /* 190 /* 191 * Issue a read operation on behalf of the net 191 * Issue a read operation on behalf of the netfs helper functions. We're asked 192 * to make a read of a certain size at a point 192 * to make a read of a certain size at a point in the file. We are permitted 193 * to only read a portion of that, but as long 193 * to only read a portion of that, but as long as we read something, the netfs 194 * helper will call us again so that we can is 194 * helper will call us again so that we can issue another read. 195 */ 195 */ 196 static void cifs_issue_read(struct netfs_io_su 196 static void cifs_issue_read(struct netfs_io_subrequest *subreq) 197 { 197 { 198 struct netfs_io_request *rreq = subreq 198 struct netfs_io_request *rreq = subreq->rreq; 199 struct cifs_io_subrequest *rdata = con 199 struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq); 200 struct cifs_io_request *req = containe 200 struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); 201 struct TCP_Server_Info *server = req-> 201 struct TCP_Server_Info *server = req->server; 202 int rc = 0; 202 int rc = 0; 203 203 204 cifs_dbg(FYI, "%s: op=%08x[%x] mapping 204 cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n", 205 __func__, rreq->debug_id, sub 205 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping, 206 subreq->transferred, subreq-> 206 subreq->transferred, subreq->len); 207 207 208 rc = adjust_credits(server, rdata, cif 208 rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust); 209 if (rc) 209 if (rc) 210 goto failed; 210 goto failed; 211 211 212 if (req->cfile->invalidHandle) { 212 if (req->cfile->invalidHandle) { 213 do { 213 do { 214 rc = cifs_reopen_file( 214 rc = cifs_reopen_file(req->cfile, true); 215 } while (rc == -EAGAIN); 215 } while (rc == -EAGAIN); 216 if (rc) 216 if (rc) 217 goto failed; 217 goto failed; 218 } 218 } 219 219 220 if (subreq->rreq->origin != NETFS_DIO_ 220 if (subreq->rreq->origin != NETFS_DIO_READ) 221 __set_bit(NETFS_SREQ_CLEAR_TAI 221 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); 222 222 223 trace_netfs_sreq(subreq, netfs_sreq_tr 223 trace_netfs_sreq(subreq, netfs_sreq_trace_submit); 224 rc = rdata->server->ops->async_readv(r 224 rc = rdata->server->ops->async_readv(rdata); 225 if (rc) 225 if (rc) 226 goto failed; 226 goto failed; 227 return; 227 return; 228 228 229 failed: 229 failed: 230 netfs_read_subreq_terminated(subreq, r 230 netfs_read_subreq_terminated(subreq, rc, false); 231 } 231 } 232 232 233 /* 233 /* 234 * Writeback calls this when it finds a folio 234 * Writeback calls this when it finds a folio that needs uploading. This isn't 235 * called if writeback only has copy-to-cache 235 * called if writeback only has copy-to-cache to deal with. 236 */ 236 */ 237 static void cifs_begin_writeback(struct netfs_ 237 static void cifs_begin_writeback(struct netfs_io_request *wreq) 238 { 238 { 239 struct cifs_io_request *req = containe 239 struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq); 240 int ret; 240 int ret; 241 241 242 ret = cifs_get_writable_file(CIFS_I(wr 242 ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile); 243 if (ret) { 243 if (ret) { 244 cifs_dbg(VFS, "No writable han 244 cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret); 245 return; 245 return; 246 } 246 } 247 247 248 wreq->io_streams[0].avail = true; 248 wreq->io_streams[0].avail = true; 249 } 249 } 250 250 251 /* 251 /* 252 * Initialise a request. 252 * Initialise a request. 253 */ 253 */ 254 static int cifs_init_request(struct netfs_io_r 254 static int cifs_init_request(struct netfs_io_request *rreq, struct file *file) 255 { 255 { 256 struct cifs_io_request *req = containe 256 struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq); 257 struct cifs_sb_info *cifs_sb = CIFS_SB 257 struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); 258 struct cifsFileInfo *open_file = NULL; 258 struct cifsFileInfo *open_file = NULL; 259 259 260 rreq->rsize = cifs_sb->ctx->rsize; 260 rreq->rsize = cifs_sb->ctx->rsize; 261 rreq->wsize = cifs_sb->ctx->wsize; 261 rreq->wsize = cifs_sb->ctx->wsize; 262 req->pid = current->tgid; // Ummm... 262 req->pid = current->tgid; // Ummm... This may be a workqueue 263 263 264 if (file) { 264 if (file) { 265 open_file = file->private_data 265 open_file = file->private_data; 266 rreq->netfs_priv = file->priva 266 rreq->netfs_priv = file->private_data; 267 req->cfile = cifsFileInfo_get( 267 req->cfile = cifsFileInfo_get(open_file); 268 req->server = cifs_pick_channe 268 req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses); 269 if (cifs_sb->mnt_cifs_flags & 269 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 270 req->pid = req->cfile- 270 req->pid = req->cfile->pid; 271 } else if (rreq->origin != NETFS_WRITE 271 } else if (rreq->origin != NETFS_WRITEBACK) { 272 WARN_ON_ONCE(1); 272 WARN_ON_ONCE(1); 273 return -EIO; 273 return -EIO; 274 } 274 } 275 275 276 return 0; 276 return 0; 277 } 277 } 278 278 279 /* 279 /* 280 * Completion of a request operation. 280 * Completion of a request operation. 281 */ 281 */ 282 static void cifs_rreq_done(struct netfs_io_req 282 static void cifs_rreq_done(struct netfs_io_request *rreq) 283 { 283 { 284 struct timespec64 atime, mtime; 284 struct timespec64 atime, mtime; 285 struct inode *inode = rreq->inode; 285 struct inode *inode = rreq->inode; 286 286 287 /* we do not want atime to be less tha 287 /* we do not want atime to be less than mtime, it broke some apps */ 288 atime = inode_set_atime_to_ts(inode, c 288 atime = inode_set_atime_to_ts(inode, current_time(inode)); 289 mtime = inode_get_mtime(inode); 289 mtime = inode_get_mtime(inode); 290 if (timespec64_compare(&atime, &mtime) 290 if (timespec64_compare(&atime, &mtime)) 291 inode_set_atime_to_ts(inode, i 291 inode_set_atime_to_ts(inode, inode_get_mtime(inode)); 292 } 292 } 293 293 294 static void cifs_free_request(struct netfs_io_ 294 static void cifs_free_request(struct netfs_io_request *rreq) 295 { 295 { 296 struct cifs_io_request *req = containe 296 struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq); 297 297 298 if (req->cfile) 298 if (req->cfile) 299 cifsFileInfo_put(req->cfile); 299 cifsFileInfo_put(req->cfile); 300 } 300 } 301 301 302 static void cifs_free_subrequest(struct netfs_ 302 static void cifs_free_subrequest(struct netfs_io_subrequest *subreq) 303 { 303 { 304 struct cifs_io_subrequest *rdata = 304 struct cifs_io_subrequest *rdata = 305 container_of(subreq, struct ci 305 container_of(subreq, struct cifs_io_subrequest, subreq); 306 int rc = subreq->error; 306 int rc = subreq->error; 307 307 308 if (rdata->subreq.source == NETFS_DOWN 308 if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) { 309 #ifdef CONFIG_CIFS_SMB_DIRECT 309 #ifdef CONFIG_CIFS_SMB_DIRECT 310 if (rdata->mr) { 310 if (rdata->mr) { 311 smbd_deregister_mr(rda 311 smbd_deregister_mr(rdata->mr); 312 rdata->mr = NULL; 312 rdata->mr = NULL; 313 } 313 } 314 #endif 314 #endif 315 } 315 } 316 316 317 if (rdata->credits.value != 0) { 317 if (rdata->credits.value != 0) { 318 trace_smb3_rw_credits(rdata->r 318 trace_smb3_rw_credits(rdata->rreq->debug_id, 319 rdata->s 319 rdata->subreq.debug_index, 320 rdata->c 320 rdata->credits.value, 321 rdata->s 321 rdata->server ? rdata->server->credits : 0, 322 rdata->s 322 rdata->server ? rdata->server->in_flight : 0, 323 -rdata-> 323 -rdata->credits.value, 324 cifs_tra 324 cifs_trace_rw_credits_free_subreq); 325 if (rdata->server) 325 if (rdata->server) 326 add_credits_and_wake_i 326 add_credits_and_wake_if(rdata->server, &rdata->credits, 0); 327 else 327 else 328 rdata->credits.value = 328 rdata->credits.value = 0; 329 } 329 } 330 330 331 if (rdata->have_xid) 331 if (rdata->have_xid) 332 free_xid(rdata->xid); 332 free_xid(rdata->xid); 333 } 333 } 334 334 335 const struct netfs_request_ops cifs_req_ops = 335 const struct netfs_request_ops cifs_req_ops = { 336 .request_pool = &cifs_io_req 336 .request_pool = &cifs_io_request_pool, 337 .subrequest_pool = &cifs_io_sub 337 .subrequest_pool = &cifs_io_subrequest_pool, 338 .init_request = cifs_init_re 338 .init_request = cifs_init_request, 339 .free_request = cifs_free_re 339 .free_request = cifs_free_request, 340 .free_subrequest = cifs_free_su 340 .free_subrequest = cifs_free_subrequest, 341 .prepare_read = cifs_prepare 341 .prepare_read = cifs_prepare_read, 342 .issue_read = cifs_issue_r 342 .issue_read = cifs_issue_read, 343 .done = cifs_rreq_do 343 .done = cifs_rreq_done, 344 .begin_writeback = cifs_begin_w 344 .begin_writeback = cifs_begin_writeback, 345 .prepare_write = cifs_prepare 345 .prepare_write = cifs_prepare_write, 346 .issue_write = cifs_issue_w 346 .issue_write = cifs_issue_write, 347 .invalidate_cache = cifs_netfs_i 347 .invalidate_cache = cifs_netfs_invalidate_cache, 348 }; 348 }; 349 349 350 /* 350 /* 351 * Mark as invalid, all open files on tree con 351 * Mark as invalid, all open files on tree connections since they 352 * were closed when session to server was lost 352 * were closed when session to server was lost. 353 */ 353 */ 354 void 354 void 355 cifs_mark_open_files_invalid(struct cifs_tcon 355 cifs_mark_open_files_invalid(struct cifs_tcon *tcon) 356 { 356 { 357 struct cifsFileInfo *open_file = NULL; 357 struct cifsFileInfo *open_file = NULL; 358 struct list_head *tmp; 358 struct list_head *tmp; 359 struct list_head *tmp1; 359 struct list_head *tmp1; 360 360 361 /* only send once per connect */ 361 /* only send once per connect */ 362 spin_lock(&tcon->tc_lock); 362 spin_lock(&tcon->tc_lock); 363 if (tcon->need_reconnect) 363 if (tcon->need_reconnect) 364 tcon->status = TID_NEED_RECON; 364 tcon->status = TID_NEED_RECON; 365 365 366 if (tcon->status != TID_NEED_RECON) { 366 if (tcon->status != TID_NEED_RECON) { 367 spin_unlock(&tcon->tc_lock); 367 spin_unlock(&tcon->tc_lock); 368 return; 368 return; 369 } 369 } 370 tcon->status = TID_IN_FILES_INVALIDATE 370 tcon->status = TID_IN_FILES_INVALIDATE; 371 spin_unlock(&tcon->tc_lock); 371 spin_unlock(&tcon->tc_lock); 372 372 373 /* list all files open on tree connect 373 /* list all files open on tree connection and mark them invalid */ 374 spin_lock(&tcon->open_file_lock); 374 spin_lock(&tcon->open_file_lock); 375 list_for_each_safe(tmp, tmp1, &tcon->o 375 list_for_each_safe(tmp, tmp1, &tcon->openFileList) { 376 open_file = list_entry(tmp, st 376 open_file = list_entry(tmp, struct cifsFileInfo, tlist); 377 open_file->invalidHandle = tru 377 open_file->invalidHandle = true; 378 open_file->oplock_break_cancel 378 open_file->oplock_break_cancelled = true; 379 } 379 } 380 spin_unlock(&tcon->open_file_lock); 380 spin_unlock(&tcon->open_file_lock); 381 381 382 invalidate_all_cached_dirs(tcon); 382 invalidate_all_cached_dirs(tcon); 383 spin_lock(&tcon->tc_lock); 383 spin_lock(&tcon->tc_lock); 384 if (tcon->status == TID_IN_FILES_INVAL 384 if (tcon->status == TID_IN_FILES_INVALIDATE) 385 tcon->status = TID_NEED_TCON; 385 tcon->status = TID_NEED_TCON; 386 spin_unlock(&tcon->tc_lock); 386 spin_unlock(&tcon->tc_lock); 387 387 388 /* 388 /* 389 * BB Add call to invalidate_inodes(sb 389 * BB Add call to invalidate_inodes(sb) for all superblocks mounted 390 * to this tcon. 390 * to this tcon. 391 */ 391 */ 392 } 392 } 393 393 394 static inline int cifs_convert_flags(unsigned 394 static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache) 395 { 395 { 396 if ((flags & O_ACCMODE) == O_RDONLY) 396 if ((flags & O_ACCMODE) == O_RDONLY) 397 return GENERIC_READ; 397 return GENERIC_READ; 398 else if ((flags & O_ACCMODE) == O_WRON 398 else if ((flags & O_ACCMODE) == O_WRONLY) 399 return rdwr_for_fscache == 1 ? 399 return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE; 400 else if ((flags & O_ACCMODE) == O_RDWR 400 else if ((flags & O_ACCMODE) == O_RDWR) { 401 /* GENERIC_ALL is too much per 401 /* GENERIC_ALL is too much permission to request 402 can cause unnecessary acces 402 can cause unnecessary access denied on create */ 403 /* return GENERIC_ALL; */ 403 /* return GENERIC_ALL; */ 404 return (GENERIC_READ | GENERIC 404 return (GENERIC_READ | GENERIC_WRITE); 405 } 405 } 406 406 407 return (READ_CONTROL | FILE_WRITE_ATTR 407 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES | 408 FILE_WRITE_EA | FILE_APPEND_DA 408 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA | 409 FILE_READ_DATA); 409 FILE_READ_DATA); 410 } 410 } 411 411 412 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 412 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 413 static u32 cifs_posix_convert_flags(unsigned i 413 static u32 cifs_posix_convert_flags(unsigned int flags) 414 { 414 { 415 u32 posix_flags = 0; 415 u32 posix_flags = 0; 416 416 417 if ((flags & O_ACCMODE) == O_RDONLY) 417 if ((flags & O_ACCMODE) == O_RDONLY) 418 posix_flags = SMB_O_RDONLY; 418 posix_flags = SMB_O_RDONLY; 419 else if ((flags & O_ACCMODE) == O_WRON 419 else if ((flags & O_ACCMODE) == O_WRONLY) 420 posix_flags = SMB_O_WRONLY; 420 posix_flags = SMB_O_WRONLY; 421 else if ((flags & O_ACCMODE) == O_RDWR 421 else if ((flags & O_ACCMODE) == O_RDWR) 422 posix_flags = SMB_O_RDWR; 422 posix_flags = SMB_O_RDWR; 423 423 424 if (flags & O_CREAT) { 424 if (flags & O_CREAT) { 425 posix_flags |= SMB_O_CREAT; 425 posix_flags |= SMB_O_CREAT; 426 if (flags & O_EXCL) 426 if (flags & O_EXCL) 427 posix_flags |= SMB_O_E 427 posix_flags |= SMB_O_EXCL; 428 } else if (flags & O_EXCL) 428 } else if (flags & O_EXCL) 429 cifs_dbg(FYI, "Application %s 429 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n", 430 current->comm, curren 430 current->comm, current->tgid); 431 431 432 if (flags & O_TRUNC) 432 if (flags & O_TRUNC) 433 posix_flags |= SMB_O_TRUNC; 433 posix_flags |= SMB_O_TRUNC; 434 /* be safe and imply O_SYNC for O_DSYN 434 /* be safe and imply O_SYNC for O_DSYNC */ 435 if (flags & O_DSYNC) 435 if (flags & O_DSYNC) 436 posix_flags |= SMB_O_SYNC; 436 posix_flags |= SMB_O_SYNC; 437 if (flags & O_DIRECTORY) 437 if (flags & O_DIRECTORY) 438 posix_flags |= SMB_O_DIRECTORY 438 posix_flags |= SMB_O_DIRECTORY; 439 if (flags & O_NOFOLLOW) 439 if (flags & O_NOFOLLOW) 440 posix_flags |= SMB_O_NOFOLLOW; 440 posix_flags |= SMB_O_NOFOLLOW; 441 if (flags & O_DIRECT) 441 if (flags & O_DIRECT) 442 posix_flags |= SMB_O_DIRECT; 442 posix_flags |= SMB_O_DIRECT; 443 443 444 return posix_flags; 444 return posix_flags; 445 } 445 } 446 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 446 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 447 447 448 static inline int cifs_get_disposition(unsigne 448 static inline int cifs_get_disposition(unsigned int flags) 449 { 449 { 450 if ((flags & (O_CREAT | O_EXCL)) == (O 450 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) 451 return FILE_CREATE; 451 return FILE_CREATE; 452 else if ((flags & (O_CREAT | O_TRUNC)) 452 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) 453 return FILE_OVERWRITE_IF; 453 return FILE_OVERWRITE_IF; 454 else if ((flags & O_CREAT) == O_CREAT) 454 else if ((flags & O_CREAT) == O_CREAT) 455 return FILE_OPEN_IF; 455 return FILE_OPEN_IF; 456 else if ((flags & O_TRUNC) == O_TRUNC) 456 else if ((flags & O_TRUNC) == O_TRUNC) 457 return FILE_OVERWRITE; 457 return FILE_OVERWRITE; 458 else 458 else 459 return FILE_OPEN; 459 return FILE_OPEN; 460 } 460 } 461 461 462 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 462 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 463 int cifs_posix_open(const char *full_path, str 463 int cifs_posix_open(const char *full_path, struct inode **pinode, 464 struct super_block *sb 464 struct super_block *sb, int mode, unsigned int f_flags, 465 __u32 *poplock, __u16 465 __u32 *poplock, __u16 *pnetfid, unsigned int xid) 466 { 466 { 467 int rc; 467 int rc; 468 FILE_UNIX_BASIC_INFO *presp_data; 468 FILE_UNIX_BASIC_INFO *presp_data; 469 __u32 posix_flags = 0; 469 __u32 posix_flags = 0; 470 struct cifs_sb_info *cifs_sb = CIFS_SB 470 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 471 struct cifs_fattr fattr; 471 struct cifs_fattr fattr; 472 struct tcon_link *tlink; 472 struct tcon_link *tlink; 473 struct cifs_tcon *tcon; 473 struct cifs_tcon *tcon; 474 474 475 cifs_dbg(FYI, "posix open %s\n", full_ 475 cifs_dbg(FYI, "posix open %s\n", full_path); 476 476 477 presp_data = kzalloc(sizeof(FILE_UNIX_ 477 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); 478 if (presp_data == NULL) 478 if (presp_data == NULL) 479 return -ENOMEM; 479 return -ENOMEM; 480 480 481 tlink = cifs_sb_tlink(cifs_sb); 481 tlink = cifs_sb_tlink(cifs_sb); 482 if (IS_ERR(tlink)) { 482 if (IS_ERR(tlink)) { 483 rc = PTR_ERR(tlink); 483 rc = PTR_ERR(tlink); 484 goto posix_open_ret; 484 goto posix_open_ret; 485 } 485 } 486 486 487 tcon = tlink_tcon(tlink); 487 tcon = tlink_tcon(tlink); 488 mode &= ~current_umask(); 488 mode &= ~current_umask(); 489 489 490 posix_flags = cifs_posix_convert_flags 490 posix_flags = cifs_posix_convert_flags(f_flags); 491 rc = CIFSPOSIXCreate(xid, tcon, posix_ 491 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data, 492 poplock, full_pat 492 poplock, full_path, cifs_sb->local_nls, 493 cifs_remap(cifs_s 493 cifs_remap(cifs_sb)); 494 cifs_put_tlink(tlink); 494 cifs_put_tlink(tlink); 495 495 496 if (rc) 496 if (rc) 497 goto posix_open_ret; 497 goto posix_open_ret; 498 498 499 if (presp_data->Type == cpu_to_le32(-1 499 if (presp_data->Type == cpu_to_le32(-1)) 500 goto posix_open_ret; /* open o 500 goto posix_open_ret; /* open ok, caller does qpathinfo */ 501 501 502 if (!pinode) 502 if (!pinode) 503 goto posix_open_ret; /* caller 503 goto posix_open_ret; /* caller does not need info */ 504 504 505 cifs_unix_basic_to_fattr(&fattr, presp 505 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb); 506 506 507 /* get new inode and set it up */ 507 /* get new inode and set it up */ 508 if (*pinode == NULL) { 508 if (*pinode == NULL) { 509 cifs_fill_uniqueid(sb, &fattr) 509 cifs_fill_uniqueid(sb, &fattr); 510 *pinode = cifs_iget(sb, &fattr 510 *pinode = cifs_iget(sb, &fattr); 511 if (!*pinode) { 511 if (!*pinode) { 512 rc = -ENOMEM; 512 rc = -ENOMEM; 513 goto posix_open_ret; 513 goto posix_open_ret; 514 } 514 } 515 } else { 515 } else { 516 cifs_revalidate_mapping(*pinod 516 cifs_revalidate_mapping(*pinode); 517 rc = cifs_fattr_to_inode(*pino 517 rc = cifs_fattr_to_inode(*pinode, &fattr, false); 518 } 518 } 519 519 520 posix_open_ret: 520 posix_open_ret: 521 kfree(presp_data); 521 kfree(presp_data); 522 return rc; 522 return rc; 523 } 523 } 524 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 524 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 525 525 526 static int cifs_nt_open(const char *full_path, 526 static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, 527 struct cifs_tcon *tcon 527 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock, 528 struct cifs_fid *fid, 528 struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf) 529 { 529 { 530 int rc; 530 int rc; 531 int desired_access; 531 int desired_access; 532 int disposition; 532 int disposition; 533 int create_options = CREATE_NOT_DIR; 533 int create_options = CREATE_NOT_DIR; 534 struct TCP_Server_Info *server = tcon- 534 struct TCP_Server_Info *server = tcon->ses->server; 535 struct cifs_open_parms oparms; 535 struct cifs_open_parms oparms; 536 int rdwr_for_fscache = 0; 536 int rdwr_for_fscache = 0; 537 537 538 if (!server->ops->open) 538 if (!server->ops->open) 539 return -ENOSYS; 539 return -ENOSYS; 540 540 541 /* If we're caching, we need to be abl 541 /* If we're caching, we need to be able to fill in around partial writes. */ 542 if (cifs_fscache_enabled(inode) && (f_ 542 if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY) 543 rdwr_for_fscache = 1; 543 rdwr_for_fscache = 1; 544 544 545 desired_access = cifs_convert_flags(f_ 545 desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache); 546 546 547 /********************************************* 547 /********************************************************************* 548 * open flag mapping table: 548 * open flag mapping table: 549 * 549 * 550 * POSIX Flag CIFS Disposition 550 * POSIX Flag CIFS Disposition 551 * ---------- ---------------- 551 * ---------- ---------------- 552 * O_CREAT FILE_OPEN_IF 552 * O_CREAT FILE_OPEN_IF 553 * O_CREAT | O_EXCL FILE_CREATE 553 * O_CREAT | O_EXCL FILE_CREATE 554 * O_CREAT | O_TRUNC FILE_OVERWRITE_I 554 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF 555 * O_TRUNC FILE_OVERWRITE 555 * O_TRUNC FILE_OVERWRITE 556 * none of the above FILE_OPEN 556 * none of the above FILE_OPEN 557 * 557 * 558 * Note that there is not a direct match 558 * Note that there is not a direct match between disposition 559 * FILE_SUPERSEDE (ie create whether or n 559 * FILE_SUPERSEDE (ie create whether or not file exists although 560 * O_CREAT | O_TRUNC is similar but trunc 560 * O_CREAT | O_TRUNC is similar but truncates the existing 561 * file rather than creating a new file a 561 * file rather than creating a new file as FILE_SUPERSEDE does 562 * (which uses the attributes / metadata 562 * (which uses the attributes / metadata passed in on open call) 563 *? 563 *? 564 *? O_SYNC is a reasonable match to CIFS writ 564 *? O_SYNC is a reasonable match to CIFS writethrough flag 565 *? and the read write flags match reasonably 565 *? and the read write flags match reasonably. O_LARGEFILE 566 *? is irrelevant because largefile support i 566 *? is irrelevant because largefile support is always used 567 *? by this client. Flags O_APPEND, O_DIRECT, 567 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, 568 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need 568 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation 569 ********************************************* 569 *********************************************************************/ 570 570 571 disposition = cifs_get_disposition(f_f 571 disposition = cifs_get_disposition(f_flags); 572 572 573 /* BB pass O_SYNC flag through on file 573 /* BB pass O_SYNC flag through on file attributes .. BB */ 574 574 575 /* O_SYNC also has bit for O_DSYNC so 575 /* O_SYNC also has bit for O_DSYNC so following check picks up either */ 576 if (f_flags & O_SYNC) 576 if (f_flags & O_SYNC) 577 create_options |= CREATE_WRITE 577 create_options |= CREATE_WRITE_THROUGH; 578 578 579 if (f_flags & O_DIRECT) 579 if (f_flags & O_DIRECT) 580 create_options |= CREATE_NO_BU 580 create_options |= CREATE_NO_BUFFER; 581 581 582 retry_open: 582 retry_open: 583 oparms = (struct cifs_open_parms) { 583 oparms = (struct cifs_open_parms) { 584 .tcon = tcon, 584 .tcon = tcon, 585 .cifs_sb = cifs_sb, 585 .cifs_sb = cifs_sb, 586 .desired_access = desired_acce 586 .desired_access = desired_access, 587 .create_options = cifs_create_ 587 .create_options = cifs_create_options(cifs_sb, create_options), 588 .disposition = disposition, 588 .disposition = disposition, 589 .path = full_path, 589 .path = full_path, 590 .fid = fid, 590 .fid = fid, 591 }; 591 }; 592 592 593 rc = server->ops->open(xid, &oparms, o 593 rc = server->ops->open(xid, &oparms, oplock, buf); 594 if (rc) { 594 if (rc) { 595 if (rc == -EACCES && rdwr_for_ 595 if (rc == -EACCES && rdwr_for_fscache == 1) { 596 desired_access = cifs_ 596 desired_access = cifs_convert_flags(f_flags, 0); 597 rdwr_for_fscache = 2; 597 rdwr_for_fscache = 2; 598 goto retry_open; 598 goto retry_open; 599 } 599 } 600 return rc; 600 return rc; 601 } 601 } 602 if (rdwr_for_fscache == 2) 602 if (rdwr_for_fscache == 2) 603 cifs_invalidate_cache(inode, F 603 cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE); 604 604 605 /* TODO: Add support for calling posix 605 /* TODO: Add support for calling posix query info but with passing in fid */ 606 if (tcon->unix_ext) 606 if (tcon->unix_ext) 607 rc = cifs_get_inode_info_unix( 607 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, 608 608 xid); 609 else 609 else 610 rc = cifs_get_inode_info(&inod 610 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, 611 xid, 611 xid, fid); 612 612 613 if (rc) { 613 if (rc) { 614 server->ops->close(xid, tcon, 614 server->ops->close(xid, tcon, fid); 615 if (rc == -ESTALE) 615 if (rc == -ESTALE) 616 rc = -EOPENSTALE; 616 rc = -EOPENSTALE; 617 } 617 } 618 618 619 return rc; 619 return rc; 620 } 620 } 621 621 622 static bool 622 static bool 623 cifs_has_mand_locks(struct cifsInodeInfo *cino 623 cifs_has_mand_locks(struct cifsInodeInfo *cinode) 624 { 624 { 625 struct cifs_fid_locks *cur; 625 struct cifs_fid_locks *cur; 626 bool has_locks = false; 626 bool has_locks = false; 627 627 628 down_read(&cinode->lock_sem); 628 down_read(&cinode->lock_sem); 629 list_for_each_entry(cur, &cinode->llis 629 list_for_each_entry(cur, &cinode->llist, llist) { 630 if (!list_empty(&cur->locks)) 630 if (!list_empty(&cur->locks)) { 631 has_locks = true; 631 has_locks = true; 632 break; 632 break; 633 } 633 } 634 } 634 } 635 up_read(&cinode->lock_sem); 635 up_read(&cinode->lock_sem); 636 return has_locks; 636 return has_locks; 637 } 637 } 638 638 639 void 639 void 640 cifs_down_write(struct rw_semaphore *sem) 640 cifs_down_write(struct rw_semaphore *sem) 641 { 641 { 642 while (!down_write_trylock(sem)) 642 while (!down_write_trylock(sem)) 643 msleep(10); 643 msleep(10); 644 } 644 } 645 645 646 static void cifsFileInfo_put_work(struct work_ 646 static void cifsFileInfo_put_work(struct work_struct *work); 647 void serverclose_work(struct work_struct *work 647 void serverclose_work(struct work_struct *work); 648 648 649 struct cifsFileInfo *cifs_new_fileinfo(struct 649 struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, 650 struct 650 struct tcon_link *tlink, __u32 oplock, 651 const c 651 const char *symlink_target) 652 { 652 { 653 struct dentry *dentry = file_dentry(fi 653 struct dentry *dentry = file_dentry(file); 654 struct inode *inode = d_inode(dentry); 654 struct inode *inode = d_inode(dentry); 655 struct cifsInodeInfo *cinode = CIFS_I( 655 struct cifsInodeInfo *cinode = CIFS_I(inode); 656 struct cifsFileInfo *cfile; 656 struct cifsFileInfo *cfile; 657 struct cifs_fid_locks *fdlocks; 657 struct cifs_fid_locks *fdlocks; 658 struct cifs_tcon *tcon = tlink_tcon(tl 658 struct cifs_tcon *tcon = tlink_tcon(tlink); 659 struct TCP_Server_Info *server = tcon- 659 struct TCP_Server_Info *server = tcon->ses->server; 660 660 661 cfile = kzalloc(sizeof(struct cifsFile 661 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); 662 if (cfile == NULL) 662 if (cfile == NULL) 663 return cfile; 663 return cfile; 664 664 665 fdlocks = kzalloc(sizeof(struct cifs_f 665 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL); 666 if (!fdlocks) { 666 if (!fdlocks) { 667 kfree(cfile); 667 kfree(cfile); 668 return NULL; 668 return NULL; 669 } 669 } 670 670 671 if (symlink_target) { 671 if (symlink_target) { 672 cfile->symlink_target = kstrdu 672 cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL); 673 if (!cfile->symlink_target) { 673 if (!cfile->symlink_target) { 674 kfree(fdlocks); 674 kfree(fdlocks); 675 kfree(cfile); 675 kfree(cfile); 676 return NULL; 676 return NULL; 677 } 677 } 678 } 678 } 679 679 680 INIT_LIST_HEAD(&fdlocks->locks); 680 INIT_LIST_HEAD(&fdlocks->locks); 681 fdlocks->cfile = cfile; 681 fdlocks->cfile = cfile; 682 cfile->llist = fdlocks; 682 cfile->llist = fdlocks; 683 683 684 cfile->count = 1; 684 cfile->count = 1; 685 cfile->pid = current->tgid; 685 cfile->pid = current->tgid; 686 cfile->uid = current_fsuid(); 686 cfile->uid = current_fsuid(); 687 cfile->dentry = dget(dentry); 687 cfile->dentry = dget(dentry); 688 cfile->f_flags = file->f_flags; 688 cfile->f_flags = file->f_flags; 689 cfile->invalidHandle = false; 689 cfile->invalidHandle = false; 690 cfile->deferred_close_scheduled = fals 690 cfile->deferred_close_scheduled = false; 691 cfile->tlink = cifs_get_tlink(tlink); 691 cfile->tlink = cifs_get_tlink(tlink); 692 INIT_WORK(&cfile->oplock_break, cifs_o 692 INIT_WORK(&cfile->oplock_break, cifs_oplock_break); 693 INIT_WORK(&cfile->put, cifsFileInfo_pu 693 INIT_WORK(&cfile->put, cifsFileInfo_put_work); 694 INIT_WORK(&cfile->serverclose, serverc 694 INIT_WORK(&cfile->serverclose, serverclose_work); 695 INIT_DELAYED_WORK(&cfile->deferred, sm 695 INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close); 696 mutex_init(&cfile->fh_mutex); 696 mutex_init(&cfile->fh_mutex); 697 spin_lock_init(&cfile->file_info_lock) 697 spin_lock_init(&cfile->file_info_lock); 698 698 699 cifs_sb_active(inode->i_sb); 699 cifs_sb_active(inode->i_sb); 700 700 701 /* 701 /* 702 * If the server returned a read oploc 702 * If the server returned a read oplock and we have mandatory brlocks, 703 * set oplock level to None. 703 * set oplock level to None. 704 */ 704 */ 705 if (server->ops->is_read_op(oplock) && 705 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) { 706 cifs_dbg(FYI, "Reset oplock va 706 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n"); 707 oplock = 0; 707 oplock = 0; 708 } 708 } 709 709 710 cifs_down_write(&cinode->lock_sem); 710 cifs_down_write(&cinode->lock_sem); 711 list_add(&fdlocks->llist, &cinode->lli 711 list_add(&fdlocks->llist, &cinode->llist); 712 up_write(&cinode->lock_sem); 712 up_write(&cinode->lock_sem); 713 713 714 spin_lock(&tcon->open_file_lock); 714 spin_lock(&tcon->open_file_lock); 715 if (fid->pending_open->oplock != CIFS_ 715 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock) 716 oplock = fid->pending_open->op 716 oplock = fid->pending_open->oplock; 717 list_del(&fid->pending_open->olist); 717 list_del(&fid->pending_open->olist); 718 718 719 fid->purge_cache = false; 719 fid->purge_cache = false; 720 server->ops->set_fid(cfile, fid, oploc 720 server->ops->set_fid(cfile, fid, oplock); 721 721 722 list_add(&cfile->tlist, &tcon->openFil 722 list_add(&cfile->tlist, &tcon->openFileList); 723 atomic_inc(&tcon->num_local_opens); 723 atomic_inc(&tcon->num_local_opens); 724 724 725 /* if readable file instance put first 725 /* if readable file instance put first in list*/ 726 spin_lock(&cinode->open_file_lock); 726 spin_lock(&cinode->open_file_lock); 727 if (file->f_mode & FMODE_READ) 727 if (file->f_mode & FMODE_READ) 728 list_add(&cfile->flist, &cinod 728 list_add(&cfile->flist, &cinode->openFileList); 729 else 729 else 730 list_add_tail(&cfile->flist, & 730 list_add_tail(&cfile->flist, &cinode->openFileList); 731 spin_unlock(&cinode->open_file_lock); 731 spin_unlock(&cinode->open_file_lock); 732 spin_unlock(&tcon->open_file_lock); 732 spin_unlock(&tcon->open_file_lock); 733 733 734 if (fid->purge_cache) 734 if (fid->purge_cache) 735 cifs_zap_mapping(inode); 735 cifs_zap_mapping(inode); 736 736 737 file->private_data = cfile; 737 file->private_data = cfile; 738 return cfile; 738 return cfile; 739 } 739 } 740 740 741 struct cifsFileInfo * 741 struct cifsFileInfo * 742 cifsFileInfo_get(struct cifsFileInfo *cifs_fil 742 cifsFileInfo_get(struct cifsFileInfo *cifs_file) 743 { 743 { 744 spin_lock(&cifs_file->file_info_lock); 744 spin_lock(&cifs_file->file_info_lock); 745 cifsFileInfo_get_locked(cifs_file); 745 cifsFileInfo_get_locked(cifs_file); 746 spin_unlock(&cifs_file->file_info_lock 746 spin_unlock(&cifs_file->file_info_lock); 747 return cifs_file; 747 return cifs_file; 748 } 748 } 749 749 750 static void cifsFileInfo_put_final(struct cifs 750 static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file) 751 { 751 { 752 struct inode *inode = d_inode(cifs_fil 752 struct inode *inode = d_inode(cifs_file->dentry); 753 struct cifsInodeInfo *cifsi = CIFS_I(i 753 struct cifsInodeInfo *cifsi = CIFS_I(inode); 754 struct cifsLockInfo *li, *tmp; 754 struct cifsLockInfo *li, *tmp; 755 struct super_block *sb = inode->i_sb; 755 struct super_block *sb = inode->i_sb; 756 756 757 /* 757 /* 758 * Delete any outstanding lock records 758 * Delete any outstanding lock records. We'll lose them when the file 759 * is closed anyway. 759 * is closed anyway. 760 */ 760 */ 761 cifs_down_write(&cifsi->lock_sem); 761 cifs_down_write(&cifsi->lock_sem); 762 list_for_each_entry_safe(li, tmp, &cif 762 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) { 763 list_del(&li->llist); 763 list_del(&li->llist); 764 cifs_del_lock_waiters(li); 764 cifs_del_lock_waiters(li); 765 kfree(li); 765 kfree(li); 766 } 766 } 767 list_del(&cifs_file->llist->llist); 767 list_del(&cifs_file->llist->llist); 768 kfree(cifs_file->llist); 768 kfree(cifs_file->llist); 769 up_write(&cifsi->lock_sem); 769 up_write(&cifsi->lock_sem); 770 770 771 cifs_put_tlink(cifs_file->tlink); 771 cifs_put_tlink(cifs_file->tlink); 772 dput(cifs_file->dentry); 772 dput(cifs_file->dentry); 773 cifs_sb_deactive(sb); 773 cifs_sb_deactive(sb); 774 kfree(cifs_file->symlink_target); 774 kfree(cifs_file->symlink_target); 775 kfree(cifs_file); 775 kfree(cifs_file); 776 } 776 } 777 777 778 static void cifsFileInfo_put_work(struct work_ 778 static void cifsFileInfo_put_work(struct work_struct *work) 779 { 779 { 780 struct cifsFileInfo *cifs_file = conta 780 struct cifsFileInfo *cifs_file = container_of(work, 781 struct cifsFileInfo, p 781 struct cifsFileInfo, put); 782 782 783 cifsFileInfo_put_final(cifs_file); 783 cifsFileInfo_put_final(cifs_file); 784 } 784 } 785 785 786 void serverclose_work(struct work_struct *work 786 void serverclose_work(struct work_struct *work) 787 { 787 { 788 struct cifsFileInfo *cifs_file = conta 788 struct cifsFileInfo *cifs_file = container_of(work, 789 struct cifsFileInfo, s 789 struct cifsFileInfo, serverclose); 790 790 791 struct cifs_tcon *tcon = tlink_tcon(ci 791 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); 792 792 793 struct TCP_Server_Info *server = tcon- 793 struct TCP_Server_Info *server = tcon->ses->server; 794 int rc = 0; 794 int rc = 0; 795 int retries = 0; 795 int retries = 0; 796 int MAX_RETRIES = 4; 796 int MAX_RETRIES = 4; 797 797 798 do { 798 do { 799 if (server->ops->close_getattr 799 if (server->ops->close_getattr) 800 rc = server->ops->clos 800 rc = server->ops->close_getattr(0, tcon, cifs_file); 801 else if (server->ops->close) 801 else if (server->ops->close) 802 rc = server->ops->clos 802 rc = server->ops->close(0, tcon, &cifs_file->fid); 803 803 804 if (rc == -EBUSY || rc == -EAG 804 if (rc == -EBUSY || rc == -EAGAIN) { 805 retries++; 805 retries++; 806 msleep(250); 806 msleep(250); 807 } 807 } 808 } while ((rc == -EBUSY || rc == -EAGAI 808 } while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES) 809 ); 809 ); 810 810 811 if (retries == MAX_RETRIES) 811 if (retries == MAX_RETRIES) 812 pr_warn("Serverclose failed %d 812 pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES); 813 813 814 if (cifs_file->offload) 814 if (cifs_file->offload) 815 queue_work(fileinfo_put_wq, &c 815 queue_work(fileinfo_put_wq, &cifs_file->put); 816 else 816 else 817 cifsFileInfo_put_final(cifs_fi 817 cifsFileInfo_put_final(cifs_file); 818 } 818 } 819 819 820 /** 820 /** 821 * cifsFileInfo_put - release a reference of f 821 * cifsFileInfo_put - release a reference of file priv data 822 * 822 * 823 * Always potentially wait for oplock handler. 823 * Always potentially wait for oplock handler. See _cifsFileInfo_put(). 824 * 824 * 825 * @cifs_file: cifs/smb3 specific info (eg re 825 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file 826 */ 826 */ 827 void cifsFileInfo_put(struct cifsFileInfo *cif 827 void cifsFileInfo_put(struct cifsFileInfo *cifs_file) 828 { 828 { 829 _cifsFileInfo_put(cifs_file, true, tru 829 _cifsFileInfo_put(cifs_file, true, true); 830 } 830 } 831 831 832 /** 832 /** 833 * _cifsFileInfo_put - release a reference of 833 * _cifsFileInfo_put - release a reference of file priv data 834 * 834 * 835 * This may involve closing the filehandle @ci 835 * This may involve closing the filehandle @cifs_file out on the 836 * server. Must be called without holding tcon 836 * server. Must be called without holding tcon->open_file_lock, 837 * cinode->open_file_lock and cifs_file->file_ 837 * cinode->open_file_lock and cifs_file->file_info_lock. 838 * 838 * 839 * If @wait_for_oplock_handler is true and we 839 * If @wait_for_oplock_handler is true and we are releasing the last 840 * reference, wait for any running oplock brea 840 * reference, wait for any running oplock break handler of the file 841 * and cancel any pending one. 841 * and cancel any pending one. 842 * 842 * 843 * @cifs_file: cifs/smb3 specific info (eg re 843 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file 844 * @wait_oplock_handler: must be false if call 844 * @wait_oplock_handler: must be false if called from oplock_break_handler 845 * @offload: not offloaded on close and opl 845 * @offload: not offloaded on close and oplock breaks 846 * 846 * 847 */ 847 */ 848 void _cifsFileInfo_put(struct cifsFileInfo *ci 848 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, 849 bool wait_oplock_handle 849 bool wait_oplock_handler, bool offload) 850 { 850 { 851 struct inode *inode = d_inode(cifs_fil 851 struct inode *inode = d_inode(cifs_file->dentry); 852 struct cifs_tcon *tcon = tlink_tcon(ci 852 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); 853 struct TCP_Server_Info *server = tcon- 853 struct TCP_Server_Info *server = tcon->ses->server; 854 struct cifsInodeInfo *cifsi = CIFS_I(i 854 struct cifsInodeInfo *cifsi = CIFS_I(inode); 855 struct super_block *sb = inode->i_sb; 855 struct super_block *sb = inode->i_sb; 856 struct cifs_sb_info *cifs_sb = CIFS_SB 856 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 857 struct cifs_fid fid = {}; 857 struct cifs_fid fid = {}; 858 struct cifs_pending_open open; 858 struct cifs_pending_open open; 859 bool oplock_break_cancelled; 859 bool oplock_break_cancelled; 860 bool serverclose_offloaded = false; 860 bool serverclose_offloaded = false; 861 861 862 spin_lock(&tcon->open_file_lock); 862 spin_lock(&tcon->open_file_lock); 863 spin_lock(&cifsi->open_file_lock); 863 spin_lock(&cifsi->open_file_lock); 864 spin_lock(&cifs_file->file_info_lock); 864 spin_lock(&cifs_file->file_info_lock); 865 865 866 cifs_file->offload = offload; 866 cifs_file->offload = offload; 867 if (--cifs_file->count > 0) { 867 if (--cifs_file->count > 0) { 868 spin_unlock(&cifs_file->file_i 868 spin_unlock(&cifs_file->file_info_lock); 869 spin_unlock(&cifsi->open_file_ 869 spin_unlock(&cifsi->open_file_lock); 870 spin_unlock(&tcon->open_file_l 870 spin_unlock(&tcon->open_file_lock); 871 return; 871 return; 872 } 872 } 873 spin_unlock(&cifs_file->file_info_lock 873 spin_unlock(&cifs_file->file_info_lock); 874 874 875 if (server->ops->get_lease_key) 875 if (server->ops->get_lease_key) 876 server->ops->get_lease_key(ino 876 server->ops->get_lease_key(inode, &fid); 877 877 878 /* store open in pending opens to make 878 /* store open in pending opens to make sure we don't miss lease break */ 879 cifs_add_pending_open_locked(&fid, cif 879 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open); 880 880 881 /* remove it from the lists */ 881 /* remove it from the lists */ 882 list_del(&cifs_file->flist); 882 list_del(&cifs_file->flist); 883 list_del(&cifs_file->tlist); 883 list_del(&cifs_file->tlist); 884 atomic_dec(&tcon->num_local_opens); 884 atomic_dec(&tcon->num_local_opens); 885 885 886 if (list_empty(&cifsi->openFileList)) 886 if (list_empty(&cifsi->openFileList)) { 887 cifs_dbg(FYI, "closing last op 887 cifs_dbg(FYI, "closing last open instance for inode %p\n", 888 d_inode(cifs_file->de 888 d_inode(cifs_file->dentry)); 889 /* 889 /* 890 * In strict cache mode we nee 890 * In strict cache mode we need invalidate mapping on the last 891 * close because it may cause 891 * close because it may cause a error when we open this file 892 * again and get at least leve 892 * again and get at least level II oplock. 893 */ 893 */ 894 if (cifs_sb->mnt_cifs_flags & 894 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) 895 set_bit(CIFS_INO_INVAL 895 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags); 896 cifs_set_oplock_level(cifsi, 0 896 cifs_set_oplock_level(cifsi, 0); 897 } 897 } 898 898 899 spin_unlock(&cifsi->open_file_lock); 899 spin_unlock(&cifsi->open_file_lock); 900 spin_unlock(&tcon->open_file_lock); 900 spin_unlock(&tcon->open_file_lock); 901 901 902 oplock_break_cancelled = wait_oplock_h 902 oplock_break_cancelled = wait_oplock_handler ? 903 cancel_work_sync(&cifs_file->o 903 cancel_work_sync(&cifs_file->oplock_break) : false; 904 904 905 if (!tcon->need_reconnect && !cifs_fil 905 if (!tcon->need_reconnect && !cifs_file->invalidHandle) { 906 struct TCP_Server_Info *server 906 struct TCP_Server_Info *server = tcon->ses->server; 907 unsigned int xid; 907 unsigned int xid; 908 int rc = 0; 908 int rc = 0; 909 909 910 xid = get_xid(); 910 xid = get_xid(); 911 if (server->ops->close_getattr 911 if (server->ops->close_getattr) 912 rc = server->ops->clos 912 rc = server->ops->close_getattr(xid, tcon, cifs_file); 913 else if (server->ops->close) 913 else if (server->ops->close) 914 rc = server->ops->clos 914 rc = server->ops->close(xid, tcon, &cifs_file->fid); 915 _free_xid(xid); 915 _free_xid(xid); 916 916 917 if (rc == -EBUSY || rc == -EAG 917 if (rc == -EBUSY || rc == -EAGAIN) { 918 // Server close failed 918 // Server close failed, hence offloading it as an async op 919 queue_work(serverclose 919 queue_work(serverclose_wq, &cifs_file->serverclose); 920 serverclose_offloaded 920 serverclose_offloaded = true; 921 } 921 } 922 } 922 } 923 923 924 if (oplock_break_cancelled) 924 if (oplock_break_cancelled) 925 cifs_done_oplock_break(cifsi); 925 cifs_done_oplock_break(cifsi); 926 926 927 cifs_del_pending_open(&open); 927 cifs_del_pending_open(&open); 928 928 929 // if serverclose has been offloaded t 929 // if serverclose has been offloaded to wq (on failure), it will 930 // handle offloading put as well. If s 930 // handle offloading put as well. If serverclose not offloaded, 931 // we need to handle offloading put he 931 // we need to handle offloading put here. 932 if (!serverclose_offloaded) { 932 if (!serverclose_offloaded) { 933 if (offload) 933 if (offload) 934 queue_work(fileinfo_pu 934 queue_work(fileinfo_put_wq, &cifs_file->put); 935 else 935 else 936 cifsFileInfo_put_final 936 cifsFileInfo_put_final(cifs_file); 937 } 937 } 938 } 938 } 939 939 940 int cifs_open(struct inode *inode, struct file 940 int cifs_open(struct inode *inode, struct file *file) 941 941 942 { 942 { 943 int rc = -EACCES; 943 int rc = -EACCES; 944 unsigned int xid; 944 unsigned int xid; 945 __u32 oplock; 945 __u32 oplock; 946 struct cifs_sb_info *cifs_sb; 946 struct cifs_sb_info *cifs_sb; 947 struct TCP_Server_Info *server; 947 struct TCP_Server_Info *server; 948 struct cifs_tcon *tcon; 948 struct cifs_tcon *tcon; 949 struct tcon_link *tlink; 949 struct tcon_link *tlink; 950 struct cifsFileInfo *cfile = NULL; 950 struct cifsFileInfo *cfile = NULL; 951 void *page; 951 void *page; 952 const char *full_path; 952 const char *full_path; 953 bool posix_open_ok = false; 953 bool posix_open_ok = false; 954 struct cifs_fid fid = {}; 954 struct cifs_fid fid = {}; 955 struct cifs_pending_open open; 955 struct cifs_pending_open open; 956 struct cifs_open_info_data data = {}; 956 struct cifs_open_info_data data = {}; 957 957 958 xid = get_xid(); 958 xid = get_xid(); 959 959 960 cifs_sb = CIFS_SB(inode->i_sb); 960 cifs_sb = CIFS_SB(inode->i_sb); 961 if (unlikely(cifs_forced_shutdown(cifs 961 if (unlikely(cifs_forced_shutdown(cifs_sb))) { 962 free_xid(xid); 962 free_xid(xid); 963 return -EIO; 963 return -EIO; 964 } 964 } 965 965 966 tlink = cifs_sb_tlink(cifs_sb); 966 tlink = cifs_sb_tlink(cifs_sb); 967 if (IS_ERR(tlink)) { 967 if (IS_ERR(tlink)) { 968 free_xid(xid); 968 free_xid(xid); 969 return PTR_ERR(tlink); 969 return PTR_ERR(tlink); 970 } 970 } 971 tcon = tlink_tcon(tlink); 971 tcon = tlink_tcon(tlink); 972 server = tcon->ses->server; 972 server = tcon->ses->server; 973 973 974 page = alloc_dentry_path(); 974 page = alloc_dentry_path(); 975 full_path = build_path_from_dentry(fil 975 full_path = build_path_from_dentry(file_dentry(file), page); 976 if (IS_ERR(full_path)) { 976 if (IS_ERR(full_path)) { 977 rc = PTR_ERR(full_path); 977 rc = PTR_ERR(full_path); 978 goto out; 978 goto out; 979 } 979 } 980 980 981 cifs_dbg(FYI, "inode = 0x%p file flags 981 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n", 982 inode, file->f_flags, full_pa 982 inode, file->f_flags, full_path); 983 983 984 if (file->f_flags & O_DIRECT && 984 if (file->f_flags & O_DIRECT && 985 cifs_sb->mnt_cifs_flags & CIFS_MOU 985 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { 986 if (cifs_sb->mnt_cifs_flags & 986 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) 987 file->f_op = &cifs_fil 987 file->f_op = &cifs_file_direct_nobrl_ops; 988 else 988 else 989 file->f_op = &cifs_fil 989 file->f_op = &cifs_file_direct_ops; 990 } 990 } 991 991 992 /* Get the cached handle as SMB2 close 992 /* Get the cached handle as SMB2 close is deferred */ 993 rc = cifs_get_readable_path(tcon, full 993 rc = cifs_get_readable_path(tcon, full_path, &cfile); 994 if (rc == 0) { 994 if (rc == 0) { 995 if (file->f_flags == cfile->f_ 995 if (file->f_flags == cfile->f_flags) { 996 file->private_data = c 996 file->private_data = cfile; 997 spin_lock(&CIFS_I(inod 997 spin_lock(&CIFS_I(inode)->deferred_lock); 998 cifs_del_deferred_clos 998 cifs_del_deferred_close(cfile); 999 spin_unlock(&CIFS_I(in 999 spin_unlock(&CIFS_I(inode)->deferred_lock); 1000 goto use_cache; 1000 goto use_cache; 1001 } else { 1001 } else { 1002 _cifsFileInfo_put(cfi 1002 _cifsFileInfo_put(cfile, true, false); 1003 } 1003 } 1004 } 1004 } 1005 1005 1006 if (server->oplocks) 1006 if (server->oplocks) 1007 oplock = REQ_OPLOCK; 1007 oplock = REQ_OPLOCK; 1008 else 1008 else 1009 oplock = 0; 1009 oplock = 0; 1010 1010 1011 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1011 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1012 if (!tcon->broken_posix_open && tcon- 1012 if (!tcon->broken_posix_open && tcon->unix_ext && 1013 cap_unix(tcon->ses) && (CIFS_UNIX 1013 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & 1014 le64_to_cpu(t 1014 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 1015 /* can not refresh inode info 1015 /* can not refresh inode info since size could be stale */ 1016 rc = cifs_posix_open(full_pat 1016 rc = cifs_posix_open(full_path, &inode, inode->i_sb, 1017 cifs_sb->ctx- 1017 cifs_sb->ctx->file_mode /* ignored */, 1018 file->f_flags 1018 file->f_flags, &oplock, &fid.netfid, xid); 1019 if (rc == 0) { 1019 if (rc == 0) { 1020 cifs_dbg(FYI, "posix 1020 cifs_dbg(FYI, "posix open succeeded\n"); 1021 posix_open_ok = true; 1021 posix_open_ok = true; 1022 } else if ((rc == -EINVAL) || 1022 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { 1023 if (tcon->ses->server 1023 if (tcon->ses->serverNOS) 1024 cifs_dbg(VFS, 1024 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n", 1025 tcon 1025 tcon->ses->ip_addr, 1026 tcon 1026 tcon->ses->serverNOS); 1027 tcon->broken_posix_op 1027 tcon->broken_posix_open = true; 1028 } else if ((rc != -EIO) && (r 1028 } else if ((rc != -EIO) && (rc != -EREMOTE) && 1029 (rc != -EOPNOTSUPP)) 1029 (rc != -EOPNOTSUPP)) /* path not found or net err */ 1030 goto out; 1030 goto out; 1031 /* 1031 /* 1032 * Else fallthrough to retry 1032 * Else fallthrough to retry open the old way on network i/o 1033 * or DFS errors. 1033 * or DFS errors. 1034 */ 1034 */ 1035 } 1035 } 1036 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1036 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1037 1037 1038 if (server->ops->get_lease_key) 1038 if (server->ops->get_lease_key) 1039 server->ops->get_lease_key(in 1039 server->ops->get_lease_key(inode, &fid); 1040 1040 1041 cifs_add_pending_open(&fid, tlink, &o 1041 cifs_add_pending_open(&fid, tlink, &open); 1042 1042 1043 if (!posix_open_ok) { 1043 if (!posix_open_ok) { 1044 if (server->ops->get_lease_ke 1044 if (server->ops->get_lease_key) 1045 server->ops->get_leas 1045 server->ops->get_lease_key(inode, &fid); 1046 1046 1047 rc = cifs_nt_open(full_path, 1047 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid, 1048 xid, &data) 1048 xid, &data); 1049 if (rc) { 1049 if (rc) { 1050 cifs_del_pending_open 1050 cifs_del_pending_open(&open); 1051 goto out; 1051 goto out; 1052 } 1052 } 1053 } 1053 } 1054 1054 1055 cfile = cifs_new_fileinfo(&fid, file, 1055 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target); 1056 if (cfile == NULL) { 1056 if (cfile == NULL) { 1057 if (server->ops->close) 1057 if (server->ops->close) 1058 server->ops->close(xi 1058 server->ops->close(xid, tcon, &fid); 1059 cifs_del_pending_open(&open); 1059 cifs_del_pending_open(&open); 1060 rc = -ENOMEM; 1060 rc = -ENOMEM; 1061 goto out; 1061 goto out; 1062 } 1062 } 1063 1063 1064 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1064 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1065 if ((oplock & CIFS_CREATE_ACTION) && 1065 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) { 1066 /* 1066 /* 1067 * Time to set mode which we 1067 * Time to set mode which we can not set earlier due to 1068 * problems creating new read 1068 * problems creating new read-only files. 1069 */ 1069 */ 1070 struct cifs_unix_set_info_arg 1070 struct cifs_unix_set_info_args args = { 1071 .mode = inode->i_mo 1071 .mode = inode->i_mode, 1072 .uid = INVALID_UID 1072 .uid = INVALID_UID, /* no change */ 1073 .gid = INVALID_GID 1073 .gid = INVALID_GID, /* no change */ 1074 .ctime = NO_CHANGE_6 1074 .ctime = NO_CHANGE_64, 1075 .atime = NO_CHANGE_6 1075 .atime = NO_CHANGE_64, 1076 .mtime = NO_CHANGE_6 1076 .mtime = NO_CHANGE_64, 1077 .device = 0, 1077 .device = 0, 1078 }; 1078 }; 1079 CIFSSMBUnixSetFileInfo(xid, t 1079 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid, 1080 cfile- 1080 cfile->pid); 1081 } 1081 } 1082 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1082 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1083 1083 1084 use_cache: 1084 use_cache: 1085 fscache_use_cookie(cifs_inode_cookie( 1085 fscache_use_cookie(cifs_inode_cookie(file_inode(file)), 1086 file->f_mode & FMO 1086 file->f_mode & FMODE_WRITE); 1087 if (!(file->f_flags & O_DIRECT)) 1087 if (!(file->f_flags & O_DIRECT)) 1088 goto out; 1088 goto out; 1089 if ((file->f_flags & (O_ACCMODE | O_A 1089 if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY) 1090 goto out; 1090 goto out; 1091 cifs_invalidate_cache(file_inode(file 1091 cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE); 1092 1092 1093 out: 1093 out: 1094 free_dentry_path(page); 1094 free_dentry_path(page); 1095 free_xid(xid); 1095 free_xid(xid); 1096 cifs_put_tlink(tlink); 1096 cifs_put_tlink(tlink); 1097 cifs_free_open_info(&data); 1097 cifs_free_open_info(&data); 1098 return rc; 1098 return rc; 1099 } 1099 } 1100 1100 1101 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1101 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1102 static int cifs_push_posix_locks(struct cifsF 1102 static int cifs_push_posix_locks(struct cifsFileInfo *cfile); 1103 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1103 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1104 1104 1105 /* 1105 /* 1106 * Try to reacquire byte range locks that wer 1106 * Try to reacquire byte range locks that were released when session 1107 * to server was lost. 1107 * to server was lost. 1108 */ 1108 */ 1109 static int 1109 static int 1110 cifs_relock_file(struct cifsFileInfo *cfile) 1110 cifs_relock_file(struct cifsFileInfo *cfile) 1111 { 1111 { 1112 struct cifsInodeInfo *cinode = CIFS_I 1112 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); 1113 struct cifs_tcon *tcon = tlink_tcon(c 1113 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1114 int rc = 0; 1114 int rc = 0; 1115 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1115 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1116 struct cifs_sb_info *cifs_sb = CIFS_S 1116 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); 1117 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1117 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1118 1118 1119 down_read_nested(&cinode->lock_sem, S 1119 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); 1120 if (cinode->can_cache_brlcks) { 1120 if (cinode->can_cache_brlcks) { 1121 /* can cache locks - no need 1121 /* can cache locks - no need to relock */ 1122 up_read(&cinode->lock_sem); 1122 up_read(&cinode->lock_sem); 1123 return rc; 1123 return rc; 1124 } 1124 } 1125 1125 1126 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1126 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1127 if (cap_unix(tcon->ses) && 1127 if (cap_unix(tcon->ses) && 1128 (CIFS_UNIX_FCNTL_CAP & le64_to_cp 1128 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 1129 ((cifs_sb->mnt_cifs_flags & CIFS_ 1129 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 1130 rc = cifs_push_posix_locks(cf 1130 rc = cifs_push_posix_locks(cfile); 1131 else 1131 else 1132 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1132 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1133 rc = tcon->ses->server->ops-> 1133 rc = tcon->ses->server->ops->push_mand_locks(cfile); 1134 1134 1135 up_read(&cinode->lock_sem); 1135 up_read(&cinode->lock_sem); 1136 return rc; 1136 return rc; 1137 } 1137 } 1138 1138 1139 static int 1139 static int 1140 cifs_reopen_file(struct cifsFileInfo *cfile, 1140 cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) 1141 { 1141 { 1142 int rc = -EACCES; 1142 int rc = -EACCES; 1143 unsigned int xid; 1143 unsigned int xid; 1144 __u32 oplock; 1144 __u32 oplock; 1145 struct cifs_sb_info *cifs_sb; 1145 struct cifs_sb_info *cifs_sb; 1146 struct cifs_tcon *tcon; 1146 struct cifs_tcon *tcon; 1147 struct TCP_Server_Info *server; 1147 struct TCP_Server_Info *server; 1148 struct cifsInodeInfo *cinode; 1148 struct cifsInodeInfo *cinode; 1149 struct inode *inode; 1149 struct inode *inode; 1150 void *page; 1150 void *page; 1151 const char *full_path; 1151 const char *full_path; 1152 int desired_access; 1152 int desired_access; 1153 int disposition = FILE_OPEN; 1153 int disposition = FILE_OPEN; 1154 int create_options = CREATE_NOT_DIR; 1154 int create_options = CREATE_NOT_DIR; 1155 struct cifs_open_parms oparms; 1155 struct cifs_open_parms oparms; 1156 int rdwr_for_fscache = 0; 1156 int rdwr_for_fscache = 0; 1157 1157 1158 xid = get_xid(); 1158 xid = get_xid(); 1159 mutex_lock(&cfile->fh_mutex); 1159 mutex_lock(&cfile->fh_mutex); 1160 if (!cfile->invalidHandle) { 1160 if (!cfile->invalidHandle) { 1161 mutex_unlock(&cfile->fh_mutex 1161 mutex_unlock(&cfile->fh_mutex); 1162 free_xid(xid); 1162 free_xid(xid); 1163 return 0; 1163 return 0; 1164 } 1164 } 1165 1165 1166 inode = d_inode(cfile->dentry); 1166 inode = d_inode(cfile->dentry); 1167 cifs_sb = CIFS_SB(inode->i_sb); 1167 cifs_sb = CIFS_SB(inode->i_sb); 1168 tcon = tlink_tcon(cfile->tlink); 1168 tcon = tlink_tcon(cfile->tlink); 1169 server = tcon->ses->server; 1169 server = tcon->ses->server; 1170 1170 1171 /* 1171 /* 1172 * Can not grab rename sem here becau 1172 * Can not grab rename sem here because various ops, including those 1173 * that already have the rename sem c 1173 * that already have the rename sem can end up causing writepage to get 1174 * called and if the server was down 1174 * called and if the server was down that means we end up here, and we 1175 * can never tell if the caller alrea 1175 * can never tell if the caller already has the rename_sem. 1176 */ 1176 */ 1177 page = alloc_dentry_path(); 1177 page = alloc_dentry_path(); 1178 full_path = build_path_from_dentry(cf 1178 full_path = build_path_from_dentry(cfile->dentry, page); 1179 if (IS_ERR(full_path)) { 1179 if (IS_ERR(full_path)) { 1180 mutex_unlock(&cfile->fh_mutex 1180 mutex_unlock(&cfile->fh_mutex); 1181 free_dentry_path(page); 1181 free_dentry_path(page); 1182 free_xid(xid); 1182 free_xid(xid); 1183 return PTR_ERR(full_path); 1183 return PTR_ERR(full_path); 1184 } 1184 } 1185 1185 1186 cifs_dbg(FYI, "inode = 0x%p file flag 1186 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n", 1187 inode, cfile->f_flags, full_ 1187 inode, cfile->f_flags, full_path); 1188 1188 1189 if (tcon->ses->server->oplocks) 1189 if (tcon->ses->server->oplocks) 1190 oplock = REQ_OPLOCK; 1190 oplock = REQ_OPLOCK; 1191 else 1191 else 1192 oplock = 0; 1192 oplock = 0; 1193 1193 1194 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1194 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1195 if (tcon->unix_ext && cap_unix(tcon-> 1195 if (tcon->unix_ext && cap_unix(tcon->ses) && 1196 (CIFS_UNIX_POSIX_PATH_OPS_CAP & 1196 (CIFS_UNIX_POSIX_PATH_OPS_CAP & 1197 le64_to_cpu(t 1197 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 1198 /* 1198 /* 1199 * O_CREAT, O_EXCL and O_TRUN 1199 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the 1200 * original open. Must mask t 1200 * original open. Must mask them off for a reopen. 1201 */ 1201 */ 1202 unsigned int oflags = cfile-> 1202 unsigned int oflags = cfile->f_flags & 1203 1203 ~(O_CREAT | O_EXCL | O_TRUNC); 1204 1204 1205 rc = cifs_posix_open(full_pat 1205 rc = cifs_posix_open(full_path, NULL, inode->i_sb, 1206 cifs_sb- 1206 cifs_sb->ctx->file_mode /* ignored */, 1207 oflags, 1207 oflags, &oplock, &cfile->fid.netfid, xid); 1208 if (rc == 0) { 1208 if (rc == 0) { 1209 cifs_dbg(FYI, "posix 1209 cifs_dbg(FYI, "posix reopen succeeded\n"); 1210 oparms.reconnect = tr 1210 oparms.reconnect = true; 1211 goto reopen_success; 1211 goto reopen_success; 1212 } 1212 } 1213 /* 1213 /* 1214 * fallthrough to retry open 1214 * fallthrough to retry open the old way on errors, especially 1215 * in the reconnect path it i 1215 * in the reconnect path it is important to retry hard 1216 */ 1216 */ 1217 } 1217 } 1218 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1218 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1219 1219 1220 /* If we're caching, we need to be ab 1220 /* If we're caching, we need to be able to fill in around partial writes. */ 1221 if (cifs_fscache_enabled(inode) && (c 1221 if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY) 1222 rdwr_for_fscache = 1; 1222 rdwr_for_fscache = 1; 1223 1223 1224 desired_access = cifs_convert_flags(c 1224 desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache); 1225 1225 1226 /* O_SYNC also has bit for O_DSYNC so 1226 /* O_SYNC also has bit for O_DSYNC so following check picks up either */ 1227 if (cfile->f_flags & O_SYNC) 1227 if (cfile->f_flags & O_SYNC) 1228 create_options |= CREATE_WRIT 1228 create_options |= CREATE_WRITE_THROUGH; 1229 1229 1230 if (cfile->f_flags & O_DIRECT) 1230 if (cfile->f_flags & O_DIRECT) 1231 create_options |= CREATE_NO_B 1231 create_options |= CREATE_NO_BUFFER; 1232 1232 1233 if (server->ops->get_lease_key) 1233 if (server->ops->get_lease_key) 1234 server->ops->get_lease_key(in 1234 server->ops->get_lease_key(inode, &cfile->fid); 1235 1235 1236 retry_open: 1236 retry_open: 1237 oparms = (struct cifs_open_parms) { 1237 oparms = (struct cifs_open_parms) { 1238 .tcon = tcon, 1238 .tcon = tcon, 1239 .cifs_sb = cifs_sb, 1239 .cifs_sb = cifs_sb, 1240 .desired_access = desired_acc 1240 .desired_access = desired_access, 1241 .create_options = cifs_create 1241 .create_options = cifs_create_options(cifs_sb, create_options), 1242 .disposition = disposition, 1242 .disposition = disposition, 1243 .path = full_path, 1243 .path = full_path, 1244 .fid = &cfile->fid, 1244 .fid = &cfile->fid, 1245 .reconnect = true, 1245 .reconnect = true, 1246 }; 1246 }; 1247 1247 1248 /* 1248 /* 1249 * Can not refresh inode by passing i 1249 * Can not refresh inode by passing in file_info buf to be returned by 1250 * ops->open and then calling get_ino 1250 * ops->open and then calling get_inode_info with returned buf since 1251 * file might have write behind data 1251 * file might have write behind data that needs to be flushed and server 1252 * version of file size can be stale. 1252 * version of file size can be stale. If we knew for sure that inode was 1253 * not dirty locally we could do this 1253 * not dirty locally we could do this. 1254 */ 1254 */ 1255 rc = server->ops->open(xid, &oparms, 1255 rc = server->ops->open(xid, &oparms, &oplock, NULL); 1256 if (rc == -ENOENT && oparms.reconnect 1256 if (rc == -ENOENT && oparms.reconnect == false) { 1257 /* durable handle timeout is 1257 /* durable handle timeout is expired - open the file again */ 1258 rc = server->ops->open(xid, & 1258 rc = server->ops->open(xid, &oparms, &oplock, NULL); 1259 /* indicate that we need to r 1259 /* indicate that we need to relock the file */ 1260 oparms.reconnect = true; 1260 oparms.reconnect = true; 1261 } 1261 } 1262 if (rc == -EACCES && rdwr_for_fscache 1262 if (rc == -EACCES && rdwr_for_fscache == 1) { 1263 desired_access = cifs_convert 1263 desired_access = cifs_convert_flags(cfile->f_flags, 0); 1264 rdwr_for_fscache = 2; 1264 rdwr_for_fscache = 2; 1265 goto retry_open; 1265 goto retry_open; 1266 } 1266 } 1267 1267 1268 if (rc) { 1268 if (rc) { 1269 mutex_unlock(&cfile->fh_mutex 1269 mutex_unlock(&cfile->fh_mutex); 1270 cifs_dbg(FYI, "cifs_reopen re 1270 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc); 1271 cifs_dbg(FYI, "oplock: %d\n", 1271 cifs_dbg(FYI, "oplock: %d\n", oplock); 1272 goto reopen_error_exit; 1272 goto reopen_error_exit; 1273 } 1273 } 1274 1274 1275 if (rdwr_for_fscache == 2) 1275 if (rdwr_for_fscache == 2) 1276 cifs_invalidate_cache(inode, 1276 cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE); 1277 1277 1278 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1278 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1279 reopen_success: 1279 reopen_success: 1280 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1280 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1281 cfile->invalidHandle = false; 1281 cfile->invalidHandle = false; 1282 mutex_unlock(&cfile->fh_mutex); 1282 mutex_unlock(&cfile->fh_mutex); 1283 cinode = CIFS_I(inode); 1283 cinode = CIFS_I(inode); 1284 1284 1285 if (can_flush) { 1285 if (can_flush) { 1286 rc = filemap_write_and_wait(i 1286 rc = filemap_write_and_wait(inode->i_mapping); 1287 if (!is_interrupt_error(rc)) 1287 if (!is_interrupt_error(rc)) 1288 mapping_set_error(ino 1288 mapping_set_error(inode->i_mapping, rc); 1289 1289 1290 if (tcon->posix_extensions) { 1290 if (tcon->posix_extensions) { 1291 rc = smb311_posix_get 1291 rc = smb311_posix_get_inode_info(&inode, full_path, 1292 1292 NULL, inode->i_sb, xid); 1293 } else if (tcon->unix_ext) { 1293 } else if (tcon->unix_ext) { 1294 rc = cifs_get_inode_i 1294 rc = cifs_get_inode_info_unix(&inode, full_path, 1295 1295 inode->i_sb, xid); 1296 } else { 1296 } else { 1297 rc = cifs_get_inode_i 1297 rc = cifs_get_inode_info(&inode, full_path, NULL, 1298 1298 inode->i_sb, xid, NULL); 1299 } 1299 } 1300 } 1300 } 1301 /* 1301 /* 1302 * Else we are writing out data to se 1302 * Else we are writing out data to server already and could deadlock if 1303 * we tried to flush data, and since 1303 * we tried to flush data, and since we do not know if we have data that 1304 * would invalidate the current end o 1304 * would invalidate the current end of file on the server we can not go 1305 * to the server to get the new inode 1305 * to the server to get the new inode info. 1306 */ 1306 */ 1307 1307 1308 /* 1308 /* 1309 * If the server returned a read oplo 1309 * If the server returned a read oplock and we have mandatory brlocks, 1310 * set oplock level to None. 1310 * set oplock level to None. 1311 */ 1311 */ 1312 if (server->ops->is_read_op(oplock) & 1312 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) { 1313 cifs_dbg(FYI, "Reset oplock v 1313 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n"); 1314 oplock = 0; 1314 oplock = 0; 1315 } 1315 } 1316 1316 1317 server->ops->set_fid(cfile, &cfile->f 1317 server->ops->set_fid(cfile, &cfile->fid, oplock); 1318 if (oparms.reconnect) 1318 if (oparms.reconnect) 1319 cifs_relock_file(cfile); 1319 cifs_relock_file(cfile); 1320 1320 1321 reopen_error_exit: 1321 reopen_error_exit: 1322 free_dentry_path(page); 1322 free_dentry_path(page); 1323 free_xid(xid); 1323 free_xid(xid); 1324 return rc; 1324 return rc; 1325 } 1325 } 1326 1326 1327 void smb2_deferred_work_close(struct work_str 1327 void smb2_deferred_work_close(struct work_struct *work) 1328 { 1328 { 1329 struct cifsFileInfo *cfile = containe 1329 struct cifsFileInfo *cfile = container_of(work, 1330 struct cifsFileInfo, 1330 struct cifsFileInfo, deferred.work); 1331 1331 1332 spin_lock(&CIFS_I(d_inode(cfile->dent 1332 spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); 1333 cifs_del_deferred_close(cfile); 1333 cifs_del_deferred_close(cfile); 1334 cfile->deferred_close_scheduled = fal 1334 cfile->deferred_close_scheduled = false; 1335 spin_unlock(&CIFS_I(d_inode(cfile->de 1335 spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock); 1336 _cifsFileInfo_put(cfile, true, false) 1336 _cifsFileInfo_put(cfile, true, false); 1337 } 1337 } 1338 1338 1339 static bool 1339 static bool 1340 smb2_can_defer_close(struct inode *inode, str 1340 smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose) 1341 { 1341 { 1342 struct cifs_sb_info *cifs_sb = CIFS_S 1342 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1343 struct cifsInodeInfo *cinode = CIFS_I 1343 struct cifsInodeInfo *cinode = CIFS_I(inode); 1344 1344 1345 return (cifs_sb->ctx->closetimeo && c 1345 return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose && 1346 (cinode->oplock == CI 1346 (cinode->oplock == CIFS_CACHE_RHW_FLG || 1347 cinode->oplock == CI 1347 cinode->oplock == CIFS_CACHE_RH_FLG) && 1348 !test_bit(CIFS_INO_CL 1348 !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags)); 1349 1349 1350 } 1350 } 1351 1351 1352 int cifs_close(struct inode *inode, struct fi 1352 int cifs_close(struct inode *inode, struct file *file) 1353 { 1353 { 1354 struct cifsFileInfo *cfile; 1354 struct cifsFileInfo *cfile; 1355 struct cifsInodeInfo *cinode = CIFS_I 1355 struct cifsInodeInfo *cinode = CIFS_I(inode); 1356 struct cifs_sb_info *cifs_sb = CIFS_S 1356 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1357 struct cifs_deferred_close *dclose; 1357 struct cifs_deferred_close *dclose; 1358 1358 1359 cifs_fscache_unuse_inode_cookie(inode 1359 cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE); 1360 1360 1361 if (file->private_data != NULL) { 1361 if (file->private_data != NULL) { 1362 cfile = file->private_data; 1362 cfile = file->private_data; 1363 file->private_data = NULL; 1363 file->private_data = NULL; 1364 dclose = kmalloc(sizeof(struc 1364 dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL); 1365 if ((cfile->status_file_delet 1365 if ((cfile->status_file_deleted == false) && 1366 (smb2_can_defer_close(ino 1366 (smb2_can_defer_close(inode, dclose))) { 1367 if (test_and_clear_bi 1367 if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) { 1368 inode_set_mti 1368 inode_set_mtime_to_ts(inode, 1369 1369 inode_set_ctime_current(inode)); 1370 } 1370 } 1371 spin_lock(&cinode->de 1371 spin_lock(&cinode->deferred_lock); 1372 cifs_add_deferred_clo 1372 cifs_add_deferred_close(cfile, dclose); 1373 if (cfile->deferred_c 1373 if (cfile->deferred_close_scheduled && 1374 delayed_work_pend 1374 delayed_work_pending(&cfile->deferred)) { 1375 /* 1375 /* 1376 * If there i 1376 * If there is no pending work, mod_delayed_work queues new work. 1377 * So, Increa 1377 * So, Increase the ref count to avoid use-after-free. 1378 */ 1378 */ 1379 if (!mod_dela 1379 if (!mod_delayed_work(deferredclose_wq, 1380 1380 &cfile->deferred, cifs_sb->ctx->closetimeo)) 1381 cifsF 1381 cifsFileInfo_get(cfile); 1382 } else { 1382 } else { 1383 /* Deferred c 1383 /* Deferred close for files */ 1384 queue_delayed 1384 queue_delayed_work(deferredclose_wq, 1385 1385 &cfile->deferred, cifs_sb->ctx->closetimeo); 1386 cfile->deferr 1386 cfile->deferred_close_scheduled = true; 1387 spin_unlock(& 1387 spin_unlock(&cinode->deferred_lock); 1388 return 0; 1388 return 0; 1389 } 1389 } 1390 spin_unlock(&cinode-> 1390 spin_unlock(&cinode->deferred_lock); 1391 _cifsFileInfo_put(cfi 1391 _cifsFileInfo_put(cfile, true, false); 1392 } else { 1392 } else { 1393 _cifsFileInfo_put(cfi 1393 _cifsFileInfo_put(cfile, true, false); 1394 kfree(dclose); 1394 kfree(dclose); 1395 } 1395 } 1396 } 1396 } 1397 1397 1398 /* return code from the ->release op 1398 /* return code from the ->release op is always ignored */ 1399 return 0; 1399 return 0; 1400 } 1400 } 1401 1401 1402 void 1402 void 1403 cifs_reopen_persistent_handles(struct cifs_tc 1403 cifs_reopen_persistent_handles(struct cifs_tcon *tcon) 1404 { 1404 { 1405 struct cifsFileInfo *open_file, *tmp; 1405 struct cifsFileInfo *open_file, *tmp; 1406 LIST_HEAD(tmp_list); 1406 LIST_HEAD(tmp_list); 1407 1407 1408 if (!tcon->use_persistent || !tcon->n 1408 if (!tcon->use_persistent || !tcon->need_reopen_files) 1409 return; 1409 return; 1410 1410 1411 tcon->need_reopen_files = false; 1411 tcon->need_reopen_files = false; 1412 1412 1413 cifs_dbg(FYI, "Reopen persistent hand 1413 cifs_dbg(FYI, "Reopen persistent handles\n"); 1414 1414 1415 /* list all files open on tree connec 1415 /* list all files open on tree connection, reopen resilient handles */ 1416 spin_lock(&tcon->open_file_lock); 1416 spin_lock(&tcon->open_file_lock); 1417 list_for_each_entry(open_file, &tcon- 1417 list_for_each_entry(open_file, &tcon->openFileList, tlist) { 1418 if (!open_file->invalidHandle 1418 if (!open_file->invalidHandle) 1419 continue; 1419 continue; 1420 cifsFileInfo_get(open_file); 1420 cifsFileInfo_get(open_file); 1421 list_add_tail(&open_file->rli 1421 list_add_tail(&open_file->rlist, &tmp_list); 1422 } 1422 } 1423 spin_unlock(&tcon->open_file_lock); 1423 spin_unlock(&tcon->open_file_lock); 1424 1424 1425 list_for_each_entry_safe(open_file, t 1425 list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) { 1426 if (cifs_reopen_file(open_fil 1426 if (cifs_reopen_file(open_file, false /* do not flush */)) 1427 tcon->need_reopen_fil 1427 tcon->need_reopen_files = true; 1428 list_del_init(&open_file->rli 1428 list_del_init(&open_file->rlist); 1429 cifsFileInfo_put(open_file); 1429 cifsFileInfo_put(open_file); 1430 } 1430 } 1431 } 1431 } 1432 1432 1433 int cifs_closedir(struct inode *inode, struct 1433 int cifs_closedir(struct inode *inode, struct file *file) 1434 { 1434 { 1435 int rc = 0; 1435 int rc = 0; 1436 unsigned int xid; 1436 unsigned int xid; 1437 struct cifsFileInfo *cfile = file->pr 1437 struct cifsFileInfo *cfile = file->private_data; 1438 struct cifs_tcon *tcon; 1438 struct cifs_tcon *tcon; 1439 struct TCP_Server_Info *server; 1439 struct TCP_Server_Info *server; 1440 char *buf; 1440 char *buf; 1441 1441 1442 cifs_dbg(FYI, "Closedir inode = 0x%p\ 1442 cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode); 1443 1443 1444 if (cfile == NULL) 1444 if (cfile == NULL) 1445 return rc; 1445 return rc; 1446 1446 1447 xid = get_xid(); 1447 xid = get_xid(); 1448 tcon = tlink_tcon(cfile->tlink); 1448 tcon = tlink_tcon(cfile->tlink); 1449 server = tcon->ses->server; 1449 server = tcon->ses->server; 1450 1450 1451 cifs_dbg(FYI, "Freeing private data i 1451 cifs_dbg(FYI, "Freeing private data in close dir\n"); 1452 spin_lock(&cfile->file_info_lock); 1452 spin_lock(&cfile->file_info_lock); 1453 if (server->ops->dir_needs_close(cfil 1453 if (server->ops->dir_needs_close(cfile)) { 1454 cfile->invalidHandle = true; 1454 cfile->invalidHandle = true; 1455 spin_unlock(&cfile->file_info 1455 spin_unlock(&cfile->file_info_lock); 1456 if (server->ops->close_dir) 1456 if (server->ops->close_dir) 1457 rc = server->ops->clo 1457 rc = server->ops->close_dir(xid, tcon, &cfile->fid); 1458 else 1458 else 1459 rc = -ENOSYS; 1459 rc = -ENOSYS; 1460 cifs_dbg(FYI, "Closing uncomp 1460 cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc); 1461 /* not much we can do if it f 1461 /* not much we can do if it fails anyway, ignore rc */ 1462 rc = 0; 1462 rc = 0; 1463 } else 1463 } else 1464 spin_unlock(&cfile->file_info 1464 spin_unlock(&cfile->file_info_lock); 1465 1465 1466 buf = cfile->srch_inf.ntwrk_buf_start 1466 buf = cfile->srch_inf.ntwrk_buf_start; 1467 if (buf) { 1467 if (buf) { 1468 cifs_dbg(FYI, "closedir free 1468 cifs_dbg(FYI, "closedir free smb buf in srch struct\n"); 1469 cfile->srch_inf.ntwrk_buf_sta 1469 cfile->srch_inf.ntwrk_buf_start = NULL; 1470 if (cfile->srch_inf.smallBuf) 1470 if (cfile->srch_inf.smallBuf) 1471 cifs_small_buf_releas 1471 cifs_small_buf_release(buf); 1472 else 1472 else 1473 cifs_buf_release(buf) 1473 cifs_buf_release(buf); 1474 } 1474 } 1475 1475 1476 cifs_put_tlink(cfile->tlink); 1476 cifs_put_tlink(cfile->tlink); 1477 kfree(file->private_data); 1477 kfree(file->private_data); 1478 file->private_data = NULL; 1478 file->private_data = NULL; 1479 /* BB can we lock the filestruct whil 1479 /* BB can we lock the filestruct while this is going on? */ 1480 free_xid(xid); 1480 free_xid(xid); 1481 return rc; 1481 return rc; 1482 } 1482 } 1483 1483 1484 static struct cifsLockInfo * 1484 static struct cifsLockInfo * 1485 cifs_lock_init(__u64 offset, __u64 length, __ 1485 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags) 1486 { 1486 { 1487 struct cifsLockInfo *lock = 1487 struct cifsLockInfo *lock = 1488 kmalloc(sizeof(struct cifsLoc 1488 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL); 1489 if (!lock) 1489 if (!lock) 1490 return lock; 1490 return lock; 1491 lock->offset = offset; 1491 lock->offset = offset; 1492 lock->length = length; 1492 lock->length = length; 1493 lock->type = type; 1493 lock->type = type; 1494 lock->pid = current->tgid; 1494 lock->pid = current->tgid; 1495 lock->flags = flags; 1495 lock->flags = flags; 1496 INIT_LIST_HEAD(&lock->blist); 1496 INIT_LIST_HEAD(&lock->blist); 1497 init_waitqueue_head(&lock->block_q); 1497 init_waitqueue_head(&lock->block_q); 1498 return lock; 1498 return lock; 1499 } 1499 } 1500 1500 1501 void 1501 void 1502 cifs_del_lock_waiters(struct cifsLockInfo *lo 1502 cifs_del_lock_waiters(struct cifsLockInfo *lock) 1503 { 1503 { 1504 struct cifsLockInfo *li, *tmp; 1504 struct cifsLockInfo *li, *tmp; 1505 list_for_each_entry_safe(li, tmp, &lo 1505 list_for_each_entry_safe(li, tmp, &lock->blist, blist) { 1506 list_del_init(&li->blist); 1506 list_del_init(&li->blist); 1507 wake_up(&li->block_q); 1507 wake_up(&li->block_q); 1508 } 1508 } 1509 } 1509 } 1510 1510 1511 #define CIFS_LOCK_OP 0 1511 #define CIFS_LOCK_OP 0 1512 #define CIFS_READ_OP 1 1512 #define CIFS_READ_OP 1 1513 #define CIFS_WRITE_OP 2 1513 #define CIFS_WRITE_OP 2 1514 1514 1515 /* @rw_check : 0 - no op, 1 - read, 2 - write 1515 /* @rw_check : 0 - no op, 1 - read, 2 - write */ 1516 static bool 1516 static bool 1517 cifs_find_fid_lock_conflict(struct cifs_fid_l 1517 cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset, 1518 __u64 length, __u 1518 __u64 length, __u8 type, __u16 flags, 1519 struct cifsFileIn 1519 struct cifsFileInfo *cfile, 1520 struct cifsLockIn 1520 struct cifsLockInfo **conf_lock, int rw_check) 1521 { 1521 { 1522 struct cifsLockInfo *li; 1522 struct cifsLockInfo *li; 1523 struct cifsFileInfo *cur_cfile = fdlo 1523 struct cifsFileInfo *cur_cfile = fdlocks->cfile; 1524 struct TCP_Server_Info *server = tlin 1524 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; 1525 1525 1526 list_for_each_entry(li, &fdlocks->loc 1526 list_for_each_entry(li, &fdlocks->locks, llist) { 1527 if (offset + length <= li->of 1527 if (offset + length <= li->offset || 1528 offset >= li->offset + li 1528 offset >= li->offset + li->length) 1529 continue; 1529 continue; 1530 if (rw_check != CIFS_LOCK_OP 1530 if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid && 1531 server->ops->compare_fids 1531 server->ops->compare_fids(cfile, cur_cfile)) { 1532 /* shared lock preven 1532 /* shared lock prevents write op through the same fid */ 1533 if (!(li->type & serv 1533 if (!(li->type & server->vals->shared_lock_type) || 1534 rw_check != CIFS_ 1534 rw_check != CIFS_WRITE_OP) 1535 continue; 1535 continue; 1536 } 1536 } 1537 if ((type & server->vals->sha 1537 if ((type & server->vals->shared_lock_type) && 1538 ((server->ops->compare_fi 1538 ((server->ops->compare_fids(cfile, cur_cfile) && 1539 current->tgid == li->pid 1539 current->tgid == li->pid) || type == li->type)) 1540 continue; 1540 continue; 1541 if (rw_check == CIFS_LOCK_OP 1541 if (rw_check == CIFS_LOCK_OP && 1542 (flags & FL_OFDLCK) && (l 1542 (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) && 1543 server->ops->compare_fids 1543 server->ops->compare_fids(cfile, cur_cfile)) 1544 continue; 1544 continue; 1545 if (conf_lock) 1545 if (conf_lock) 1546 *conf_lock = li; 1546 *conf_lock = li; 1547 return true; 1547 return true; 1548 } 1548 } 1549 return false; 1549 return false; 1550 } 1550 } 1551 1551 1552 bool 1552 bool 1553 cifs_find_lock_conflict(struct cifsFileInfo * 1553 cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length, 1554 __u8 type, __u16 flag 1554 __u8 type, __u16 flags, 1555 struct cifsLockInfo * 1555 struct cifsLockInfo **conf_lock, int rw_check) 1556 { 1556 { 1557 bool rc = false; 1557 bool rc = false; 1558 struct cifs_fid_locks *cur; 1558 struct cifs_fid_locks *cur; 1559 struct cifsInodeInfo *cinode = CIFS_I 1559 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); 1560 1560 1561 list_for_each_entry(cur, &cinode->lli 1561 list_for_each_entry(cur, &cinode->llist, llist) { 1562 rc = cifs_find_fid_lock_confl 1562 rc = cifs_find_fid_lock_conflict(cur, offset, length, type, 1563 1563 flags, cfile, conf_lock, 1564 1564 rw_check); 1565 if (rc) 1565 if (rc) 1566 break; 1566 break; 1567 } 1567 } 1568 1568 1569 return rc; 1569 return rc; 1570 } 1570 } 1571 1571 1572 /* 1572 /* 1573 * Check if there is another lock that preven 1573 * Check if there is another lock that prevents us to set the lock (mandatory 1574 * style). If such a lock exists, update the 1574 * style). If such a lock exists, update the flock structure with its 1575 * properties. Otherwise, set the flock type 1575 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks 1576 * or leave it the same if we can't. Returns 1576 * or leave it the same if we can't. Returns 0 if we don't need to request to 1577 * the server or 1 otherwise. 1577 * the server or 1 otherwise. 1578 */ 1578 */ 1579 static int 1579 static int 1580 cifs_lock_test(struct cifsFileInfo *cfile, __ 1580 cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length, 1581 __u8 type, struct file_lock *f 1581 __u8 type, struct file_lock *flock) 1582 { 1582 { 1583 int rc = 0; 1583 int rc = 0; 1584 struct cifsLockInfo *conf_lock; 1584 struct cifsLockInfo *conf_lock; 1585 struct cifsInodeInfo *cinode = CIFS_I 1585 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); 1586 struct TCP_Server_Info *server = tlin 1586 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; 1587 bool exist; 1587 bool exist; 1588 1588 1589 down_read(&cinode->lock_sem); 1589 down_read(&cinode->lock_sem); 1590 1590 1591 exist = cifs_find_lock_conflict(cfile 1591 exist = cifs_find_lock_conflict(cfile, offset, length, type, 1592 flock 1592 flock->c.flc_flags, &conf_lock, 1593 CIFS_ 1593 CIFS_LOCK_OP); 1594 if (exist) { 1594 if (exist) { 1595 flock->fl_start = conf_lock-> 1595 flock->fl_start = conf_lock->offset; 1596 flock->fl_end = conf_lock->of 1596 flock->fl_end = conf_lock->offset + conf_lock->length - 1; 1597 flock->c.flc_pid = conf_lock- 1597 flock->c.flc_pid = conf_lock->pid; 1598 if (conf_lock->type & server- 1598 if (conf_lock->type & server->vals->shared_lock_type) 1599 flock->c.flc_type = F 1599 flock->c.flc_type = F_RDLCK; 1600 else 1600 else 1601 flock->c.flc_type = F 1601 flock->c.flc_type = F_WRLCK; 1602 } else if (!cinode->can_cache_brlcks) 1602 } else if (!cinode->can_cache_brlcks) 1603 rc = 1; 1603 rc = 1; 1604 else 1604 else 1605 flock->c.flc_type = F_UNLCK; 1605 flock->c.flc_type = F_UNLCK; 1606 1606 1607 up_read(&cinode->lock_sem); 1607 up_read(&cinode->lock_sem); 1608 return rc; 1608 return rc; 1609 } 1609 } 1610 1610 1611 static void 1611 static void 1612 cifs_lock_add(struct cifsFileInfo *cfile, str 1612 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock) 1613 { 1613 { 1614 struct cifsInodeInfo *cinode = CIFS_I 1614 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); 1615 cifs_down_write(&cinode->lock_sem); 1615 cifs_down_write(&cinode->lock_sem); 1616 list_add_tail(&lock->llist, &cfile->l 1616 list_add_tail(&lock->llist, &cfile->llist->locks); 1617 up_write(&cinode->lock_sem); 1617 up_write(&cinode->lock_sem); 1618 } 1618 } 1619 1619 1620 /* 1620 /* 1621 * Set the byte-range lock (mandatory style). 1621 * Set the byte-range lock (mandatory style). Returns: 1622 * 1) 0, if we set the lock and don't need to 1622 * 1) 0, if we set the lock and don't need to request to the server; 1623 * 2) 1, if no locks prevent us but we need t 1623 * 2) 1, if no locks prevent us but we need to request to the server; 1624 * 3) -EACCES, if there is a lock that preven 1624 * 3) -EACCES, if there is a lock that prevents us and wait is false. 1625 */ 1625 */ 1626 static int 1626 static int 1627 cifs_lock_add_if(struct cifsFileInfo *cfile, 1627 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock, 1628 bool wait) 1628 bool wait) 1629 { 1629 { 1630 struct cifsLockInfo *conf_lock; 1630 struct cifsLockInfo *conf_lock; 1631 struct cifsInodeInfo *cinode = CIFS_I 1631 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); 1632 bool exist; 1632 bool exist; 1633 int rc = 0; 1633 int rc = 0; 1634 1634 1635 try_again: 1635 try_again: 1636 exist = false; 1636 exist = false; 1637 cifs_down_write(&cinode->lock_sem); 1637 cifs_down_write(&cinode->lock_sem); 1638 1638 1639 exist = cifs_find_lock_conflict(cfile 1639 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length, 1640 lock- 1640 lock->type, lock->flags, &conf_lock, 1641 CIFS_ 1641 CIFS_LOCK_OP); 1642 if (!exist && cinode->can_cache_brlck 1642 if (!exist && cinode->can_cache_brlcks) { 1643 list_add_tail(&lock->llist, & 1643 list_add_tail(&lock->llist, &cfile->llist->locks); 1644 up_write(&cinode->lock_sem); 1644 up_write(&cinode->lock_sem); 1645 return rc; 1645 return rc; 1646 } 1646 } 1647 1647 1648 if (!exist) 1648 if (!exist) 1649 rc = 1; 1649 rc = 1; 1650 else if (!wait) 1650 else if (!wait) 1651 rc = -EACCES; 1651 rc = -EACCES; 1652 else { 1652 else { 1653 list_add_tail(&lock->blist, & 1653 list_add_tail(&lock->blist, &conf_lock->blist); 1654 up_write(&cinode->lock_sem); 1654 up_write(&cinode->lock_sem); 1655 rc = wait_event_interruptible 1655 rc = wait_event_interruptible(lock->block_q, 1656 (lock 1656 (lock->blist.prev == &lock->blist) && 1657 (lock 1657 (lock->blist.next == &lock->blist)); 1658 if (!rc) 1658 if (!rc) 1659 goto try_again; 1659 goto try_again; 1660 cifs_down_write(&cinode->lock 1660 cifs_down_write(&cinode->lock_sem); 1661 list_del_init(&lock->blist); 1661 list_del_init(&lock->blist); 1662 } 1662 } 1663 1663 1664 up_write(&cinode->lock_sem); 1664 up_write(&cinode->lock_sem); 1665 return rc; 1665 return rc; 1666 } 1666 } 1667 1667 1668 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1668 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1669 /* 1669 /* 1670 * Check if there is another lock that preven 1670 * Check if there is another lock that prevents us to set the lock (posix 1671 * style). If such a lock exists, update the 1671 * style). If such a lock exists, update the flock structure with its 1672 * properties. Otherwise, set the flock type 1672 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks 1673 * or leave it the same if we can't. Returns 1673 * or leave it the same if we can't. Returns 0 if we don't need to request to 1674 * the server or 1 otherwise. 1674 * the server or 1 otherwise. 1675 */ 1675 */ 1676 static int 1676 static int 1677 cifs_posix_lock_test(struct file *file, struc 1677 cifs_posix_lock_test(struct file *file, struct file_lock *flock) 1678 { 1678 { 1679 int rc = 0; 1679 int rc = 0; 1680 struct cifsInodeInfo *cinode = CIFS_I 1680 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); 1681 unsigned char saved_type = flock->c.f 1681 unsigned char saved_type = flock->c.flc_type; 1682 1682 1683 if ((flock->c.flc_flags & FL_POSIX) = 1683 if ((flock->c.flc_flags & FL_POSIX) == 0) 1684 return 1; 1684 return 1; 1685 1685 1686 down_read(&cinode->lock_sem); 1686 down_read(&cinode->lock_sem); 1687 posix_test_lock(file, flock); 1687 posix_test_lock(file, flock); 1688 1688 1689 if (lock_is_unlock(flock) && !cinode- 1689 if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) { 1690 flock->c.flc_type = saved_typ 1690 flock->c.flc_type = saved_type; 1691 rc = 1; 1691 rc = 1; 1692 } 1692 } 1693 1693 1694 up_read(&cinode->lock_sem); 1694 up_read(&cinode->lock_sem); 1695 return rc; 1695 return rc; 1696 } 1696 } 1697 1697 1698 /* 1698 /* 1699 * Set the byte-range lock (posix style). Ret 1699 * Set the byte-range lock (posix style). Returns: 1700 * 1) <0, if the error occurs while setting t 1700 * 1) <0, if the error occurs while setting the lock; 1701 * 2) 0, if we set the lock and don't need to 1701 * 2) 0, if we set the lock and don't need to request to the server; 1702 * 3) FILE_LOCK_DEFERRED, if we will wait for 1702 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock; 1703 * 4) FILE_LOCK_DEFERRED + 1, if we need to r 1703 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server. 1704 */ 1704 */ 1705 static int 1705 static int 1706 cifs_posix_lock_set(struct file *file, struct 1706 cifs_posix_lock_set(struct file *file, struct file_lock *flock) 1707 { 1707 { 1708 struct cifsInodeInfo *cinode = CIFS_I 1708 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); 1709 int rc = FILE_LOCK_DEFERRED + 1; 1709 int rc = FILE_LOCK_DEFERRED + 1; 1710 1710 1711 if ((flock->c.flc_flags & FL_POSIX) = 1711 if ((flock->c.flc_flags & FL_POSIX) == 0) 1712 return rc; 1712 return rc; 1713 1713 1714 cifs_down_write(&cinode->lock_sem); 1714 cifs_down_write(&cinode->lock_sem); 1715 if (!cinode->can_cache_brlcks) { 1715 if (!cinode->can_cache_brlcks) { 1716 up_write(&cinode->lock_sem); 1716 up_write(&cinode->lock_sem); 1717 return rc; 1717 return rc; 1718 } 1718 } 1719 1719 1720 rc = posix_lock_file(file, flock, NUL 1720 rc = posix_lock_file(file, flock, NULL); 1721 up_write(&cinode->lock_sem); 1721 up_write(&cinode->lock_sem); 1722 return rc; 1722 return rc; 1723 } 1723 } 1724 1724 1725 int 1725 int 1726 cifs_push_mandatory_locks(struct cifsFileInfo 1726 cifs_push_mandatory_locks(struct cifsFileInfo *cfile) 1727 { 1727 { 1728 unsigned int xid; 1728 unsigned int xid; 1729 int rc = 0, stored_rc; 1729 int rc = 0, stored_rc; 1730 struct cifsLockInfo *li, *tmp; 1730 struct cifsLockInfo *li, *tmp; 1731 struct cifs_tcon *tcon; 1731 struct cifs_tcon *tcon; 1732 unsigned int num, max_num, max_buf; 1732 unsigned int num, max_num, max_buf; 1733 LOCKING_ANDX_RANGE *buf, *cur; 1733 LOCKING_ANDX_RANGE *buf, *cur; 1734 static const int types[] = { 1734 static const int types[] = { 1735 LOCKING_ANDX_LARGE_FILES, 1735 LOCKING_ANDX_LARGE_FILES, 1736 LOCKING_ANDX_SHARED_LOCK | LO 1736 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES 1737 }; 1737 }; 1738 int i; 1738 int i; 1739 1739 1740 xid = get_xid(); 1740 xid = get_xid(); 1741 tcon = tlink_tcon(cfile->tlink); 1741 tcon = tlink_tcon(cfile->tlink); 1742 1742 1743 /* 1743 /* 1744 * Accessing maxBuf is racy with cifs 1744 * Accessing maxBuf is racy with cifs_reconnect - need to store value 1745 * and check it before using. 1745 * and check it before using. 1746 */ 1746 */ 1747 max_buf = tcon->ses->server->maxBuf; 1747 max_buf = tcon->ses->server->maxBuf; 1748 if (max_buf < (sizeof(struct smb_hdr) 1748 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) { 1749 free_xid(xid); 1749 free_xid(xid); 1750 return -EINVAL; 1750 return -EINVAL; 1751 } 1751 } 1752 1752 1753 BUILD_BUG_ON(sizeof(struct smb_hdr) + 1753 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > 1754 PAGE_SIZE); 1754 PAGE_SIZE); 1755 max_buf = min_t(unsigned int, max_buf 1755 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), 1756 PAGE_SIZE); 1756 PAGE_SIZE); 1757 max_num = (max_buf - sizeof(struct sm 1757 max_num = (max_buf - sizeof(struct smb_hdr)) / 1758 1758 sizeof(LOCKING_ANDX_RANGE); 1759 buf = kcalloc(max_num, sizeof(LOCKING 1759 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); 1760 if (!buf) { 1760 if (!buf) { 1761 free_xid(xid); 1761 free_xid(xid); 1762 return -ENOMEM; 1762 return -ENOMEM; 1763 } 1763 } 1764 1764 1765 for (i = 0; i < 2; i++) { 1765 for (i = 0; i < 2; i++) { 1766 cur = buf; 1766 cur = buf; 1767 num = 0; 1767 num = 0; 1768 list_for_each_entry_safe(li, 1768 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { 1769 if (li->type != types 1769 if (li->type != types[i]) 1770 continue; 1770 continue; 1771 cur->Pid = cpu_to_le1 1771 cur->Pid = cpu_to_le16(li->pid); 1772 cur->LengthLow = cpu_ 1772 cur->LengthLow = cpu_to_le32((u32)li->length); 1773 cur->LengthHigh = cpu 1773 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); 1774 cur->OffsetLow = cpu_ 1774 cur->OffsetLow = cpu_to_le32((u32)li->offset); 1775 cur->OffsetHigh = cpu 1775 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); 1776 if (++num == max_num) 1776 if (++num == max_num) { 1777 stored_rc = c 1777 stored_rc = cifs_lockv(xid, tcon, 1778 1778 cfile->fid.netfid, 1779 1779 (__u8)li->type, 0, num, 1780 1780 buf); 1781 if (stored_rc 1781 if (stored_rc) 1782 rc = 1782 rc = stored_rc; 1783 cur = buf; 1783 cur = buf; 1784 num = 0; 1784 num = 0; 1785 } else 1785 } else 1786 cur++; 1786 cur++; 1787 } 1787 } 1788 1788 1789 if (num) { 1789 if (num) { 1790 stored_rc = cifs_lock 1790 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, 1791 1791 (__u8)types[i], 0, num, buf); 1792 if (stored_rc) 1792 if (stored_rc) 1793 rc = stored_r 1793 rc = stored_rc; 1794 } 1794 } 1795 } 1795 } 1796 1796 1797 kfree(buf); 1797 kfree(buf); 1798 free_xid(xid); 1798 free_xid(xid); 1799 return rc; 1799 return rc; 1800 } 1800 } 1801 1801 1802 static __u32 1802 static __u32 1803 hash_lockowner(fl_owner_t owner) 1803 hash_lockowner(fl_owner_t owner) 1804 { 1804 { 1805 return cifs_lock_secret ^ hash32_ptr( 1805 return cifs_lock_secret ^ hash32_ptr((const void *)owner); 1806 } 1806 } 1807 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1807 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1808 1808 1809 struct lock_to_push { 1809 struct lock_to_push { 1810 struct list_head llist; 1810 struct list_head llist; 1811 __u64 offset; 1811 __u64 offset; 1812 __u64 length; 1812 __u64 length; 1813 __u32 pid; 1813 __u32 pid; 1814 __u16 netfid; 1814 __u16 netfid; 1815 __u8 type; 1815 __u8 type; 1816 }; 1816 }; 1817 1817 1818 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1818 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1819 static int 1819 static int 1820 cifs_push_posix_locks(struct cifsFileInfo *cf 1820 cifs_push_posix_locks(struct cifsFileInfo *cfile) 1821 { 1821 { 1822 struct inode *inode = d_inode(cfile-> 1822 struct inode *inode = d_inode(cfile->dentry); 1823 struct cifs_tcon *tcon = tlink_tcon(c 1823 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1824 struct file_lock *flock; 1824 struct file_lock *flock; 1825 struct file_lock_context *flctx = loc 1825 struct file_lock_context *flctx = locks_inode_context(inode); 1826 unsigned int count = 0, i; 1826 unsigned int count = 0, i; 1827 int rc = 0, xid, type; 1827 int rc = 0, xid, type; 1828 struct list_head locks_to_send, *el; 1828 struct list_head locks_to_send, *el; 1829 struct lock_to_push *lck, *tmp; 1829 struct lock_to_push *lck, *tmp; 1830 __u64 length; 1830 __u64 length; 1831 1831 1832 xid = get_xid(); 1832 xid = get_xid(); 1833 1833 1834 if (!flctx) 1834 if (!flctx) 1835 goto out; 1835 goto out; 1836 1836 1837 spin_lock(&flctx->flc_lock); 1837 spin_lock(&flctx->flc_lock); 1838 list_for_each(el, &flctx->flc_posix) 1838 list_for_each(el, &flctx->flc_posix) { 1839 count++; 1839 count++; 1840 } 1840 } 1841 spin_unlock(&flctx->flc_lock); 1841 spin_unlock(&flctx->flc_lock); 1842 1842 1843 INIT_LIST_HEAD(&locks_to_send); 1843 INIT_LIST_HEAD(&locks_to_send); 1844 1844 1845 /* 1845 /* 1846 * Allocating count locks is enough b 1846 * Allocating count locks is enough because no FL_POSIX locks can be 1847 * added to the list while we are hol 1847 * added to the list while we are holding cinode->lock_sem that 1848 * protects locking operations of thi 1848 * protects locking operations of this inode. 1849 */ 1849 */ 1850 for (i = 0; i < count; i++) { 1850 for (i = 0; i < count; i++) { 1851 lck = kmalloc(sizeof(struct l 1851 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); 1852 if (!lck) { 1852 if (!lck) { 1853 rc = -ENOMEM; 1853 rc = -ENOMEM; 1854 goto err_out; 1854 goto err_out; 1855 } 1855 } 1856 list_add_tail(&lck->llist, &l 1856 list_add_tail(&lck->llist, &locks_to_send); 1857 } 1857 } 1858 1858 1859 el = locks_to_send.next; 1859 el = locks_to_send.next; 1860 spin_lock(&flctx->flc_lock); 1860 spin_lock(&flctx->flc_lock); 1861 for_each_file_lock(flock, &flctx->flc 1861 for_each_file_lock(flock, &flctx->flc_posix) { 1862 unsigned char ftype = flock-> 1862 unsigned char ftype = flock->c.flc_type; 1863 1863 1864 if (el == &locks_to_send) { 1864 if (el == &locks_to_send) { 1865 /* 1865 /* 1866 * The list ended. We 1866 * The list ended. We don't have enough allocated 1867 * structures - somet 1867 * structures - something is really wrong. 1868 */ 1868 */ 1869 cifs_dbg(VFS, "Can't 1869 cifs_dbg(VFS, "Can't push all brlocks!\n"); 1870 break; 1870 break; 1871 } 1871 } 1872 length = cifs_flock_len(flock 1872 length = cifs_flock_len(flock); 1873 if (ftype == F_RDLCK || ftype 1873 if (ftype == F_RDLCK || ftype == F_SHLCK) 1874 type = CIFS_RDLCK; 1874 type = CIFS_RDLCK; 1875 else 1875 else 1876 type = CIFS_WRLCK; 1876 type = CIFS_WRLCK; 1877 lck = list_entry(el, struct l 1877 lck = list_entry(el, struct lock_to_push, llist); 1878 lck->pid = hash_lockowner(flo 1878 lck->pid = hash_lockowner(flock->c.flc_owner); 1879 lck->netfid = cfile->fid.netf 1879 lck->netfid = cfile->fid.netfid; 1880 lck->length = length; 1880 lck->length = length; 1881 lck->type = type; 1881 lck->type = type; 1882 lck->offset = flock->fl_start 1882 lck->offset = flock->fl_start; 1883 } 1883 } 1884 spin_unlock(&flctx->flc_lock); 1884 spin_unlock(&flctx->flc_lock); 1885 1885 1886 list_for_each_entry_safe(lck, tmp, &l 1886 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { 1887 int stored_rc; 1887 int stored_rc; 1888 1888 1889 stored_rc = CIFSSMBPosixLock( 1889 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid, 1890 1890 lck->offset, lck->length, NULL, 1891 1891 lck->type, 0); 1892 if (stored_rc) 1892 if (stored_rc) 1893 rc = stored_rc; 1893 rc = stored_rc; 1894 list_del(&lck->llist); 1894 list_del(&lck->llist); 1895 kfree(lck); 1895 kfree(lck); 1896 } 1896 } 1897 1897 1898 out: 1898 out: 1899 free_xid(xid); 1899 free_xid(xid); 1900 return rc; 1900 return rc; 1901 err_out: 1901 err_out: 1902 list_for_each_entry_safe(lck, tmp, &l 1902 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) { 1903 list_del(&lck->llist); 1903 list_del(&lck->llist); 1904 kfree(lck); 1904 kfree(lck); 1905 } 1905 } 1906 goto out; 1906 goto out; 1907 } 1907 } 1908 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1908 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1909 1909 1910 static int 1910 static int 1911 cifs_push_locks(struct cifsFileInfo *cfile) 1911 cifs_push_locks(struct cifsFileInfo *cfile) 1912 { 1912 { 1913 struct cifsInodeInfo *cinode = CIFS_I 1913 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); 1914 struct cifs_tcon *tcon = tlink_tcon(c 1914 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1915 int rc = 0; 1915 int rc = 0; 1916 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1916 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1917 struct cifs_sb_info *cifs_sb = CIFS_S 1917 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); 1918 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1918 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1919 1919 1920 /* we are going to update can_cache_b 1920 /* we are going to update can_cache_brlcks here - need a write access */ 1921 cifs_down_write(&cinode->lock_sem); 1921 cifs_down_write(&cinode->lock_sem); 1922 if (!cinode->can_cache_brlcks) { 1922 if (!cinode->can_cache_brlcks) { 1923 up_write(&cinode->lock_sem); 1923 up_write(&cinode->lock_sem); 1924 return rc; 1924 return rc; 1925 } 1925 } 1926 1926 1927 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1927 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1928 if (cap_unix(tcon->ses) && 1928 if (cap_unix(tcon->ses) && 1929 (CIFS_UNIX_FCNTL_CAP & le64_to_cp 1929 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 1930 ((cifs_sb->mnt_cifs_flags & CIFS_ 1930 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 1931 rc = cifs_push_posix_locks(cf 1931 rc = cifs_push_posix_locks(cfile); 1932 else 1932 else 1933 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 1933 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1934 rc = tcon->ses->server->ops-> 1934 rc = tcon->ses->server->ops->push_mand_locks(cfile); 1935 1935 1936 cinode->can_cache_brlcks = false; 1936 cinode->can_cache_brlcks = false; 1937 up_write(&cinode->lock_sem); 1937 up_write(&cinode->lock_sem); 1938 return rc; 1938 return rc; 1939 } 1939 } 1940 1940 1941 static void 1941 static void 1942 cifs_read_flock(struct file_lock *flock, __u3 1942 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock, 1943 bool *wait_flag, struct TCP_S 1943 bool *wait_flag, struct TCP_Server_Info *server) 1944 { 1944 { 1945 if (flock->c.flc_flags & FL_POSIX) 1945 if (flock->c.flc_flags & FL_POSIX) 1946 cifs_dbg(FYI, "Posix\n"); 1946 cifs_dbg(FYI, "Posix\n"); 1947 if (flock->c.flc_flags & FL_FLOCK) 1947 if (flock->c.flc_flags & FL_FLOCK) 1948 cifs_dbg(FYI, "Flock\n"); 1948 cifs_dbg(FYI, "Flock\n"); 1949 if (flock->c.flc_flags & FL_SLEEP) { 1949 if (flock->c.flc_flags & FL_SLEEP) { 1950 cifs_dbg(FYI, "Blocking lock\ 1950 cifs_dbg(FYI, "Blocking lock\n"); 1951 *wait_flag = true; 1951 *wait_flag = true; 1952 } 1952 } 1953 if (flock->c.flc_flags & FL_ACCESS) 1953 if (flock->c.flc_flags & FL_ACCESS) 1954 cifs_dbg(FYI, "Process suspen 1954 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n"); 1955 if (flock->c.flc_flags & FL_LEASE) 1955 if (flock->c.flc_flags & FL_LEASE) 1956 cifs_dbg(FYI, "Lease on file 1956 cifs_dbg(FYI, "Lease on file - not implemented yet\n"); 1957 if (flock->c.flc_flags & 1957 if (flock->c.flc_flags & 1958 (~(FL_POSIX | FL_FLOCK | FL_SLEEP 1958 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | 1959 FL_ACCESS | FL_LEASE | FL_CLOS 1959 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK))) 1960 cifs_dbg(FYI, "Unknown lock f 1960 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", 1961 flock->c.flc_flags); 1961 flock->c.flc_flags); 1962 1962 1963 *type = server->vals->large_lock_type 1963 *type = server->vals->large_lock_type; 1964 if (lock_is_write(flock)) { 1964 if (lock_is_write(flock)) { 1965 cifs_dbg(FYI, "F_WRLCK\n"); 1965 cifs_dbg(FYI, "F_WRLCK\n"); 1966 *type |= server->vals->exclus 1966 *type |= server->vals->exclusive_lock_type; 1967 *lock = 1; 1967 *lock = 1; 1968 } else if (lock_is_unlock(flock)) { 1968 } else if (lock_is_unlock(flock)) { 1969 cifs_dbg(FYI, "F_UNLCK\n"); 1969 cifs_dbg(FYI, "F_UNLCK\n"); 1970 *type |= server->vals->unlock 1970 *type |= server->vals->unlock_lock_type; 1971 *unlock = 1; 1971 *unlock = 1; 1972 /* Check if unlock includes m 1972 /* Check if unlock includes more than one lock range */ 1973 } else if (lock_is_read(flock)) { 1973 } else if (lock_is_read(flock)) { 1974 cifs_dbg(FYI, "F_RDLCK\n"); 1974 cifs_dbg(FYI, "F_RDLCK\n"); 1975 *type |= server->vals->shared 1975 *type |= server->vals->shared_lock_type; 1976 *lock = 1; 1976 *lock = 1; 1977 } else if (flock->c.flc_type == F_EXL 1977 } else if (flock->c.flc_type == F_EXLCK) { 1978 cifs_dbg(FYI, "F_EXLCK\n"); 1978 cifs_dbg(FYI, "F_EXLCK\n"); 1979 *type |= server->vals->exclus 1979 *type |= server->vals->exclusive_lock_type; 1980 *lock = 1; 1980 *lock = 1; 1981 } else if (flock->c.flc_type == F_SHL 1981 } else if (flock->c.flc_type == F_SHLCK) { 1982 cifs_dbg(FYI, "F_SHLCK\n"); 1982 cifs_dbg(FYI, "F_SHLCK\n"); 1983 *type |= server->vals->shared 1983 *type |= server->vals->shared_lock_type; 1984 *lock = 1; 1984 *lock = 1; 1985 } else 1985 } else 1986 cifs_dbg(FYI, "Unknown type o 1986 cifs_dbg(FYI, "Unknown type of lock\n"); 1987 } 1987 } 1988 1988 1989 static int 1989 static int 1990 cifs_getlk(struct file *file, struct file_loc 1990 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type, 1991 bool wait_flag, bool posix_lck, un 1991 bool wait_flag, bool posix_lck, unsigned int xid) 1992 { 1992 { 1993 int rc = 0; 1993 int rc = 0; 1994 __u64 length = cifs_flock_len(flock); 1994 __u64 length = cifs_flock_len(flock); 1995 struct cifsFileInfo *cfile = (struct 1995 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; 1996 struct cifs_tcon *tcon = tlink_tcon(c 1996 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1997 struct TCP_Server_Info *server = tcon 1997 struct TCP_Server_Info *server = tcon->ses->server; 1998 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1998 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1999 __u16 netfid = cfile->fid.netfid; 1999 __u16 netfid = cfile->fid.netfid; 2000 2000 2001 if (posix_lck) { 2001 if (posix_lck) { 2002 int posix_lock_type; 2002 int posix_lock_type; 2003 2003 2004 rc = cifs_posix_lock_test(fil 2004 rc = cifs_posix_lock_test(file, flock); 2005 if (!rc) 2005 if (!rc) 2006 return rc; 2006 return rc; 2007 2007 2008 if (type & server->vals->shar 2008 if (type & server->vals->shared_lock_type) 2009 posix_lock_type = CIF 2009 posix_lock_type = CIFS_RDLCK; 2010 else 2010 else 2011 posix_lock_type = CIF 2011 posix_lock_type = CIFS_WRLCK; 2012 rc = CIFSSMBPosixLock(xid, tc 2012 rc = CIFSSMBPosixLock(xid, tcon, netfid, 2013 hash_lo 2013 hash_lockowner(flock->c.flc_owner), 2014 flock-> 2014 flock->fl_start, length, flock, 2015 posix_l 2015 posix_lock_type, wait_flag); 2016 return rc; 2016 return rc; 2017 } 2017 } 2018 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 2018 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2019 2019 2020 rc = cifs_lock_test(cfile, flock->fl_ 2020 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock); 2021 if (!rc) 2021 if (!rc) 2022 return rc; 2022 return rc; 2023 2023 2024 /* BB we could chain these into one l 2024 /* BB we could chain these into one lock request BB */ 2025 rc = server->ops->mand_lock(xid, cfil 2025 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type, 2026 1, 0, fal 2026 1, 0, false); 2027 if (rc == 0) { 2027 if (rc == 0) { 2028 rc = server->ops->mand_lock(x 2028 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, 2029 t 2029 type, 0, 1, false); 2030 flock->c.flc_type = F_UNLCK; 2030 flock->c.flc_type = F_UNLCK; 2031 if (rc != 0) 2031 if (rc != 0) 2032 cifs_dbg(VFS, "Error 2032 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n", 2033 rc); 2033 rc); 2034 return 0; 2034 return 0; 2035 } 2035 } 2036 2036 2037 if (type & server->vals->shared_lock_ 2037 if (type & server->vals->shared_lock_type) { 2038 flock->c.flc_type = F_WRLCK; 2038 flock->c.flc_type = F_WRLCK; 2039 return 0; 2039 return 0; 2040 } 2040 } 2041 2041 2042 type &= ~server->vals->exclusive_lock 2042 type &= ~server->vals->exclusive_lock_type; 2043 2043 2044 rc = server->ops->mand_lock(xid, cfil 2044 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, 2045 type | se 2045 type | server->vals->shared_lock_type, 2046 1, 0, fal 2046 1, 0, false); 2047 if (rc == 0) { 2047 if (rc == 0) { 2048 rc = server->ops->mand_lock(x 2048 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, 2049 type | server->vals-> 2049 type | server->vals->shared_lock_type, 0, 1, false); 2050 flock->c.flc_type = F_RDLCK; 2050 flock->c.flc_type = F_RDLCK; 2051 if (rc != 0) 2051 if (rc != 0) 2052 cifs_dbg(VFS, "Error 2052 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n", 2053 rc); 2053 rc); 2054 } else 2054 } else 2055 flock->c.flc_type = F_WRLCK; 2055 flock->c.flc_type = F_WRLCK; 2056 2056 2057 return 0; 2057 return 0; 2058 } 2058 } 2059 2059 2060 void 2060 void 2061 cifs_move_llist(struct list_head *source, str 2061 cifs_move_llist(struct list_head *source, struct list_head *dest) 2062 { 2062 { 2063 struct list_head *li, *tmp; 2063 struct list_head *li, *tmp; 2064 list_for_each_safe(li, tmp, source) 2064 list_for_each_safe(li, tmp, source) 2065 list_move(li, dest); 2065 list_move(li, dest); 2066 } 2066 } 2067 2067 2068 void 2068 void 2069 cifs_free_llist(struct list_head *llist) 2069 cifs_free_llist(struct list_head *llist) 2070 { 2070 { 2071 struct cifsLockInfo *li, *tmp; 2071 struct cifsLockInfo *li, *tmp; 2072 list_for_each_entry_safe(li, tmp, lli 2072 list_for_each_entry_safe(li, tmp, llist, llist) { 2073 cifs_del_lock_waiters(li); 2073 cifs_del_lock_waiters(li); 2074 list_del(&li->llist); 2074 list_del(&li->llist); 2075 kfree(li); 2075 kfree(li); 2076 } 2076 } 2077 } 2077 } 2078 2078 2079 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2079 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2080 int 2080 int 2081 cifs_unlock_range(struct cifsFileInfo *cfile, 2081 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, 2082 unsigned int xid) 2082 unsigned int xid) 2083 { 2083 { 2084 int rc = 0, stored_rc; 2084 int rc = 0, stored_rc; 2085 static const int types[] = { 2085 static const int types[] = { 2086 LOCKING_ANDX_LARGE_FILES, 2086 LOCKING_ANDX_LARGE_FILES, 2087 LOCKING_ANDX_SHARED_LOCK | LO 2087 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES 2088 }; 2088 }; 2089 unsigned int i; 2089 unsigned int i; 2090 unsigned int max_num, num, max_buf; 2090 unsigned int max_num, num, max_buf; 2091 LOCKING_ANDX_RANGE *buf, *cur; 2091 LOCKING_ANDX_RANGE *buf, *cur; 2092 struct cifs_tcon *tcon = tlink_tcon(c 2092 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 2093 struct cifsInodeInfo *cinode = CIFS_I 2093 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); 2094 struct cifsLockInfo *li, *tmp; 2094 struct cifsLockInfo *li, *tmp; 2095 __u64 length = cifs_flock_len(flock); 2095 __u64 length = cifs_flock_len(flock); 2096 LIST_HEAD(tmp_llist); 2096 LIST_HEAD(tmp_llist); 2097 2097 2098 /* 2098 /* 2099 * Accessing maxBuf is racy with cifs 2099 * Accessing maxBuf is racy with cifs_reconnect - need to store value 2100 * and check it before using. 2100 * and check it before using. 2101 */ 2101 */ 2102 max_buf = tcon->ses->server->maxBuf; 2102 max_buf = tcon->ses->server->maxBuf; 2103 if (max_buf < (sizeof(struct smb_hdr) 2103 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) 2104 return -EINVAL; 2104 return -EINVAL; 2105 2105 2106 BUILD_BUG_ON(sizeof(struct smb_hdr) + 2106 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) > 2107 PAGE_SIZE); 2107 PAGE_SIZE); 2108 max_buf = min_t(unsigned int, max_buf 2108 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr), 2109 PAGE_SIZE); 2109 PAGE_SIZE); 2110 max_num = (max_buf - sizeof(struct sm 2110 max_num = (max_buf - sizeof(struct smb_hdr)) / 2111 2111 sizeof(LOCKING_ANDX_RANGE); 2112 buf = kcalloc(max_num, sizeof(LOCKING 2112 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); 2113 if (!buf) 2113 if (!buf) 2114 return -ENOMEM; 2114 return -ENOMEM; 2115 2115 2116 cifs_down_write(&cinode->lock_sem); 2116 cifs_down_write(&cinode->lock_sem); 2117 for (i = 0; i < 2; i++) { 2117 for (i = 0; i < 2; i++) { 2118 cur = buf; 2118 cur = buf; 2119 num = 0; 2119 num = 0; 2120 list_for_each_entry_safe(li, 2120 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { 2121 if (flock->fl_start > 2121 if (flock->fl_start > li->offset || 2122 (flock->fl_start 2122 (flock->fl_start + length) < 2123 (li->offset + li- 2123 (li->offset + li->length)) 2124 continue; 2124 continue; 2125 if (current->tgid != 2125 if (current->tgid != li->pid) 2126 continue; 2126 continue; 2127 if (types[i] != li->t 2127 if (types[i] != li->type) 2128 continue; 2128 continue; 2129 if (cinode->can_cache 2129 if (cinode->can_cache_brlcks) { 2130 /* 2130 /* 2131 * We can cac 2131 * We can cache brlock requests - simply remove 2132 * a lock fro 2132 * a lock from the file's list. 2133 */ 2133 */ 2134 list_del(&li- 2134 list_del(&li->llist); 2135 cifs_del_lock 2135 cifs_del_lock_waiters(li); 2136 kfree(li); 2136 kfree(li); 2137 continue; 2137 continue; 2138 } 2138 } 2139 cur->Pid = cpu_to_le1 2139 cur->Pid = cpu_to_le16(li->pid); 2140 cur->LengthLow = cpu_ 2140 cur->LengthLow = cpu_to_le32((u32)li->length); 2141 cur->LengthHigh = cpu 2141 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); 2142 cur->OffsetLow = cpu_ 2142 cur->OffsetLow = cpu_to_le32((u32)li->offset); 2143 cur->OffsetHigh = cpu 2143 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); 2144 /* 2144 /* 2145 * We need to save a 2145 * We need to save a lock here to let us add it again to 2146 * the file's list if 2146 * the file's list if the unlock range request fails on 2147 * the server. 2147 * the server. 2148 */ 2148 */ 2149 list_move(&li->llist, 2149 list_move(&li->llist, &tmp_llist); 2150 if (++num == max_num) 2150 if (++num == max_num) { 2151 stored_rc = c 2151 stored_rc = cifs_lockv(xid, tcon, 2152 2152 cfile->fid.netfid, 2153 2153 li->type, num, 0, buf); 2154 if (stored_rc 2154 if (stored_rc) { 2155 /* 2155 /* 2156 * We 2156 * We failed on the unlock range 2157 * re 2157 * request - add all locks from the tmp 2158 * li 2158 * list to the head of the file's list. 2159 */ 2159 */ 2160 cifs_ 2160 cifs_move_llist(&tmp_llist, 2161 2161 &cfile->llist->locks); 2162 rc = 2162 rc = stored_rc; 2163 } else 2163 } else 2164 /* 2164 /* 2165 * Th 2165 * The unlock range request succeed - 2166 * fr 2166 * free the tmp list. 2167 */ 2167 */ 2168 cifs_ 2168 cifs_free_llist(&tmp_llist); 2169 cur = buf; 2169 cur = buf; 2170 num = 0; 2170 num = 0; 2171 } else 2171 } else 2172 cur++; 2172 cur++; 2173 } 2173 } 2174 if (num) { 2174 if (num) { 2175 stored_rc = cifs_lock 2175 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid, 2176 2176 types[i], num, 0, buf); 2177 if (stored_rc) { 2177 if (stored_rc) { 2178 cifs_move_lli 2178 cifs_move_llist(&tmp_llist, 2179 2179 &cfile->llist->locks); 2180 rc = stored_r 2180 rc = stored_rc; 2181 } else 2181 } else 2182 cifs_free_lli 2182 cifs_free_llist(&tmp_llist); 2183 } 2183 } 2184 } 2184 } 2185 2185 2186 up_write(&cinode->lock_sem); 2186 up_write(&cinode->lock_sem); 2187 kfree(buf); 2187 kfree(buf); 2188 return rc; 2188 return rc; 2189 } 2189 } 2190 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 2190 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2191 2191 2192 static int 2192 static int 2193 cifs_setlk(struct file *file, struct file_loc 2193 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, 2194 bool wait_flag, bool posix_lck, in 2194 bool wait_flag, bool posix_lck, int lock, int unlock, 2195 unsigned int xid) 2195 unsigned int xid) 2196 { 2196 { 2197 int rc = 0; 2197 int rc = 0; 2198 __u64 length = cifs_flock_len(flock); 2198 __u64 length = cifs_flock_len(flock); 2199 struct cifsFileInfo *cfile = (struct 2199 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; 2200 struct cifs_tcon *tcon = tlink_tcon(c 2200 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 2201 struct TCP_Server_Info *server = tcon 2201 struct TCP_Server_Info *server = tcon->ses->server; 2202 struct inode *inode = d_inode(cfile-> 2202 struct inode *inode = d_inode(cfile->dentry); 2203 2203 2204 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2204 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2205 if (posix_lck) { 2205 if (posix_lck) { 2206 int posix_lock_type; 2206 int posix_lock_type; 2207 2207 2208 rc = cifs_posix_lock_set(file 2208 rc = cifs_posix_lock_set(file, flock); 2209 if (rc <= FILE_LOCK_DEFERRED) 2209 if (rc <= FILE_LOCK_DEFERRED) 2210 return rc; 2210 return rc; 2211 2211 2212 if (type & server->vals->shar 2212 if (type & server->vals->shared_lock_type) 2213 posix_lock_type = CIF 2213 posix_lock_type = CIFS_RDLCK; 2214 else 2214 else 2215 posix_lock_type = CIF 2215 posix_lock_type = CIFS_WRLCK; 2216 2216 2217 if (unlock == 1) 2217 if (unlock == 1) 2218 posix_lock_type = CIF 2218 posix_lock_type = CIFS_UNLCK; 2219 2219 2220 rc = CIFSSMBPosixLock(xid, tc 2220 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid, 2221 hash_lo 2221 hash_lockowner(flock->c.flc_owner), 2222 flock-> 2222 flock->fl_start, length, 2223 NULL, p 2223 NULL, posix_lock_type, wait_flag); 2224 goto out; 2224 goto out; 2225 } 2225 } 2226 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY * 2226 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2227 if (lock) { 2227 if (lock) { 2228 struct cifsLockInfo *lock; 2228 struct cifsLockInfo *lock; 2229 2229 2230 lock = cifs_lock_init(flock-> 2230 lock = cifs_lock_init(flock->fl_start, length, type, 2231 flock-> 2231 flock->c.flc_flags); 2232 if (!lock) 2232 if (!lock) 2233 return -ENOMEM; 2233 return -ENOMEM; 2234 2234 2235 rc = cifs_lock_add_if(cfile, 2235 rc = cifs_lock_add_if(cfile, lock, wait_flag); 2236 if (rc < 0) { 2236 if (rc < 0) { 2237 kfree(lock); 2237 kfree(lock); 2238 return rc; 2238 return rc; 2239 } 2239 } 2240 if (!rc) 2240 if (!rc) 2241 goto out; 2241 goto out; 2242 2242 2243 /* 2243 /* 2244 * Windows 7 server can delay 2244 * Windows 7 server can delay breaking lease from read to None 2245 * if we set a byte-range loc 2245 * if we set a byte-range lock on a file - break it explicitly 2246 * before sending the lock to 2246 * before sending the lock to the server to be sure the next 2247 * read won't conflict with n 2247 * read won't conflict with non-overlapted locks due to 2248 * pagereading. 2248 * pagereading. 2249 */ 2249 */ 2250 if (!CIFS_CACHE_WRITE(CIFS_I( 2250 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) && 2251 CIFS_ 2251 CIFS_CACHE_READ(CIFS_I(inode))) { 2252 cifs_zap_mapping(inod 2252 cifs_zap_mapping(inode); 2253 cifs_dbg(FYI, "Set no 2253 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n", 2254 inode); 2254 inode); 2255 CIFS_I(inode)->oplock 2255 CIFS_I(inode)->oplock = 0; 2256 } 2256 } 2257 2257 2258 rc = server->ops->mand_lock(x 2258 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, 2259 t 2259 type, 1, 0, wait_flag); 2260 if (rc) { 2260 if (rc) { 2261 kfree(lock); 2261 kfree(lock); 2262 return rc; 2262 return rc; 2263 } 2263 } 2264 2264 2265 cifs_lock_add(cfile, lock); 2265 cifs_lock_add(cfile, lock); 2266 } else if (unlock) 2266 } else if (unlock) 2267 rc = server->ops->mand_unlock 2267 rc = server->ops->mand_unlock_range(cfile, flock, xid); 2268 2268 2269 out: 2269 out: 2270 if ((flock->c.flc_flags & FL_POSIX) | 2270 if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) { 2271 /* 2271 /* 2272 * If this is a request to re 2272 * If this is a request to remove all locks because we 2273 * are closing the file, it d 2273 * are closing the file, it doesn't matter if the 2274 * unlocking failed as both c 2274 * unlocking failed as both cifs.ko and the SMB server 2275 * remove the lock on file cl 2275 * remove the lock on file close 2276 */ 2276 */ 2277 if (rc) { 2277 if (rc) { 2278 cifs_dbg(VFS, "%s fai 2278 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc); 2279 if (!(flock->c.flc_fl 2279 if (!(flock->c.flc_flags & FL_CLOSE)) 2280 return rc; 2280 return rc; 2281 } 2281 } 2282 rc = locks_lock_file_wait(fil 2282 rc = locks_lock_file_wait(file, flock); 2283 } 2283 } 2284 return rc; 2284 return rc; 2285 } 2285 } 2286 2286 2287 int cifs_flock(struct file *file, int cmd, st 2287 int cifs_flock(struct file *file, int cmd, struct file_lock *fl) 2288 { 2288 { 2289 int rc, xid; 2289 int rc, xid; 2290 int lock = 0, unlock = 0; 2290 int lock = 0, unlock = 0; 2291 bool wait_flag = false; 2291 bool wait_flag = false; 2292 bool posix_lck = false; 2292 bool posix_lck = false; 2293 struct cifs_sb_info *cifs_sb; 2293 struct cifs_sb_info *cifs_sb; 2294 struct cifs_tcon *tcon; 2294 struct cifs_tcon *tcon; 2295 struct cifsFileInfo *cfile; 2295 struct cifsFileInfo *cfile; 2296 __u32 type; 2296 __u32 type; 2297 2297 2298 xid = get_xid(); 2298 xid = get_xid(); 2299 2299 2300 if (!(fl->c.flc_flags & FL_FLOCK)) { 2300 if (!(fl->c.flc_flags & FL_FLOCK)) { 2301 rc = -ENOLCK; 2301 rc = -ENOLCK; 2302 free_xid(xid); 2302 free_xid(xid); 2303 return rc; 2303 return rc; 2304 } 2304 } 2305 2305 2306 cfile = (struct cifsFileInfo *)file-> 2306 cfile = (struct cifsFileInfo *)file->private_data; 2307 tcon = tlink_tcon(cfile->tlink); 2307 tcon = tlink_tcon(cfile->tlink); 2308 2308 2309 cifs_read_flock(fl, &type, &lock, &un 2309 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag, 2310 tcon->ses->server); 2310 tcon->ses->server); 2311 cifs_sb = CIFS_FILE_SB(file); 2311 cifs_sb = CIFS_FILE_SB(file); 2312 2312 2313 if (cap_unix(tcon->ses) && 2313 if (cap_unix(tcon->ses) && 2314 (CIFS_UNIX_FCNTL_CAP & le64_to_cp 2314 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2315 ((cifs_sb->mnt_cifs_flags & CIFS_ 2315 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2316 posix_lck = true; 2316 posix_lck = true; 2317 2317 2318 if (!lock && !unlock) { 2318 if (!lock && !unlock) { 2319 /* 2319 /* 2320 * if no lock or unlock then 2320 * if no lock or unlock then nothing to do since we do not 2321 * know what it is 2321 * know what it is 2322 */ 2322 */ 2323 rc = -EOPNOTSUPP; 2323 rc = -EOPNOTSUPP; 2324 free_xid(xid); 2324 free_xid(xid); 2325 return rc; 2325 return rc; 2326 } 2326 } 2327 2327 2328 rc = cifs_setlk(file, fl, type, wait_ 2328 rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock, 2329 xid); 2329 xid); 2330 free_xid(xid); 2330 free_xid(xid); 2331 return rc; 2331 return rc; 2332 2332 2333 2333 2334 } 2334 } 2335 2335 2336 int cifs_lock(struct file *file, int cmd, str 2336 int cifs_lock(struct file *file, int cmd, struct file_lock *flock) 2337 { 2337 { 2338 int rc, xid; 2338 int rc, xid; 2339 int lock = 0, unlock = 0; 2339 int lock = 0, unlock = 0; 2340 bool wait_flag = false; 2340 bool wait_flag = false; 2341 bool posix_lck = false; 2341 bool posix_lck = false; 2342 struct cifs_sb_info *cifs_sb; 2342 struct cifs_sb_info *cifs_sb; 2343 struct cifs_tcon *tcon; 2343 struct cifs_tcon *tcon; 2344 struct cifsFileInfo *cfile; 2344 struct cifsFileInfo *cfile; 2345 __u32 type; 2345 __u32 type; 2346 2346 2347 rc = -EACCES; 2347 rc = -EACCES; 2348 xid = get_xid(); 2348 xid = get_xid(); 2349 2349 2350 cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type 2350 cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd, 2351 flock->c.flc_flags, flock->c 2351 flock->c.flc_flags, flock->c.flc_type, 2352 (long long)flock->fl_start, 2352 (long long)flock->fl_start, 2353 (long long)flock->fl_end); 2353 (long long)flock->fl_end); 2354 2354 2355 cfile = (struct cifsFileInfo *)file-> 2355 cfile = (struct cifsFileInfo *)file->private_data; 2356 tcon = tlink_tcon(cfile->tlink); 2356 tcon = tlink_tcon(cfile->tlink); 2357 2357 2358 cifs_read_flock(flock, &type, &lock, 2358 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag, 2359 tcon->ses->server); 2359 tcon->ses->server); 2360 cifs_sb = CIFS_FILE_SB(file); 2360 cifs_sb = CIFS_FILE_SB(file); 2361 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS 2361 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags); 2362 2362 2363 if (cap_unix(tcon->ses) && 2363 if (cap_unix(tcon->ses) && 2364 (CIFS_UNIX_FCNTL_CAP & le64_to_cp 2364 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2365 ((cifs_sb->mnt_cifs_flags & CIFS_ 2365 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 2366 posix_lck = true; 2366 posix_lck = true; 2367 /* 2367 /* 2368 * BB add code here to normalize offs 2368 * BB add code here to normalize offset and length to account for 2369 * negative length which we can not a 2369 * negative length which we can not accept over the wire. 2370 */ 2370 */ 2371 if (IS_GETLK(cmd)) { 2371 if (IS_GETLK(cmd)) { 2372 rc = cifs_getlk(file, flock, 2372 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid); 2373 free_xid(xid); 2373 free_xid(xid); 2374 return rc; 2374 return rc; 2375 } 2375 } 2376 2376 2377 if (!lock && !unlock) { 2377 if (!lock && !unlock) { 2378 /* 2378 /* 2379 * if no lock or unlock then 2379 * if no lock or unlock then nothing to do since we do not 2380 * know what it is 2380 * know what it is 2381 */ 2381 */ 2382 free_xid(xid); 2382 free_xid(xid); 2383 return -EOPNOTSUPP; 2383 return -EOPNOTSUPP; 2384 } 2384 } 2385 2385 2386 rc = cifs_setlk(file, flock, type, wa 2386 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock, 2387 xid); 2387 xid); 2388 free_xid(xid); 2388 free_xid(xid); 2389 return rc; 2389 return rc; 2390 } 2390 } 2391 2391 2392 void cifs_write_subrequest_terminated(struct 2392 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result, 2393 bool wa 2393 bool was_async) 2394 { 2394 { 2395 struct netfs_io_request *wreq = wdata 2395 struct netfs_io_request *wreq = wdata->rreq; 2396 struct netfs_inode *ictx = netfs_inod 2396 struct netfs_inode *ictx = netfs_inode(wreq->inode); 2397 loff_t wrend; 2397 loff_t wrend; 2398 2398 2399 if (result > 0) { 2399 if (result > 0) { 2400 wrend = wdata->subreq.start + 2400 wrend = wdata->subreq.start + wdata->subreq.transferred + result; 2401 2401 2402 if (wrend > ictx->zero_point 2402 if (wrend > ictx->zero_point && 2403 (wdata->rreq->origin == N 2403 (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE || 2404 wdata->rreq->origin == N 2404 wdata->rreq->origin == NETFS_DIO_WRITE)) 2405 ictx->zero_point = wr 2405 ictx->zero_point = wrend; 2406 if (wrend > ictx->remote_i_si 2406 if (wrend > ictx->remote_i_size) 2407 netfs_resize_file(ict 2407 netfs_resize_file(ictx, wrend, true); 2408 } 2408 } 2409 2409 2410 netfs_write_subrequest_terminated(&wd 2410 netfs_write_subrequest_terminated(&wdata->subreq, result, was_async); 2411 } 2411 } 2412 2412 2413 struct cifsFileInfo *find_readable_file(struc 2413 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 2414 bool 2414 bool fsuid_only) 2415 { 2415 { 2416 struct cifsFileInfo *open_file = NULL 2416 struct cifsFileInfo *open_file = NULL; 2417 struct cifs_sb_info *cifs_sb = CIFS_S 2417 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); 2418 2418 2419 /* only filter by fsuid on multiuser 2419 /* only filter by fsuid on multiuser mounts */ 2420 if (!(cifs_sb->mnt_cifs_flags & CIFS_ 2420 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 2421 fsuid_only = false; 2421 fsuid_only = false; 2422 2422 2423 spin_lock(&cifs_inode->open_file_lock 2423 spin_lock(&cifs_inode->open_file_lock); 2424 /* we could simply get the first_list 2424 /* we could simply get the first_list_entry since write-only entries 2425 are always at the end of the list 2425 are always at the end of the list but since the first entry might 2426 have a close pending, we go throug 2426 have a close pending, we go through the whole list */ 2427 list_for_each_entry(open_file, &cifs_ 2427 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 2428 if (fsuid_only && !uid_eq(ope 2428 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) 2429 continue; 2429 continue; 2430 if (OPEN_FMODE(open_file->f_f 2430 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) { 2431 if ((!open_file->inva 2431 if ((!open_file->invalidHandle)) { 2432 /* found a go 2432 /* found a good file */ 2433 /* lock it so 2433 /* lock it so it will not be closed on us */ 2434 cifsFileInfo_ 2434 cifsFileInfo_get(open_file); 2435 spin_unlock(& 2435 spin_unlock(&cifs_inode->open_file_lock); 2436 return open_f 2436 return open_file; 2437 } /* else might as we 2437 } /* else might as well continue, and look for 2438 another, or simp 2438 another, or simply have the caller reopen it 2439 again rather tha 2439 again rather than trying to fix this handle */ 2440 } else /* write only file */ 2440 } else /* write only file */ 2441 break; /* write only 2441 break; /* write only files are last so must be done */ 2442 } 2442 } 2443 spin_unlock(&cifs_inode->open_file_lo 2443 spin_unlock(&cifs_inode->open_file_lock); 2444 return NULL; 2444 return NULL; 2445 } 2445 } 2446 2446 2447 /* Return -EBADF if no handle is found and ge 2447 /* Return -EBADF if no handle is found and general rc otherwise */ 2448 int 2448 int 2449 cifs_get_writable_file(struct cifsInodeInfo * 2449 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, 2450 struct cifsFileInfo ** 2450 struct cifsFileInfo **ret_file) 2451 { 2451 { 2452 struct cifsFileInfo *open_file, *inv_ 2452 struct cifsFileInfo *open_file, *inv_file = NULL; 2453 struct cifs_sb_info *cifs_sb; 2453 struct cifs_sb_info *cifs_sb; 2454 bool any_available = false; 2454 bool any_available = false; 2455 int rc = -EBADF; 2455 int rc = -EBADF; 2456 unsigned int refind = 0; 2456 unsigned int refind = 0; 2457 bool fsuid_only = flags & FIND_WR_FSU 2457 bool fsuid_only = flags & FIND_WR_FSUID_ONLY; 2458 bool with_delete = flags & FIND_WR_WI 2458 bool with_delete = flags & FIND_WR_WITH_DELETE; 2459 *ret_file = NULL; 2459 *ret_file = NULL; 2460 2460 2461 /* 2461 /* 2462 * Having a null inode here (because 2462 * Having a null inode here (because mapping->host was set to zero by 2463 * the VFS or MM) should not happen b 2463 * the VFS or MM) should not happen but we had reports of on oops (due 2464 * to it being zero) during stress te 2464 * to it being zero) during stress testcases so we need to check for it 2465 */ 2465 */ 2466 2466 2467 if (cifs_inode == NULL) { 2467 if (cifs_inode == NULL) { 2468 cifs_dbg(VFS, "Null inode pas 2468 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n"); 2469 dump_stack(); 2469 dump_stack(); 2470 return rc; 2470 return rc; 2471 } 2471 } 2472 2472 2473 cifs_sb = CIFS_SB(cifs_inode->netfs.i 2473 cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); 2474 2474 2475 /* only filter by fsuid on multiuser 2475 /* only filter by fsuid on multiuser mounts */ 2476 if (!(cifs_sb->mnt_cifs_flags & CIFS_ 2476 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 2477 fsuid_only = false; 2477 fsuid_only = false; 2478 2478 2479 spin_lock(&cifs_inode->open_file_lock 2479 spin_lock(&cifs_inode->open_file_lock); 2480 refind_writable: 2480 refind_writable: 2481 if (refind > MAX_REOPEN_ATT) { 2481 if (refind > MAX_REOPEN_ATT) { 2482 spin_unlock(&cifs_inode->open 2482 spin_unlock(&cifs_inode->open_file_lock); 2483 return rc; 2483 return rc; 2484 } 2484 } 2485 list_for_each_entry(open_file, &cifs_ 2485 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 2486 if (!any_available && open_fi 2486 if (!any_available && open_file->pid != current->tgid) 2487 continue; 2487 continue; 2488 if (fsuid_only && !uid_eq(ope 2488 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) 2489 continue; 2489 continue; 2490 if (with_delete && !(open_fil 2490 if (with_delete && !(open_file->fid.access & DELETE)) 2491 continue; 2491 continue; 2492 if (OPEN_FMODE(open_file->f_f 2492 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { 2493 if (!open_file->inval 2493 if (!open_file->invalidHandle) { 2494 /* found a go 2494 /* found a good writable file */ 2495 cifsFileInfo_ 2495 cifsFileInfo_get(open_file); 2496 spin_unlock(& 2496 spin_unlock(&cifs_inode->open_file_lock); 2497 *ret_file = o 2497 *ret_file = open_file; 2498 return 0; 2498 return 0; 2499 } else { 2499 } else { 2500 if (!inv_file 2500 if (!inv_file) 2501 inv_f 2501 inv_file = open_file; 2502 } 2502 } 2503 } 2503 } 2504 } 2504 } 2505 /* couldn't find usable FH with same 2505 /* couldn't find usable FH with same pid, try any available */ 2506 if (!any_available) { 2506 if (!any_available) { 2507 any_available = true; 2507 any_available = true; 2508 goto refind_writable; 2508 goto refind_writable; 2509 } 2509 } 2510 2510 2511 if (inv_file) { 2511 if (inv_file) { 2512 any_available = false; 2512 any_available = false; 2513 cifsFileInfo_get(inv_file); 2513 cifsFileInfo_get(inv_file); 2514 } 2514 } 2515 2515 2516 spin_unlock(&cifs_inode->open_file_lo 2516 spin_unlock(&cifs_inode->open_file_lock); 2517 2517 2518 if (inv_file) { 2518 if (inv_file) { 2519 rc = cifs_reopen_file(inv_fil 2519 rc = cifs_reopen_file(inv_file, false); 2520 if (!rc) { 2520 if (!rc) { 2521 *ret_file = inv_file; 2521 *ret_file = inv_file; 2522 return 0; 2522 return 0; 2523 } 2523 } 2524 2524 2525 spin_lock(&cifs_inode->open_f 2525 spin_lock(&cifs_inode->open_file_lock); 2526 list_move_tail(&inv_file->fli 2526 list_move_tail(&inv_file->flist, &cifs_inode->openFileList); 2527 spin_unlock(&cifs_inode->open 2527 spin_unlock(&cifs_inode->open_file_lock); 2528 cifsFileInfo_put(inv_file); 2528 cifsFileInfo_put(inv_file); 2529 ++refind; 2529 ++refind; 2530 inv_file = NULL; 2530 inv_file = NULL; 2531 spin_lock(&cifs_inode->open_f 2531 spin_lock(&cifs_inode->open_file_lock); 2532 goto refind_writable; 2532 goto refind_writable; 2533 } 2533 } 2534 2534 2535 return rc; 2535 return rc; 2536 } 2536 } 2537 2537 2538 struct cifsFileInfo * 2538 struct cifsFileInfo * 2539 find_writable_file(struct cifsInodeInfo *cifs 2539 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags) 2540 { 2540 { 2541 struct cifsFileInfo *cfile; 2541 struct cifsFileInfo *cfile; 2542 int rc; 2542 int rc; 2543 2543 2544 rc = cifs_get_writable_file(cifs_inod 2544 rc = cifs_get_writable_file(cifs_inode, flags, &cfile); 2545 if (rc) 2545 if (rc) 2546 cifs_dbg(FYI, "Couldn't find 2546 cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc); 2547 2547 2548 return cfile; 2548 return cfile; 2549 } 2549 } 2550 2550 2551 int 2551 int 2552 cifs_get_writable_path(struct cifs_tcon *tcon 2552 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, 2553 int flags, 2553 int flags, 2554 struct cifsFileInfo ** 2554 struct cifsFileInfo **ret_file) 2555 { 2555 { 2556 struct cifsFileInfo *cfile; 2556 struct cifsFileInfo *cfile; 2557 void *page = alloc_dentry_path(); 2557 void *page = alloc_dentry_path(); 2558 2558 2559 *ret_file = NULL; 2559 *ret_file = NULL; 2560 2560 2561 spin_lock(&tcon->open_file_lock); 2561 spin_lock(&tcon->open_file_lock); 2562 list_for_each_entry(cfile, &tcon->ope 2562 list_for_each_entry(cfile, &tcon->openFileList, tlist) { 2563 struct cifsInodeInfo *cinode; 2563 struct cifsInodeInfo *cinode; 2564 const char *full_path = build 2564 const char *full_path = build_path_from_dentry(cfile->dentry, page); 2565 if (IS_ERR(full_path)) { 2565 if (IS_ERR(full_path)) { 2566 spin_unlock(&tcon->op 2566 spin_unlock(&tcon->open_file_lock); 2567 free_dentry_path(page 2567 free_dentry_path(page); 2568 return PTR_ERR(full_p 2568 return PTR_ERR(full_path); 2569 } 2569 } 2570 if (strcmp(full_path, name)) 2570 if (strcmp(full_path, name)) 2571 continue; 2571 continue; 2572 2572 2573 cinode = CIFS_I(d_inode(cfile 2573 cinode = CIFS_I(d_inode(cfile->dentry)); 2574 spin_unlock(&tcon->open_file_ 2574 spin_unlock(&tcon->open_file_lock); 2575 free_dentry_path(page); 2575 free_dentry_path(page); 2576 return cifs_get_writable_file 2576 return cifs_get_writable_file(cinode, flags, ret_file); 2577 } 2577 } 2578 2578 2579 spin_unlock(&tcon->open_file_lock); 2579 spin_unlock(&tcon->open_file_lock); 2580 free_dentry_path(page); 2580 free_dentry_path(page); 2581 return -ENOENT; 2581 return -ENOENT; 2582 } 2582 } 2583 2583 2584 int 2584 int 2585 cifs_get_readable_path(struct cifs_tcon *tcon 2585 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, 2586 struct cifsFileInfo ** 2586 struct cifsFileInfo **ret_file) 2587 { 2587 { 2588 struct cifsFileInfo *cfile; 2588 struct cifsFileInfo *cfile; 2589 void *page = alloc_dentry_path(); 2589 void *page = alloc_dentry_path(); 2590 2590 2591 *ret_file = NULL; 2591 *ret_file = NULL; 2592 2592 2593 spin_lock(&tcon->open_file_lock); 2593 spin_lock(&tcon->open_file_lock); 2594 list_for_each_entry(cfile, &tcon->ope 2594 list_for_each_entry(cfile, &tcon->openFileList, tlist) { 2595 struct cifsInodeInfo *cinode; 2595 struct cifsInodeInfo *cinode; 2596 const char *full_path = build 2596 const char *full_path = build_path_from_dentry(cfile->dentry, page); 2597 if (IS_ERR(full_path)) { 2597 if (IS_ERR(full_path)) { 2598 spin_unlock(&tcon->op 2598 spin_unlock(&tcon->open_file_lock); 2599 free_dentry_path(page 2599 free_dentry_path(page); 2600 return PTR_ERR(full_p 2600 return PTR_ERR(full_path); 2601 } 2601 } 2602 if (strcmp(full_path, name)) 2602 if (strcmp(full_path, name)) 2603 continue; 2603 continue; 2604 2604 2605 cinode = CIFS_I(d_inode(cfile 2605 cinode = CIFS_I(d_inode(cfile->dentry)); 2606 spin_unlock(&tcon->open_file_ 2606 spin_unlock(&tcon->open_file_lock); 2607 free_dentry_path(page); 2607 free_dentry_path(page); 2608 *ret_file = find_readable_fil 2608 *ret_file = find_readable_file(cinode, 0); 2609 return *ret_file ? 0 : -ENOEN 2609 return *ret_file ? 0 : -ENOENT; 2610 } 2610 } 2611 2611 2612 spin_unlock(&tcon->open_file_lock); 2612 spin_unlock(&tcon->open_file_lock); 2613 free_dentry_path(page); 2613 free_dentry_path(page); 2614 return -ENOENT; 2614 return -ENOENT; 2615 } 2615 } 2616 2616 2617 /* 2617 /* 2618 * Flush data on a strict file. 2618 * Flush data on a strict file. 2619 */ 2619 */ 2620 int cifs_strict_fsync(struct file *file, loff 2620 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end, 2621 int datasync) 2621 int datasync) 2622 { 2622 { 2623 unsigned int xid; 2623 unsigned int xid; 2624 int rc = 0; 2624 int rc = 0; 2625 struct cifs_tcon *tcon; 2625 struct cifs_tcon *tcon; 2626 struct TCP_Server_Info *server; 2626 struct TCP_Server_Info *server; 2627 struct cifsFileInfo *smbfile = file-> 2627 struct cifsFileInfo *smbfile = file->private_data; 2628 struct inode *inode = file_inode(file 2628 struct inode *inode = file_inode(file); 2629 struct cifs_sb_info *cifs_sb = CIFS_S 2629 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2630 2630 2631 rc = file_write_and_wait_range(file, 2631 rc = file_write_and_wait_range(file, start, end); 2632 if (rc) { 2632 if (rc) { 2633 trace_cifs_fsync_err(inode->i 2633 trace_cifs_fsync_err(inode->i_ino, rc); 2634 return rc; 2634 return rc; 2635 } 2635 } 2636 2636 2637 xid = get_xid(); 2637 xid = get_xid(); 2638 2638 2639 cifs_dbg(FYI, "Sync file - name: %pD 2639 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n", 2640 file, datasync); 2640 file, datasync); 2641 2641 2642 if (!CIFS_CACHE_READ(CIFS_I(inode))) 2642 if (!CIFS_CACHE_READ(CIFS_I(inode))) { 2643 rc = cifs_zap_mapping(inode); 2643 rc = cifs_zap_mapping(inode); 2644 if (rc) { 2644 if (rc) { 2645 cifs_dbg(FYI, "rc: %d 2645 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc); 2646 rc = 0; /* don't care 2646 rc = 0; /* don't care about it in fsync */ 2647 } 2647 } 2648 } 2648 } 2649 2649 2650 tcon = tlink_tcon(smbfile->tlink); 2650 tcon = tlink_tcon(smbfile->tlink); 2651 if (!(cifs_sb->mnt_cifs_flags & CIFS_ 2651 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { 2652 server = tcon->ses->server; 2652 server = tcon->ses->server; 2653 if (server->ops->flush == NUL 2653 if (server->ops->flush == NULL) { 2654 rc = -ENOSYS; 2654 rc = -ENOSYS; 2655 goto strict_fsync_exi 2655 goto strict_fsync_exit; 2656 } 2656 } 2657 2657 2658 if ((OPEN_FMODE(smbfile->f_fl 2658 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { 2659 smbfile = find_writab 2659 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY); 2660 if (smbfile) { 2660 if (smbfile) { 2661 rc = server-> 2661 rc = server->ops->flush(xid, tcon, &smbfile->fid); 2662 cifsFileInfo_ 2662 cifsFileInfo_put(smbfile); 2663 } else 2663 } else 2664 cifs_dbg(FYI, 2664 cifs_dbg(FYI, "ignore fsync for file not open for write\n"); 2665 } else 2665 } else 2666 rc = server->ops->flu 2666 rc = server->ops->flush(xid, tcon, &smbfile->fid); 2667 } 2667 } 2668 2668 2669 strict_fsync_exit: 2669 strict_fsync_exit: 2670 free_xid(xid); 2670 free_xid(xid); 2671 return rc; 2671 return rc; 2672 } 2672 } 2673 2673 2674 /* 2674 /* 2675 * Flush data on a non-strict data. 2675 * Flush data on a non-strict data. 2676 */ 2676 */ 2677 int cifs_fsync(struct file *file, loff_t star 2677 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 2678 { 2678 { 2679 unsigned int xid; 2679 unsigned int xid; 2680 int rc = 0; 2680 int rc = 0; 2681 struct cifs_tcon *tcon; 2681 struct cifs_tcon *tcon; 2682 struct TCP_Server_Info *server; 2682 struct TCP_Server_Info *server; 2683 struct cifsFileInfo *smbfile = file-> 2683 struct cifsFileInfo *smbfile = file->private_data; 2684 struct inode *inode = file_inode(file 2684 struct inode *inode = file_inode(file); 2685 struct cifs_sb_info *cifs_sb = CIFS_F 2685 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); 2686 2686 2687 rc = file_write_and_wait_range(file, 2687 rc = file_write_and_wait_range(file, start, end); 2688 if (rc) { 2688 if (rc) { 2689 trace_cifs_fsync_err(file_ino 2689 trace_cifs_fsync_err(file_inode(file)->i_ino, rc); 2690 return rc; 2690 return rc; 2691 } 2691 } 2692 2692 2693 xid = get_xid(); 2693 xid = get_xid(); 2694 2694 2695 cifs_dbg(FYI, "Sync file - name: %pD 2695 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n", 2696 file, datasync); 2696 file, datasync); 2697 2697 2698 tcon = tlink_tcon(smbfile->tlink); 2698 tcon = tlink_tcon(smbfile->tlink); 2699 if (!(cifs_sb->mnt_cifs_flags & CIFS_ 2699 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { 2700 server = tcon->ses->server; 2700 server = tcon->ses->server; 2701 if (server->ops->flush == NUL 2701 if (server->ops->flush == NULL) { 2702 rc = -ENOSYS; 2702 rc = -ENOSYS; 2703 goto fsync_exit; 2703 goto fsync_exit; 2704 } 2704 } 2705 2705 2706 if ((OPEN_FMODE(smbfile->f_fl 2706 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { 2707 smbfile = find_writab 2707 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY); 2708 if (smbfile) { 2708 if (smbfile) { 2709 rc = server-> 2709 rc = server->ops->flush(xid, tcon, &smbfile->fid); 2710 cifsFileInfo_ 2710 cifsFileInfo_put(smbfile); 2711 } else 2711 } else 2712 cifs_dbg(FYI, 2712 cifs_dbg(FYI, "ignore fsync for file not open for write\n"); 2713 } else 2713 } else 2714 rc = server->ops->flu 2714 rc = server->ops->flush(xid, tcon, &smbfile->fid); 2715 } 2715 } 2716 2716 2717 fsync_exit: 2717 fsync_exit: 2718 free_xid(xid); 2718 free_xid(xid); 2719 return rc; 2719 return rc; 2720 } 2720 } 2721 2721 2722 /* 2722 /* 2723 * As file closes, flush all cached write dat 2723 * As file closes, flush all cached write data for this inode checking 2724 * for write behind errors. 2724 * for write behind errors. 2725 */ 2725 */ 2726 int cifs_flush(struct file *file, fl_owner_t 2726 int cifs_flush(struct file *file, fl_owner_t id) 2727 { 2727 { 2728 struct inode *inode = file_inode(file 2728 struct inode *inode = file_inode(file); 2729 int rc = 0; 2729 int rc = 0; 2730 2730 2731 if (file->f_mode & FMODE_WRITE) 2731 if (file->f_mode & FMODE_WRITE) 2732 rc = filemap_write_and_wait(i 2732 rc = filemap_write_and_wait(inode->i_mapping); 2733 2733 2734 cifs_dbg(FYI, "Flush inode %p file %p 2734 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc); 2735 if (rc) { 2735 if (rc) { 2736 /* get more nuanced writeback 2736 /* get more nuanced writeback errors */ 2737 rc = filemap_check_wb_err(fil 2737 rc = filemap_check_wb_err(file->f_mapping, 0); 2738 trace_cifs_flush_err(inode->i 2738 trace_cifs_flush_err(inode->i_ino, rc); 2739 } 2739 } 2740 return rc; 2740 return rc; 2741 } 2741 } 2742 2742 2743 static ssize_t 2743 static ssize_t 2744 cifs_writev(struct kiocb *iocb, struct iov_it 2744 cifs_writev(struct kiocb *iocb, struct iov_iter *from) 2745 { 2745 { 2746 struct file *file = iocb->ki_filp; 2746 struct file *file = iocb->ki_filp; 2747 struct cifsFileInfo *cfile = (struct 2747 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; 2748 struct inode *inode = file->f_mapping 2748 struct inode *inode = file->f_mapping->host; 2749 struct cifsInodeInfo *cinode = CIFS_I 2749 struct cifsInodeInfo *cinode = CIFS_I(inode); 2750 struct TCP_Server_Info *server = tlin 2750 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; 2751 struct cifs_sb_info *cifs_sb = CIFS_S 2751 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2752 ssize_t rc; 2752 ssize_t rc; 2753 2753 2754 rc = netfs_start_io_write(inode); 2754 rc = netfs_start_io_write(inode); 2755 if (rc < 0) 2755 if (rc < 0) 2756 return rc; 2756 return rc; 2757 2757 2758 /* 2758 /* 2759 * We need to hold the sem to be sure 2759 * We need to hold the sem to be sure nobody modifies lock list 2760 * with a brlock that prevents writin 2760 * with a brlock that prevents writing. 2761 */ 2761 */ 2762 down_read(&cinode->lock_sem); 2762 down_read(&cinode->lock_sem); 2763 2763 2764 rc = generic_write_checks(iocb, from) 2764 rc = generic_write_checks(iocb, from); 2765 if (rc <= 0) 2765 if (rc <= 0) 2766 goto out; 2766 goto out; 2767 2767 2768 if ((cifs_sb->mnt_cifs_flags & CIFS_M 2768 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) && 2769 (cifs_find_lock_conflict(cfile, i 2769 (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from), 2770 server-> 2770 server->vals->exclusive_lock_type, 0, 2771 NULL, CI 2771 NULL, CIFS_WRITE_OP))) { 2772 rc = -EACCES; 2772 rc = -EACCES; 2773 goto out; 2773 goto out; 2774 } 2774 } 2775 2775 2776 rc = netfs_buffered_write_iter_locked 2776 rc = netfs_buffered_write_iter_locked(iocb, from, NULL); 2777 2777 2778 out: 2778 out: 2779 up_read(&cinode->lock_sem); 2779 up_read(&cinode->lock_sem); 2780 netfs_end_io_write(inode); 2780 netfs_end_io_write(inode); 2781 if (rc > 0) 2781 if (rc > 0) 2782 rc = generic_write_sync(iocb, 2782 rc = generic_write_sync(iocb, rc); 2783 return rc; 2783 return rc; 2784 } 2784 } 2785 2785 2786 ssize_t 2786 ssize_t 2787 cifs_strict_writev(struct kiocb *iocb, struct 2787 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from) 2788 { 2788 { 2789 struct inode *inode = file_inode(iocb 2789 struct inode *inode = file_inode(iocb->ki_filp); 2790 struct cifsInodeInfo *cinode = CIFS_I 2790 struct cifsInodeInfo *cinode = CIFS_I(inode); 2791 struct cifs_sb_info *cifs_sb = CIFS_S 2791 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2792 struct cifsFileInfo *cfile = (struct 2792 struct cifsFileInfo *cfile = (struct cifsFileInfo *) 2793 2793 iocb->ki_filp->private_data; 2794 struct cifs_tcon *tcon = tlink_tcon(c 2794 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 2795 ssize_t written; 2795 ssize_t written; 2796 2796 2797 written = cifs_get_writer(cinode); 2797 written = cifs_get_writer(cinode); 2798 if (written) 2798 if (written) 2799 return written; 2799 return written; 2800 2800 2801 if (CIFS_CACHE_WRITE(cinode)) { 2801 if (CIFS_CACHE_WRITE(cinode)) { 2802 if (cap_unix(tcon->ses) && 2802 if (cap_unix(tcon->ses) && 2803 (CIFS_UNIX_FCNTL_CAP & le 2803 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 2804 ((cifs_sb->mnt_cifs_flags 2804 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) { 2805 written = netfs_file_ 2805 written = netfs_file_write_iter(iocb, from); 2806 goto out; 2806 goto out; 2807 } 2807 } 2808 written = cifs_writev(iocb, f 2808 written = cifs_writev(iocb, from); 2809 goto out; 2809 goto out; 2810 } 2810 } 2811 /* 2811 /* 2812 * For non-oplocked files in strict c 2812 * For non-oplocked files in strict cache mode we need to write the data 2813 * to the server exactly from the pos 2813 * to the server exactly from the pos to pos+len-1 rather than flush all 2814 * affected pages because it may caus 2814 * affected pages because it may cause a error with mandatory locks on 2815 * these pages but not on the region 2815 * these pages but not on the region from pos to ppos+len-1. 2816 */ 2816 */ 2817 written = netfs_file_write_iter(iocb, 2817 written = netfs_file_write_iter(iocb, from); 2818 if (CIFS_CACHE_READ(cinode)) { 2818 if (CIFS_CACHE_READ(cinode)) { 2819 /* 2819 /* 2820 * We have read level caching 2820 * We have read level caching and we have just sent a write 2821 * request to the server thus 2821 * request to the server thus making data in the cache stale. 2822 * Zap the cache and set oplo 2822 * Zap the cache and set oplock/lease level to NONE to avoid 2823 * reading stale data from th 2823 * reading stale data from the cache. All subsequent read 2824 * operations will read new d 2824 * operations will read new data from the server. 2825 */ 2825 */ 2826 cifs_zap_mapping(inode); 2826 cifs_zap_mapping(inode); 2827 cifs_dbg(FYI, "Set Oplock/Lea 2827 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n", 2828 inode); 2828 inode); 2829 cinode->oplock = 0; 2829 cinode->oplock = 0; 2830 } 2830 } 2831 out: 2831 out: 2832 cifs_put_writer(cinode); 2832 cifs_put_writer(cinode); 2833 return written; 2833 return written; 2834 } 2834 } 2835 2835 2836 ssize_t cifs_loose_read_iter(struct kiocb *io 2836 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter) 2837 { 2837 { 2838 ssize_t rc; 2838 ssize_t rc; 2839 struct inode *inode = file_inode(iocb 2839 struct inode *inode = file_inode(iocb->ki_filp); 2840 2840 2841 if (iocb->ki_flags & IOCB_DIRECT) 2841 if (iocb->ki_flags & IOCB_DIRECT) 2842 return netfs_unbuffered_read_ 2842 return netfs_unbuffered_read_iter(iocb, iter); 2843 2843 2844 rc = cifs_revalidate_mapping(inode); 2844 rc = cifs_revalidate_mapping(inode); 2845 if (rc) 2845 if (rc) 2846 return rc; 2846 return rc; 2847 2847 2848 return netfs_file_read_iter(iocb, ite 2848 return netfs_file_read_iter(iocb, iter); 2849 } 2849 } 2850 2850 2851 ssize_t cifs_file_write_iter(struct kiocb *io 2851 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2852 { 2852 { 2853 struct inode *inode = file_inode(iocb 2853 struct inode *inode = file_inode(iocb->ki_filp); 2854 struct cifsInodeInfo *cinode = CIFS_I 2854 struct cifsInodeInfo *cinode = CIFS_I(inode); 2855 ssize_t written; 2855 ssize_t written; 2856 int rc; 2856 int rc; 2857 2857 2858 if (iocb->ki_filp->f_flags & O_DIRECT 2858 if (iocb->ki_filp->f_flags & O_DIRECT) { 2859 written = netfs_unbuffered_wr 2859 written = netfs_unbuffered_write_iter(iocb, from); 2860 if (written > 0 && CIFS_CACHE 2860 if (written > 0 && CIFS_CACHE_READ(cinode)) { 2861 cifs_zap_mapping(inod 2861 cifs_zap_mapping(inode); 2862 cifs_dbg(FYI, 2862 cifs_dbg(FYI, 2863 "Set no oplo 2863 "Set no oplock for inode=%p after a write operation\n", 2864 inode); 2864 inode); 2865 cinode->oplock = 0; 2865 cinode->oplock = 0; 2866 } 2866 } 2867 return written; 2867 return written; 2868 } 2868 } 2869 2869 2870 written = cifs_get_writer(cinode); 2870 written = cifs_get_writer(cinode); 2871 if (written) 2871 if (written) 2872 return written; 2872 return written; 2873 2873 2874 written = netfs_file_write_iter(iocb, 2874 written = netfs_file_write_iter(iocb, from); 2875 2875 2876 if (!CIFS_CACHE_WRITE(CIFS_I(inode))) 2876 if (!CIFS_CACHE_WRITE(CIFS_I(inode))) { 2877 rc = filemap_fdatawrite(inode 2877 rc = filemap_fdatawrite(inode->i_mapping); 2878 if (rc) 2878 if (rc) 2879 cifs_dbg(FYI, "cifs_f 2879 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n", 2880 rc, inode); 2880 rc, inode); 2881 } 2881 } 2882 2882 2883 cifs_put_writer(cinode); 2883 cifs_put_writer(cinode); 2884 return written; 2884 return written; 2885 } 2885 } 2886 2886 2887 ssize_t 2887 ssize_t 2888 cifs_strict_readv(struct kiocb *iocb, struct 2888 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to) 2889 { 2889 { 2890 struct inode *inode = file_inode(iocb 2890 struct inode *inode = file_inode(iocb->ki_filp); 2891 struct cifsInodeInfo *cinode = CIFS_I 2891 struct cifsInodeInfo *cinode = CIFS_I(inode); 2892 struct cifs_sb_info *cifs_sb = CIFS_S 2892 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2893 struct cifsFileInfo *cfile = (struct 2893 struct cifsFileInfo *cfile = (struct cifsFileInfo *) 2894 2894 iocb->ki_filp->private_data; 2895 struct cifs_tcon *tcon = tlink_tcon(c 2895 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 2896 int rc = -EACCES; 2896 int rc = -EACCES; 2897 2897 2898 /* 2898 /* 2899 * In strict cache mode we need to re 2899 * In strict cache mode we need to read from the server all the time 2900 * if we don't have level II oplock b 2900 * if we don't have level II oplock because the server can delay mtime 2901 * change - so we can't make a decisi 2901 * change - so we can't make a decision about inode invalidating. 2902 * And we can also fail with pageread 2902 * And we can also fail with pagereading if there are mandatory locks 2903 * on pages affected by this read but 2903 * on pages affected by this read but not on the region from pos to 2904 * pos+len-1. 2904 * pos+len-1. 2905 */ 2905 */ 2906 if (!CIFS_CACHE_READ(cinode)) 2906 if (!CIFS_CACHE_READ(cinode)) 2907 return netfs_unbuffered_read_ 2907 return netfs_unbuffered_read_iter(iocb, to); 2908 2908 2909 if ((cifs_sb->mnt_cifs_flags & CIFS_M 2909 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) { 2910 if (iocb->ki_flags & IOCB_DIR 2910 if (iocb->ki_flags & IOCB_DIRECT) 2911 return netfs_unbuffer 2911 return netfs_unbuffered_read_iter(iocb, to); 2912 return netfs_buffered_read_it 2912 return netfs_buffered_read_iter(iocb, to); 2913 } 2913 } 2914 2914 2915 /* 2915 /* 2916 * We need to hold the sem to be sure 2916 * We need to hold the sem to be sure nobody modifies lock list 2917 * with a brlock that prevents readin 2917 * with a brlock that prevents reading. 2918 */ 2918 */ 2919 if (iocb->ki_flags & IOCB_DIRECT) { 2919 if (iocb->ki_flags & IOCB_DIRECT) { 2920 rc = netfs_start_io_direct(in 2920 rc = netfs_start_io_direct(inode); 2921 if (rc < 0) 2921 if (rc < 0) 2922 goto out; 2922 goto out; 2923 rc = -EACCES; 2923 rc = -EACCES; 2924 down_read(&cinode->lock_sem); 2924 down_read(&cinode->lock_sem); 2925 if (!cifs_find_lock_conflict( 2925 if (!cifs_find_lock_conflict( 2926 cfile, iocb->ki_p 2926 cfile, iocb->ki_pos, iov_iter_count(to), 2927 tcon->ses->server 2927 tcon->ses->server->vals->shared_lock_type, 2928 0, NULL, CIFS_REA 2928 0, NULL, CIFS_READ_OP)) 2929 rc = netfs_unbuffered 2929 rc = netfs_unbuffered_read_iter_locked(iocb, to); 2930 up_read(&cinode->lock_sem); 2930 up_read(&cinode->lock_sem); 2931 netfs_end_io_direct(inode); 2931 netfs_end_io_direct(inode); 2932 } else { 2932 } else { 2933 rc = netfs_start_io_read(inod 2933 rc = netfs_start_io_read(inode); 2934 if (rc < 0) 2934 if (rc < 0) 2935 goto out; 2935 goto out; 2936 rc = -EACCES; 2936 rc = -EACCES; 2937 down_read(&cinode->lock_sem); 2937 down_read(&cinode->lock_sem); 2938 if (!cifs_find_lock_conflict( 2938 if (!cifs_find_lock_conflict( 2939 cfile, iocb->ki_p 2939 cfile, iocb->ki_pos, iov_iter_count(to), 2940 tcon->ses->server 2940 tcon->ses->server->vals->shared_lock_type, 2941 0, NULL, CIFS_REA 2941 0, NULL, CIFS_READ_OP)) 2942 rc = filemap_read(ioc 2942 rc = filemap_read(iocb, to, 0); 2943 up_read(&cinode->lock_sem); 2943 up_read(&cinode->lock_sem); 2944 netfs_end_io_read(inode); 2944 netfs_end_io_read(inode); 2945 } 2945 } 2946 out: 2946 out: 2947 return rc; 2947 return rc; 2948 } 2948 } 2949 2949 2950 static vm_fault_t cifs_page_mkwrite(struct vm 2950 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf) 2951 { 2951 { 2952 return netfs_page_mkwrite(vmf, NULL); 2952 return netfs_page_mkwrite(vmf, NULL); 2953 } 2953 } 2954 2954 2955 static const struct vm_operations_struct cifs 2955 static const struct vm_operations_struct cifs_file_vm_ops = { 2956 .fault = filemap_fault, 2956 .fault = filemap_fault, 2957 .map_pages = filemap_map_pages, 2957 .map_pages = filemap_map_pages, 2958 .page_mkwrite = cifs_page_mkwrite, 2958 .page_mkwrite = cifs_page_mkwrite, 2959 }; 2959 }; 2960 2960 2961 int cifs_file_strict_mmap(struct file *file, 2961 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) 2962 { 2962 { 2963 int xid, rc = 0; 2963 int xid, rc = 0; 2964 struct inode *inode = file_inode(file 2964 struct inode *inode = file_inode(file); 2965 2965 2966 xid = get_xid(); 2966 xid = get_xid(); 2967 2967 2968 if (!CIFS_CACHE_READ(CIFS_I(inode))) 2968 if (!CIFS_CACHE_READ(CIFS_I(inode))) 2969 rc = cifs_zap_mapping(inode); 2969 rc = cifs_zap_mapping(inode); 2970 if (!rc) 2970 if (!rc) 2971 rc = generic_file_mmap(file, 2971 rc = generic_file_mmap(file, vma); 2972 if (!rc) 2972 if (!rc) 2973 vma->vm_ops = &cifs_file_vm_o 2973 vma->vm_ops = &cifs_file_vm_ops; 2974 2974 2975 free_xid(xid); 2975 free_xid(xid); 2976 return rc; 2976 return rc; 2977 } 2977 } 2978 2978 2979 int cifs_file_mmap(struct file *file, struct 2979 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) 2980 { 2980 { 2981 int rc, xid; 2981 int rc, xid; 2982 2982 2983 xid = get_xid(); 2983 xid = get_xid(); 2984 2984 2985 rc = cifs_revalidate_file(file); 2985 rc = cifs_revalidate_file(file); 2986 if (rc) 2986 if (rc) 2987 cifs_dbg(FYI, "Validation pri 2987 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", 2988 rc); 2988 rc); 2989 if (!rc) 2989 if (!rc) 2990 rc = generic_file_mmap(file, 2990 rc = generic_file_mmap(file, vma); 2991 if (!rc) 2991 if (!rc) 2992 vma->vm_ops = &cifs_file_vm_o 2992 vma->vm_ops = &cifs_file_vm_ops; 2993 2993 2994 free_xid(xid); 2994 free_xid(xid); 2995 return rc; 2995 return rc; 2996 } 2996 } 2997 2997 2998 static int is_inode_writable(struct cifsInode 2998 static int is_inode_writable(struct cifsInodeInfo *cifs_inode) 2999 { 2999 { 3000 struct cifsFileInfo *open_file; 3000 struct cifsFileInfo *open_file; 3001 3001 3002 spin_lock(&cifs_inode->open_file_lock 3002 spin_lock(&cifs_inode->open_file_lock); 3003 list_for_each_entry(open_file, &cifs_ 3003 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 3004 if (OPEN_FMODE(open_file->f_f 3004 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { 3005 spin_unlock(&cifs_ino 3005 spin_unlock(&cifs_inode->open_file_lock); 3006 return 1; 3006 return 1; 3007 } 3007 } 3008 } 3008 } 3009 spin_unlock(&cifs_inode->open_file_lo 3009 spin_unlock(&cifs_inode->open_file_lock); 3010 return 0; 3010 return 0; 3011 } 3011 } 3012 3012 3013 /* We do not want to update the file size fro 3013 /* We do not want to update the file size from server for inodes 3014 open for write - to avoid races with write 3014 open for write - to avoid races with writepage extending 3015 the file - in the future we could consider 3015 the file - in the future we could consider allowing 3016 refreshing the inode only on increases in 3016 refreshing the inode only on increases in the file size 3017 but this is tricky to do without racing wi 3017 but this is tricky to do without racing with writebehind 3018 page caching in the current Linux kernel d 3018 page caching in the current Linux kernel design */ 3019 bool is_size_safe_to_change(struct cifsInodeI 3019 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file, 3020 bool from_readdir 3020 bool from_readdir) 3021 { 3021 { 3022 if (!cifsInode) 3022 if (!cifsInode) 3023 return true; 3023 return true; 3024 3024 3025 if (is_inode_writable(cifsInode) || 3025 if (is_inode_writable(cifsInode) || 3026 ((cifsInode->oplock & CIFS_CA 3026 ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) { 3027 /* This inode is open for wri 3027 /* This inode is open for write at least once */ 3028 struct cifs_sb_info *cifs_sb; 3028 struct cifs_sb_info *cifs_sb; 3029 3029 3030 cifs_sb = CIFS_SB(cifsInode-> 3030 cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb); 3031 if (cifs_sb->mnt_cifs_flags & 3031 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { 3032 /* since no page cach 3032 /* since no page cache to corrupt on directio 3033 we can change size sa 3033 we can change size safely */ 3034 return true; 3034 return true; 3035 } 3035 } 3036 3036 3037 if (i_size_read(&cifsInode->n 3037 if (i_size_read(&cifsInode->netfs.inode) < end_of_file) 3038 return true; 3038 return true; 3039 3039 3040 return false; 3040 return false; 3041 } else 3041 } else 3042 return true; 3042 return true; 3043 } 3043 } 3044 3044 3045 void cifs_oplock_break(struct work_struct *wo 3045 void cifs_oplock_break(struct work_struct *work) 3046 { 3046 { 3047 struct cifsFileInfo *cfile = containe 3047 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, 3048 3048 oplock_break); 3049 struct inode *inode = d_inode(cfile-> 3049 struct inode *inode = d_inode(cfile->dentry); 3050 struct cifs_sb_info *cifs_sb = CIFS_S 3050 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 3051 struct cifsInodeInfo *cinode = CIFS_I 3051 struct cifsInodeInfo *cinode = CIFS_I(inode); 3052 struct cifs_tcon *tcon; 3052 struct cifs_tcon *tcon; 3053 struct TCP_Server_Info *server; 3053 struct TCP_Server_Info *server; 3054 struct tcon_link *tlink; 3054 struct tcon_link *tlink; 3055 int rc = 0; 3055 int rc = 0; 3056 bool purge_cache = false, oplock_brea 3056 bool purge_cache = false, oplock_break_cancelled; 3057 __u64 persistent_fid, volatile_fid; 3057 __u64 persistent_fid, volatile_fid; 3058 __u16 net_fid; 3058 __u16 net_fid; 3059 3059 3060 wait_on_bit(&cinode->flags, CIFS_INOD 3060 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, 3061 TASK_UNINTERRUPTIBLE) 3061 TASK_UNINTERRUPTIBLE); 3062 3062 3063 tlink = cifs_sb_tlink(cifs_sb); 3063 tlink = cifs_sb_tlink(cifs_sb); 3064 if (IS_ERR(tlink)) 3064 if (IS_ERR(tlink)) 3065 goto out; 3065 goto out; 3066 tcon = tlink_tcon(tlink); 3066 tcon = tlink_tcon(tlink); 3067 server = tcon->ses->server; 3067 server = tcon->ses->server; 3068 3068 3069 server->ops->downgrade_oplock(server, 3069 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, 3070 cfile-> 3070 cfile->oplock_epoch, &purge_cache); 3071 3071 3072 if (!CIFS_CACHE_WRITE(cinode) && CIFS 3072 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && 3073 3073 cifs_has_mand_locks(cinode)) { 3074 cifs_dbg(FYI, "Reset oplock t 3074 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n", 3075 inode); 3075 inode); 3076 cinode->oplock = 0; 3076 cinode->oplock = 0; 3077 } 3077 } 3078 3078 3079 if (inode && S_ISREG(inode->i_mode)) 3079 if (inode && S_ISREG(inode->i_mode)) { 3080 if (CIFS_CACHE_READ(cinode)) 3080 if (CIFS_CACHE_READ(cinode)) 3081 break_lease(inode, O_ 3081 break_lease(inode, O_RDONLY); 3082 else 3082 else 3083 break_lease(inode, O_ 3083 break_lease(inode, O_WRONLY); 3084 rc = filemap_fdatawrite(inode 3084 rc = filemap_fdatawrite(inode->i_mapping); 3085 if (!CIFS_CACHE_READ(cinode) 3085 if (!CIFS_CACHE_READ(cinode) || purge_cache) { 3086 rc = filemap_fdatawai 3086 rc = filemap_fdatawait(inode->i_mapping); 3087 mapping_set_error(ino 3087 mapping_set_error(inode->i_mapping, rc); 3088 cifs_zap_mapping(inod 3088 cifs_zap_mapping(inode); 3089 } 3089 } 3090 cifs_dbg(FYI, "Oplock flush i 3090 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc); 3091 if (CIFS_CACHE_WRITE(cinode)) 3091 if (CIFS_CACHE_WRITE(cinode)) 3092 goto oplock_break_ack 3092 goto oplock_break_ack; 3093 } 3093 } 3094 3094 3095 rc = cifs_push_locks(cfile); 3095 rc = cifs_push_locks(cfile); 3096 if (rc) 3096 if (rc) 3097 cifs_dbg(VFS, "Push locks rc 3097 cifs_dbg(VFS, "Push locks rc = %d\n", rc); 3098 3098 3099 oplock_break_ack: 3099 oplock_break_ack: 3100 /* 3100 /* 3101 * When oplock break is received and 3101 * When oplock break is received and there are no active 3102 * file handles but cached, then sche 3102 * file handles but cached, then schedule deferred close immediately. 3103 * So, new open will not use cached h 3103 * So, new open will not use cached handle. 3104 */ 3104 */ 3105 3105 3106 if (!CIFS_CACHE_HANDLE(cinode) && !li 3106 if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes)) 3107 cifs_close_deferred_file(cino 3107 cifs_close_deferred_file(cinode); 3108 3108 3109 persistent_fid = cfile->fid.persisten 3109 persistent_fid = cfile->fid.persistent_fid; 3110 volatile_fid = cfile->fid.volatile_fi 3110 volatile_fid = cfile->fid.volatile_fid; 3111 net_fid = cfile->fid.netfid; 3111 net_fid = cfile->fid.netfid; 3112 oplock_break_cancelled = cfile->oploc 3112 oplock_break_cancelled = cfile->oplock_break_cancelled; 3113 3113 3114 _cifsFileInfo_put(cfile, false /* do 3114 _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false); 3115 /* 3115 /* 3116 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 3116 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require 3117 * an acknowledgment to be sent when 3117 * an acknowledgment to be sent when the file has already been closed. 3118 */ 3118 */ 3119 spin_lock(&cinode->open_file_lock); 3119 spin_lock(&cinode->open_file_lock); 3120 /* check list empty since can race wi 3120 /* check list empty since can race with kill_sb calling tree disconnect */ 3121 if (!oplock_break_cancelled && !list_ 3121 if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) { 3122 spin_unlock(&cinode->open_fil 3122 spin_unlock(&cinode->open_file_lock); 3123 rc = server->ops->oplock_resp 3123 rc = server->ops->oplock_response(tcon, persistent_fid, 3124 3124 volatile_fid, net_fid, cinode); 3125 cifs_dbg(FYI, "Oplock release 3125 cifs_dbg(FYI, "Oplock release rc = %d\n", rc); 3126 } else 3126 } else 3127 spin_unlock(&cinode->open_fil 3127 spin_unlock(&cinode->open_file_lock); 3128 3128 3129 cifs_put_tlink(tlink); 3129 cifs_put_tlink(tlink); 3130 out: 3130 out: 3131 cifs_done_oplock_break(cinode); 3131 cifs_done_oplock_break(cinode); 3132 } 3132 } 3133 3133 3134 static int cifs_swap_activate(struct swap_inf 3134 static int cifs_swap_activate(struct swap_info_struct *sis, 3135 struct file *sw 3135 struct file *swap_file, sector_t *span) 3136 { 3136 { 3137 struct cifsFileInfo *cfile = swap_fil 3137 struct cifsFileInfo *cfile = swap_file->private_data; 3138 struct inode *inode = swap_file->f_ma 3138 struct inode *inode = swap_file->f_mapping->host; 3139 unsigned long blocks; 3139 unsigned long blocks; 3140 long long isize; 3140 long long isize; 3141 3141 3142 cifs_dbg(FYI, "swap activate\n"); 3142 cifs_dbg(FYI, "swap activate\n"); 3143 3143 3144 if (!swap_file->f_mapping->a_ops->swa 3144 if (!swap_file->f_mapping->a_ops->swap_rw) 3145 /* Cannot support swap */ 3145 /* Cannot support swap */ 3146 return -EINVAL; 3146 return -EINVAL; 3147 3147 3148 spin_lock(&inode->i_lock); 3148 spin_lock(&inode->i_lock); 3149 blocks = inode->i_blocks; 3149 blocks = inode->i_blocks; 3150 isize = inode->i_size; 3150 isize = inode->i_size; 3151 spin_unlock(&inode->i_lock); 3151 spin_unlock(&inode->i_lock); 3152 if (blocks*512 < isize) { 3152 if (blocks*512 < isize) { 3153 pr_warn("swap activate: swapf 3153 pr_warn("swap activate: swapfile has holes\n"); 3154 return -EINVAL; 3154 return -EINVAL; 3155 } 3155 } 3156 *span = sis->pages; 3156 *span = sis->pages; 3157 3157 3158 pr_warn_once("Swap support over SMB3 3158 pr_warn_once("Swap support over SMB3 is experimental\n"); 3159 3159 3160 /* 3160 /* 3161 * TODO: consider adding ACL (or docu 3161 * TODO: consider adding ACL (or documenting how) to prevent other 3162 * users (on this or other systems) f 3162 * users (on this or other systems) from reading it 3163 */ 3163 */ 3164 3164 3165 3165 3166 /* TODO: add sk_set_memalloc(inet) or 3166 /* TODO: add sk_set_memalloc(inet) or similar */ 3167 3167 3168 if (cfile) 3168 if (cfile) 3169 cfile->swapfile = true; 3169 cfile->swapfile = true; 3170 /* 3170 /* 3171 * TODO: Since file already open, we 3171 * TODO: Since file already open, we can't open with DENY_ALL here 3172 * but we could add call to grab a by 3172 * but we could add call to grab a byte range lock to prevent others 3173 * from reading or writing the file 3173 * from reading or writing the file 3174 */ 3174 */ 3175 3175 3176 sis->flags |= SWP_FS_OPS; 3176 sis->flags |= SWP_FS_OPS; 3177 return add_swap_extent(sis, 0, sis->m 3177 return add_swap_extent(sis, 0, sis->max, 0); 3178 } 3178 } 3179 3179 3180 static void cifs_swap_deactivate(struct file 3180 static void cifs_swap_deactivate(struct file *file) 3181 { 3181 { 3182 struct cifsFileInfo *cfile = file->pr 3182 struct cifsFileInfo *cfile = file->private_data; 3183 3183 3184 cifs_dbg(FYI, "swap deactivate\n"); 3184 cifs_dbg(FYI, "swap deactivate\n"); 3185 3185 3186 /* TODO: undo sk_set_memalloc(inet) w 3186 /* TODO: undo sk_set_memalloc(inet) will eventually be needed */ 3187 3187 3188 if (cfile) 3188 if (cfile) 3189 cfile->swapfile = false; 3189 cfile->swapfile = false; 3190 3190 3191 /* do we need to unpin (or unlock) th 3191 /* do we need to unpin (or unlock) the file */ 3192 } 3192 } 3193 3193 3194 /** 3194 /** 3195 * cifs_swap_rw - SMB3 address space operatio 3195 * cifs_swap_rw - SMB3 address space operation for swap I/O 3196 * @iocb: target I/O control block 3196 * @iocb: target I/O control block 3197 * @iter: I/O buffer 3197 * @iter: I/O buffer 3198 * 3198 * 3199 * Perform IO to the swap-file. This is much 3199 * Perform IO to the swap-file. This is much like direct IO. 3200 */ 3200 */ 3201 static int cifs_swap_rw(struct kiocb *iocb, s 3201 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter) 3202 { 3202 { 3203 ssize_t ret; 3203 ssize_t ret; 3204 3204 3205 if (iov_iter_rw(iter) == READ) 3205 if (iov_iter_rw(iter) == READ) 3206 ret = netfs_unbuffered_read_i 3206 ret = netfs_unbuffered_read_iter_locked(iocb, iter); 3207 else 3207 else 3208 ret = netfs_unbuffered_write_ 3208 ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL); 3209 if (ret < 0) 3209 if (ret < 0) 3210 return ret; 3210 return ret; 3211 return 0; 3211 return 0; 3212 } 3212 } 3213 3213 3214 const struct address_space_operations cifs_ad 3214 const struct address_space_operations cifs_addr_ops = { 3215 .read_folio = netfs_read_folio, 3215 .read_folio = netfs_read_folio, 3216 .readahead = netfs_readahead, 3216 .readahead = netfs_readahead, 3217 .writepages = netfs_writepages, 3217 .writepages = netfs_writepages, 3218 .dirty_folio = netfs_dirty_folio, 3218 .dirty_folio = netfs_dirty_folio, 3219 .release_folio = netfs_release_folio 3219 .release_folio = netfs_release_folio, 3220 .direct_IO = noop_direct_IO, 3220 .direct_IO = noop_direct_IO, 3221 .invalidate_folio = netfs_invalidate_ 3221 .invalidate_folio = netfs_invalidate_folio, 3222 .migrate_folio = filemap_migrate_fol 3222 .migrate_folio = filemap_migrate_folio, 3223 /* 3223 /* 3224 * TODO: investigate and if useful we 3224 * TODO: investigate and if useful we could add an is_dirty_writeback 3225 * helper if needed 3225 * helper if needed 3226 */ 3226 */ 3227 .swap_activate = cifs_swap_activate, 3227 .swap_activate = cifs_swap_activate, 3228 .swap_deactivate = cifs_swap_deactiva 3228 .swap_deactivate = cifs_swap_deactivate, 3229 .swap_rw = cifs_swap_rw, 3229 .swap_rw = cifs_swap_rw, 3230 }; 3230 }; 3231 3231 3232 /* 3232 /* 3233 * cifs_readahead requires the server to supp 3233 * cifs_readahead requires the server to support a buffer large enough to 3234 * contain the header plus one complete page 3234 * contain the header plus one complete page of data. Otherwise, we need 3235 * to leave cifs_readahead out of the address 3235 * to leave cifs_readahead out of the address space operations. 3236 */ 3236 */ 3237 const struct address_space_operations cifs_ad 3237 const struct address_space_operations cifs_addr_ops_smallbuf = { 3238 .read_folio = netfs_read_folio, 3238 .read_folio = netfs_read_folio, 3239 .writepages = netfs_writepages, 3239 .writepages = netfs_writepages, 3240 .dirty_folio = netfs_dirty_folio, 3240 .dirty_folio = netfs_dirty_folio, 3241 .release_folio = netfs_release_folio 3241 .release_folio = netfs_release_folio, 3242 .invalidate_folio = netfs_invalidate_ 3242 .invalidate_folio = netfs_invalidate_folio, 3243 .migrate_folio = filemap_migrate_fol 3243 .migrate_folio = filemap_migrate_folio, 3244 }; 3244 }; 3245 3245
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.