// SPDX-License-Identifier: GPL-2.0

//! This module provides a wrapper for the C `struct request` type.
//!
//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)

use crate::{
    bindings,
    block::mq::Operations,
    error::Result,
    types::{ARef, AlwaysRefCounted, Opaque},
};
use core::{
    marker::PhantomData,
    ptr::{addr_of_mut, NonNull},
    sync::atomic::{AtomicU64, Ordering},
};

/// A wrapper around a blk-mq `struct request`. This represents an IO request.
///
/// # Implementation details
///
/// There are four states for a request that the Rust bindings care about:
///
/// A) Request is owned by block layer (refcount 0)
/// B) Request is owned by driver but with zero `ARef`s in existence
///    (refcount 1)
/// C) Request is owned by driver with exactly one `ARef` in existence
///    (refcount 2)
/// D) Request is owned by driver with more than one `ARef` in existence
///    (refcount > 2)
///
///
/// We need to track A and B to ensure we fail tag to request conversions for
/// requests that are not owned by the driver.
///
/// We need to track C and D to ensure that it is safe to end the request and hand
/// back ownership to the block layer.
///
/// The states are tracked through the private `refcount` field of
/// `RequestDataWrapper`. This structure lives in the private data area of the C
/// `struct request`.
///
/// # Invariants
///
/// * `self.0` is a valid `struct request` created by the C portion of the kernel.
/// * The private data area associated with this request must be an initialized
///   and valid `RequestDataWrapper<T>`.
/// * `self` is reference counted by atomic modification of
///   self.wrapper_ref().refcount().
///
#[repr(transparent)]
pub struct Request<T: Operations>(Opaque<bindings::request>, PhantomData<T>);

impl<T: Operations> Request<T> {
    /// Create an `ARef<Request>` from a `struct request` pointer.
    ///
    /// # Safety
    ///
    /// * The caller must own a refcount on `ptr` that is transferred to the
    ///   returned `ARef`.
    /// * The type invariants for `Request` must hold for the pointee of `ptr`.
    pub(crate) unsafe fn aref_from_raw(ptr: *mut bindings::request) -> ARef<Self> {
        // INVARIANT: By the safety requirements of this function, invariants are upheld.
        // SAFETY: By the safety requirement of this function, we own a
        // reference count that we can pass to `ARef`.
        unsafe { ARef::from_raw(NonNull::new_unchecked(ptr as *const Self as *mut Self)) }
    }

    /// Notify the block layer that a request is going to be processed now.
    ///
    /// The block layer uses this hook to do proper initializations such as
    /// starting the timeout timer. It is a requirement that block device
    /// drivers call this function when starting to process a request.
    ///
    /// # Safety
    ///
    /// The caller must have exclusive ownership of `self`, that is
    /// `self.wrapper_ref().refcount() == 2`.
    pub(crate) unsafe fn start_unchecked(this: &ARef<Self>) {
        // SAFETY: By type invariant, `self.0` is a valid `struct request` and
        // we have exclusive access.
        unsafe { bindings::blk_mq_start_request(this.0.get()) };
    }

    /// Try to take exclusive ownership of `this` by dropping the refcount to 0.
    /// This fails if `this` is not the only `ARef` pointing to the underlying
    /// `Request`.
    ///
    /// If the operation is successful, `Ok` is returned with a pointer to the
    /// C `struct request`. If the operation fails, `this` is returned in the
    /// `Err` variant.
    fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {
        // We can race with `TagSet::tag_to_rq`, so the 2 -> 0 transition must
        // be a single atomic compare-exchange; it only succeeds in state C
        // (driver owns the request and `this` is the sole `ARef`).
        if let Err(_old) = this.wrapper_ref().refcount().compare_exchange(
            2,
            0,
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            return Err(this);
        }

        let request_ptr = this.0.get();
        // The refcount was already dropped to 0 above, so `this` must not run
        // its destructor (which would decrement again); ownership moves to the
        // returned raw pointer.
        core::mem::forget(this);

        Ok(request_ptr)
    }

    /// Notify the block layer that the request has been completed without errors.
    ///
    /// This function will return `Err` if `this` is not the only `ARef`
    /// referencing the request.
    pub fn end_ok(this: ARef<Self>) -> Result<(), ARef<Self>> {
        let request_ptr = Self::try_set_end(this)?;

        // SAFETY: By type invariant, `this.0` was a valid `struct request`. The
        // success of the call to `try_set_end` guarantees that there are no
        // `ARef`s pointing to this request. Therefore it is safe to hand it
        // back to the block layer.
        unsafe { bindings::blk_mq_end_request(request_ptr, bindings::BLK_STS_OK as _) };

        Ok(())
    }

    /// Return a pointer to the `RequestDataWrapper` stored in the private area
    /// of the request structure.
    ///
    /// # Safety
    ///
    /// - `this` must point to a valid allocation of size at least size of
    ///   `Self` plus size of `RequestDataWrapper`.
    pub(crate) unsafe fn wrapper_ptr(this: *mut Self) -> NonNull<RequestDataWrapper> {
        // `Request` is `#[repr(transparent)]` over the C request, so the cast
        // is sound.
        let request_ptr = this.cast::<bindings::request>();
        // SAFETY: By safety requirements for this function, `this` is a
        // valid allocation.
        let wrapper_ptr =
            unsafe { bindings::blk_mq_rq_to_pdu(request_ptr).cast::<RequestDataWrapper>() };
        // SAFETY: By C API contract, wrapper_ptr points to a valid allocation
        // and is not null.
        unsafe { NonNull::new_unchecked(wrapper_ptr) }
    }

    /// Return a reference to the `RequestDataWrapper` stored in the private
    /// area of the request structure.
    pub(crate) fn wrapper_ref(&self) -> &RequestDataWrapper {
        // SAFETY: By type invariant, `self.0` is a valid allocation. Further,
        // the private data associated with this request is initialized and
        // valid. The existence of `&self` guarantees that the private data is
        // valid as a shared reference.
        unsafe { Self::wrapper_ptr(self as *const Self as *mut Self).as_ref() }
    }
}

/// A wrapper around data stored in the private area of the C `struct request`.
pub(crate) struct RequestDataWrapper {
    /// The Rust request refcount has the following states:
    ///
    /// - 0: The request is owned by C block layer.
    /// - 1: The request is owned by Rust abstractions but there are no ARef references to it.
    /// - 2+: There are `ARef` references to the request.
    refcount: AtomicU64,
}

impl RequestDataWrapper {
    /// Return a reference to the refcount of the request that is embedding
    /// `self`.
    pub(crate) fn refcount(&self) -> &AtomicU64 {
        &self.refcount
    }

    /// Return a pointer to the refcount of the request that is embedding the
    /// pointee of `this`.
    ///
    /// # Safety
    ///
    /// - `this` must point to a live allocation of at least the size of `Self`.
    pub(crate) unsafe fn refcount_ptr(this: *mut Self) -> *mut AtomicU64 {
        // SAFETY: Because of the safety requirements of this function, the
        // field projection is safe.
        unsafe { addr_of_mut!((*this).refcount) }
    }
}

// SAFETY: Exclusive access is thread-safe for `Request`. `Request` has no `&mut
// self` methods and `&self` methods that mutate `self` are internally
// synchronized.
unsafe impl<T: Operations> Send for Request<T> {}

// SAFETY: Shared access is thread-safe for `Request`. `&self` methods that
// mutate `self` are internally synchronized.
unsafe impl<T: Operations> Sync for Request<T> {}

/// Store the result of `op(target.load())` in target, returning new value of
/// target.
fn atomic_relaxed_op_return(target: &AtomicU64, op: impl Fn(u64) -> u64) -> u64 {
    let old = target.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| Some(op(x)));

    // SAFETY: Because the operation passed to `fetch_update` above always
    // returns `Some`, `old` will always be `Ok`.
    let old = unsafe { old.unwrap_unchecked() };

    // NOTE: `op` must be a pure function for this to equal the stored value —
    // it is re-applied here rather than re-read from `target`.
    op(old)
}

/// Store the result of `op(target.load)` in `target` if `target.load() !=
/// pred`, returning true if the target was updated.
fn atomic_relaxed_op_unless(target: &AtomicU64, op: impl Fn(u64) -> u64, pred: u64) -> bool {
    target
        .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
            if x == pred {
                // Returning `None` aborts `fetch_update`, leaving `target`
                // unchanged and yielding `Err`.
                None
            } else {
                Some(op(x))
            }
        })
        .is_ok()
}

// SAFETY: All instances of `Request<T>` are reference counted. This
// implementation of `AlwaysRefCounted` ensures that increments to the ref count
// keep the object alive in memory at least until a matching reference count
// decrement is executed.
unsafe impl<T: Operations> AlwaysRefCounted for Request<T> {
    fn inc_ref(&self) {
        let refcount = &self.wrapper_ref().refcount();

        // The increment is refused when the count is 0 (request owned by the C
        // block layer) — cloning an `ARef` in that state would be a bug.
        #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
        let updated = atomic_relaxed_op_unless(refcount, |x| x + 1, 0);

        #[cfg(CONFIG_DEBUG_MISC)]
        if !updated {
            panic!("Request refcount zero on clone")
        }
    }

    unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
        // SAFETY: The type invariants of `ARef` guarantee that `obj` is valid
        // for read.
        let wrapper_ptr = unsafe { Self::wrapper_ptr(obj.as_ptr()).as_ptr() };
        // SAFETY: The type invariant of `Request` guarantees that the private
        // data area is initialized and valid.
        let refcount = unsafe { &*RequestDataWrapper::refcount_ptr(wrapper_ptr) };

        // Dropping the last `ARef` takes the count 2 -> 1 (driver-owned, no
        // `ARef`s); reaching 0 here would mean an unbalanced decrement.
        #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
        let new_refcount = atomic_relaxed_op_return(refcount, |x| x - 1);

        #[cfg(CONFIG_DEBUG_MISC)]
        if new_refcount == 0 {
            panic!("Request reached refcount zero in Rust abstractions");
        }
    }
}
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.