// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/tty.h>

#include "internal.h"

static struct kobj_map *cdev_map __ro_after_init;

static DEFINE_MUTEX(chrdevs_lock);

#define CHRDEV_MAJOR_HASH_SIZE 255

static struct char_device_struct {
        struct char_device_struct *next;
        unsigned int major;
        unsigned int baseminor;
        int minorct;
        char name[64];
        struct cdev *cdev;              /* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(unsigned major)
{
        return major % CHRDEV_MAJOR_HASH_SIZE;
}

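/*
 * Worked example of the hashing above: registered majors live in a small
 * hash table of singly-linked lists.  Majors below CHRDEV_MAJOR_HASH_SIZE
 * map to the bucket of the same number, larger majors wrap around; major 45
 * and major 300 (300 % 255 == 45) share bucket 45.  That is why every walk
 * of a bucket below still compares cd->major instead of trusting the bucket
 * index alone.
 */
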
#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
        struct char_device_struct *cd;

        mutex_lock(&chrdevs_lock);
        for (cd = chrdevs[major_to_index(offset)]; cd; cd = cd->next) {
                if (cd->major == offset)
                        seq_printf(f, "%3d %s\n", cd->major, cd->name);
        }
        mutex_unlock(&chrdevs_lock);
}

#endif /* CONFIG_PROC_FS */

static int find_dynamic_major(void)
{
        int i;
        struct char_device_struct *cd;

        for (i = ARRAY_SIZE(chrdevs)-1; i >= CHRDEV_MAJOR_DYN_END; i--) {
                if (chrdevs[i] == NULL)
                        return i;
        }

        for (i = CHRDEV_MAJOR_DYN_EXT_START;
             i >= CHRDEV_MAJOR_DYN_EXT_END; i--) {
                for (cd = chrdevs[major_to_index(i)]; cd; cd = cd->next)
                        if (cd->major == i)
                                break;

                if (cd == NULL)
                        return i;
        }

        return -EBUSY;
}

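/*
 * Note on the search order above (the exact bounds come from <linux/fs.h>
 * and may shift between kernel versions): dynamic allocation first scans
 * the classic range from 254 down to CHRDEV_MAJOR_DYN_END (currently 234)
 * for an empty hash bucket, then falls back to the extended range from
 * CHRDEV_MAJOR_DYN_EXT_START (511) down to CHRDEV_MAJOR_DYN_EXT_END (384),
 * where it must walk the shared bucket to see whether that major itself is
 * still free.
 */
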
/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate an unused major.
 * If major > 0 this function will attempt to reserve the range of minors
 * with the given major.
 *
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
                           int minorct, const char *name)
{
        struct char_device_struct *cd, *curr, *prev = NULL;
        int ret;
        int i;

        if (major >= CHRDEV_MAJOR_MAX) {
                pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n",
                       name, major, CHRDEV_MAJOR_MAX-1);
                return ERR_PTR(-EINVAL);
        }

        if (minorct > MINORMASK + 1 - baseminor) {
                pr_err("CHRDEV \"%s\" minor range requested (%u-%u) is out of range of maximum range (%u-%u) for a single major\n",
                        name, baseminor, baseminor + minorct - 1, 0, MINORMASK);
                return ERR_PTR(-EINVAL);
        }

        cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&chrdevs_lock);

        if (major == 0) {
                ret = find_dynamic_major();
                if (ret < 0) {
                        pr_err("CHRDEV \"%s\" dynamic allocation region is full\n",
                               name);
                        goto out;
                }
                major = ret;
        }

        ret = -EBUSY;
        i = major_to_index(major);
        for (curr = chrdevs[i]; curr; prev = curr, curr = curr->next) {
                if (curr->major < major)
                        continue;

                if (curr->major > major)
                        break;

                if (curr->baseminor + curr->minorct <= baseminor)
                        continue;

                if (curr->baseminor >= baseminor + minorct)
                        break;

                goto out;
        }

        cd->major = major;
        cd->baseminor = baseminor;
        cd->minorct = minorct;
        strscpy(cd->name, name, sizeof(cd->name));

        if (!prev) {
                cd->next = curr;
                chrdevs[i] = cd;
        } else {
                cd->next = prev->next;
                prev->next = cd;
        }

        mutex_unlock(&chrdevs_lock);
        return cd;
out:
        mutex_unlock(&chrdevs_lock);
        kfree(cd);
        return ERR_PTR(ret);
}

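/*
 * Worked example of the overlap check above (the names and numbers are made
 * up for illustration): if an existing entry holds major 120, baseminor 0,
 * minorct 16 (minors 0-15), a later request for major 120, baseminor 8,
 * minorct 16 (minors 8-23) overlaps it, hits the "goto out" case and the
 * caller gets ERR_PTR(-EBUSY).  A request for major 120, baseminor 16,
 * minorct 16 (minors 16-31) does not overlap and is linked into the bucket
 * in baseminor order.
 */
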
static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
        struct char_device_struct *cd = NULL, **cp;
        int i = major_to_index(major);

        mutex_lock(&chrdevs_lock);
        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major == major &&
                    (*cp)->baseminor == baseminor &&
                    (*cp)->minorct == minorct)
                        break;
        if (*cp) {
                cd = *cp;
                *cp = cd->next;
        }
        mutex_unlock(&chrdevs_lock);
        return cd;
}

/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
        struct char_device_struct *cd;
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                cd = __register_chrdev_region(MAJOR(n), MINOR(n),
                               next - n, name);
                if (IS_ERR(cd))
                        goto fail;
        }
        return 0;
fail:
        to = n;
        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
        return PTR_ERR(cd);
}

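/*
 * Usage sketch (illustrative only; FOO_MAJOR, FOO_NR_DEVS and the "foo"
 * name are made-up values, not part of this file).  A driver that owns a
 * fixed, well-known major reserves its numbers at init time and releases
 * them on exit:
 *
 *      dev_t devt = MKDEV(FOO_MAJOR, 0);
 *      int err;
 *
 *      err = register_chrdev_region(devt, FOO_NR_DEVS, "foo");
 *      if (err)
 *              return err;
 *      ...
 *      unregister_chrdev_region(devt, FOO_NR_DEVS);
 *
 * Drivers that do not need a specific major should prefer
 * alloc_chrdev_region() below.
 */
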
/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for first assigned number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor number)
 * in @dev.  Returns zero or a negative error code.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
                        const char *name)
{
        struct char_device_struct *cd;
        cd = __register_chrdev_region(0, baseminor, count, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        *dev = MKDEV(cd->major, cd->baseminor);
        return 0;
}

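/*
 * Usage sketch (illustrative only; the variable names, FOO_NR_DEVS and the
 * "foo" name are made up).  The common pattern for a driver that does not
 * care which major it gets:
 *
 *      dev_t devt;
 *      int err;
 *
 *      err = alloc_chrdev_region(&devt, 0, FOO_NR_DEVS, "foo");
 *      if (err)
 *              return err;
 *      pr_info("foo: using major %d\n", MAJOR(devt));
 *      ...
 *      unregister_chrdev_region(devt, FOO_NR_DEVS);
 */
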
/**
 * __register_chrdev() - create and register a cdev occupying a range of minors
 * @major: major device number or 0 for dynamic allocation
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev. It only helps to keep track of the different owners of devices. If
 * your module has only one type of devices it's ok to use e.g. the name
 * of the module here.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
                      unsigned int count, const char *name,
                      const struct file_operations *fops)
{
        struct char_device_struct *cd;
        struct cdev *cdev;
        int err = -ENOMEM;

        cd = __register_chrdev_region(major, baseminor, count, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);

        cdev = cdev_alloc();
        if (!cdev)
                goto out2;

        cdev->owner = fops->owner;
        cdev->ops = fops;
        kobject_set_name(&cdev->kobj, "%s", name);

        err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
        if (err)
                goto out;

        cd->cdev = cdev;

        return major ? 0 : cd->major;
out:
        kobject_put(&cdev->kobj);
out2:
        kfree(__unregister_chrdev_region(cd->major, baseminor, count));
        return err;
}

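/*
 * Usage sketch (illustrative only; "foo" and foo_fops are made-up names).
 * Most callers reach this through the register_chrdev()/unregister_chrdev()
 * wrappers in <linux/fs.h>, which pass a baseminor of 0 and a count of 256:
 *
 *      int major = register_chrdev(0, "foo", &foo_fops);
 *      if (major < 0)
 *              return major;
 *      ...
 *      unregister_chrdev(major, "foo");
 *
 * Passing a nonzero major requests that specific major instead, in which
 * case 0 is returned on success.
 */
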
/**
 * unregister_chrdev_region() - unregister a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place...
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
}

/**
 * __unregister_chrdev - unregister and destroy a cdev
 * @major: major device number
 * @baseminor: first of the range of minor numbers
 * @count: the number of minor numbers this cdev is occupying
 * @name: name of this range of devices
 *
 * Unregister and destroy the cdev occupying the region described by
 * @major, @baseminor and @count.  This function undoes what
 * __register_chrdev() did.
 */
void __unregister_chrdev(unsigned int major, unsigned int baseminor,
                         unsigned int count, const char *name)
{
        struct char_device_struct *cd;

        cd = __unregister_chrdev_region(major, baseminor, count);
        if (cd && cd->cdev)
                cdev_del(cd->cdev);
        kfree(cd);
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
        struct module *owner = p->owner;
        struct kobject *kobj;

        if (!try_module_get(owner))
                return NULL;
        kobj = kobject_get_unless_zero(&p->kobj);
        if (!kobj)
                module_put(owner);
        return kobj;
}

void cdev_put(struct cdev *p)
{
        if (p) {
                struct module *owner = p->owner;
                kobject_put(&p->kobj);
                module_put(owner);
        }
}

/*
 * Called every time a character special file is opened
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
        const struct file_operations *fops;
        struct cdev *p;
        struct cdev *new = NULL;
        int ret = 0;

        spin_lock(&cdev_lock);
        p = inode->i_cdev;
        if (!p) {
                struct kobject *kobj;
                int idx;
                spin_unlock(&cdev_lock);
                kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
                if (!kobj)
                        return -ENXIO;
                new = container_of(kobj, struct cdev, kobj);
                spin_lock(&cdev_lock);
                /* Check i_cdev again in case somebody beat us to it while
                   we dropped the lock. */
                p = inode->i_cdev;
                if (!p) {
                        inode->i_cdev = p = new;
                        list_add(&inode->i_devices, &p->list);
                        new = NULL;
                } else if (!cdev_get(p))
                        ret = -ENXIO;
        } else if (!cdev_get(p))
                ret = -ENXIO;
        spin_unlock(&cdev_lock);
        cdev_put(new);
        if (ret)
                return ret;

        ret = -ENXIO;
        fops = fops_get(p->ops);
        if (!fops)
                goto out_cdev_put;

        replace_fops(filp, fops);
        if (filp->f_op->open) {
                ret = filp->f_op->open(inode, filp);
                if (ret)
                        goto out_cdev_put;
        }

        return 0;

 out_cdev_put:
        cdev_put(p);
        return ret;
}

void cd_forget(struct inode *inode)
{
        spin_lock(&cdev_lock);
        list_del_init(&inode->i_devices);
        inode->i_cdev = NULL;
        inode->i_mapping = &inode->i_data;
        spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
        spin_lock(&cdev_lock);
        while (!list_empty(&cdev->list)) {
                struct inode *inode;
                inode = container_of(cdev->list.next, struct inode, i_devices);
                list_del_init(&inode->i_devices);
                inode->i_cdev = NULL;
        }
        spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
        .open = chrdev_open,
        .llseek = noop_llseek,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
        struct cdev *p = data;
        return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
        struct cdev *p = data;
        return cdev_get(p) ? 0 : -1;
}

/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
        int error;

        p->dev = dev;
        p->count = count;

        if (WARN_ON(dev == WHITEOUT_DEV)) {
                error = -EBUSY;
                goto err;
        }

        error = kobj_map(cdev_map, dev, count, NULL,
                         exact_match, exact_lock, p);
        if (error)
                goto err;

        kobject_get(p->kobj.parent);

        return 0;

err:
        kfree_const(p->kobj.name);
        p->kobj.name = NULL;
        return error;
}

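/*
 * Usage sketch (illustrative only; foo_dev, foo_fops and FOO_NR_DEVS are
 * made-up names).  The usual pairing of the region and cdev APIs in a driver
 * with an embedded struct cdev:
 *
 *      err = alloc_chrdev_region(&foo_dev->devt, 0, FOO_NR_DEVS, "foo");
 *      if (err)
 *              return err;
 *      cdev_init(&foo_dev->cdev, &foo_fops);
 *      foo_dev->cdev.owner = THIS_MODULE;
 *      err = cdev_add(&foo_dev->cdev, foo_dev->devt, FOO_NR_DEVS);
 *      if (err)
 *              goto out_unregister;
 *
 * Once cdev_add() returns, an open() on a matching device node can reach
 * foo_fops, so everything the fops rely on must be initialized beforehand.
 */
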
/**
 * cdev_set_parent() - set the parent kobject for a char device
 * @p: the cdev structure
 * @kobj: the kobject to take a reference to
 *
 * cdev_set_parent() sets a parent kobject which will be referenced
 * appropriately so the parent is not freed before the cdev. This
 * should be called before cdev_add().
 */
void cdev_set_parent(struct cdev *p, struct kobject *kobj)
{
        WARN_ON(!kobj->state_initialized);
        p->kobj.parent = kobj;
}

/**
 * cdev_device_add() - add a char device and its corresponding
 *	struct device, linking them together
 * @dev: the device structure
 * @cdev: the cdev structure
 *
 * cdev_device_add() adds the char device represented by @cdev to the system,
 * just as cdev_add does. It then adds @dev to the system using device_add().
 * The dev_t for the char device will be taken from the struct device which
 * needs to be initialized first. This helper function correctly takes a
 * reference to the parent device so the parent will not get released until
 * all references to the cdev are released.
 *
 * This helper uses dev->devt for the device number. If it is not set
 * it will not add the cdev and it will be equivalent to device_add.
 *
 * This function should be used whenever the struct cdev and the
 * struct device are members of the same structure whose lifetime is
 * managed by the struct device.
 *
 * NOTE: Callers must assume that userspace was able to open the cdev and
 * can call cdev fops callbacks at any time, even if this function fails.
 */
int cdev_device_add(struct cdev *cdev, struct device *dev)
{
        int rc = 0;

        if (dev->devt) {
                cdev_set_parent(cdev, &dev->kobj);

                rc = cdev_add(cdev, dev->devt, 1);
                if (rc)
                        return rc;
        }

        rc = device_add(dev);
        if (rc && dev->devt)
                cdev_del(cdev);

        return rc;
}

/**
 * cdev_device_del() - inverse of cdev_device_add
 * @dev: the device structure
 * @cdev: the cdev structure
 *
 * cdev_device_del() is a helper function to call cdev_del and device_del.
 * It should be used whenever cdev_device_add is used.
 *
 * If dev->devt is not set it will not remove the cdev and will be equivalent
 * to device_del.
 *
 * NOTE: This guarantees that associated sysfs callbacks are not running
 * or runnable, however any cdevs already open will remain and their fops
 * will still be callable even after this function returns.
 */
void cdev_device_del(struct cdev *cdev, struct device *dev)
{
        device_del(dev);
        if (dev->devt)
                cdev_del(cdev);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
        kobj_unmap(cdev_map, dev, count);
}

/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 *
 * NOTE: This guarantees that the cdev device will no longer be able to be
 * opened, however any cdevs already open will remain and their fops will
 * still be callable even after cdev_del returns.
 */
void cdev_del(struct cdev *p)
{
        cdev_unmap(p->dev, p->count);
        kobject_put(&p->kobj);
}

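/*
 * Usage sketch (illustrative only; struct foo_device, foo and foo_fops are
 * made-up names).  For a driver whose cdev and struct device live in one
 * containing structure, the add/remove pairing looks roughly like:
 *
 *      struct foo_device {
 *              struct device dev;
 *              struct cdev cdev;
 *      };
 *
 *      device_initialize(&foo->dev);
 *      foo->dev.devt = devt;
 *      cdev_init(&foo->cdev, &foo_fops);
 *      err = cdev_device_add(&foo->cdev, &foo->dev);
 *      ...
 *      cdev_device_del(&foo->cdev, &foo->dev);
 *
 * The helper takes the parent reference itself, so no explicit
 * cdev_set_parent() call is needed in this pattern.
 */
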
static void cdev_default_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        struct kobject *parent = kobj->parent;

        cdev_purge(p);
        kobject_put(parent);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        struct kobject *parent = kobj->parent;

        cdev_purge(p);
        kfree(p);
        kobject_put(parent);
}

static struct kobj_type ktype_cdev_default = {
        .release        = cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
        .release        = cdev_dynamic_release,
};

/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
        struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
        if (p) {
                INIT_LIST_HEAD(&p->list);
                kobject_init(&p->kobj, &ktype_cdev_dynamic);
        }
        return p;
}

/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
        memset(cdev, 0, sizeof *cdev);
        INIT_LIST_HEAD(&cdev->list);
        kobject_init(&cdev->kobj, &ktype_cdev_default);
        cdev->ops = fops;
}

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
        if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
                /* Make old-style 2.4 aliases work */
                request_module("char-major-%d", MAJOR(dev));
        return NULL;
}

void __init chrdev_init(void)
{
        cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}


/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(cdev_set_parent);
EXPORT_SYMBOL(cdev_device_add);
EXPORT_SYMBOL(cdev_device_del);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);
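
/*
 * Closing note on the two kobj_types defined above: a cdev obtained from
 * cdev_alloc() uses ktype_cdev_dynamic, so the final kobject_put()
 * (typically via cdev_del()) frees the structure itself.  A cdev initialized
 * in place with cdev_init(), e.g. one embedded in a driver's own structure,
 * uses ktype_cdev_default and is never kfree()d here; its storage belongs to
 * the caller and must stay alive until the last reference to the cdev is
 * dropped.
 */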