/*
 * Any part of this program may be used in documents licensed under
 * the GNU Free Documentation License, Version 1.1 or any later version
 * published by the Free Software Foundation.
 */
#ifndef _PARPORT_H_
#define _PARPORT_H_


#include <linux/jiffies.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/irqreturn.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <asm/ptrace.h>
#include <uapi/linux/parport.h>

/* Define this later. */
struct parport;
struct pardevice;

struct pc_parport_state {
	unsigned int ctr;
	unsigned int ecr;
};

struct ax_parport_state {
	unsigned int ctr;
	unsigned int ecr;
	unsigned int dcsr;
};

/* used by both parport_amiga and parport_mfc3 */
struct amiga_parport_state {
	unsigned char data;	/* ciaa.prb */
	unsigned char datadir;	/* ciaa.ddrb */
	unsigned char status;	/* ciab.pra & 7 */
	unsigned char statusdir;/* ciab.ddrb & 7 */
};

struct ip32_parport_state {
	unsigned int dcr;
	unsigned int ecr;
};

struct parport_state {
	union {
		struct pc_parport_state pc;
		/* ARC has no state. */
		struct ax_parport_state ax;
		struct amiga_parport_state amiga;
		/* Atari has no state. */
		struct ip32_parport_state ip32;
		void *misc;
	} u;
};

struct parport_operations {
	/* IBM PC-style virtual registers. */
	void (*write_data)(struct parport *, unsigned char);
	unsigned char (*read_data)(struct parport *);

	void (*write_control)(struct parport *, unsigned char);
	unsigned char (*read_control)(struct parport *);
	unsigned char (*frob_control)(struct parport *, unsigned char mask,
				      unsigned char val);

	unsigned char (*read_status)(struct parport *);

	/* IRQs. */
	void (*enable_irq)(struct parport *);
	void (*disable_irq)(struct parport *);

	/* Data direction. */
	void (*data_forward) (struct parport *);
	void (*data_reverse) (struct parport *);

	/* For core parport code. */
	void (*init_state)(struct pardevice *, struct parport_state *);
	void (*save_state)(struct parport *, struct parport_state *);
	void (*restore_state)(struct parport *, struct parport_state *);

	/* Block read/write */
	size_t (*epp_write_data) (struct parport *port, const void *buf,
				  size_t len, int flags);
	size_t (*epp_read_data) (struct parport *port, void *buf, size_t len,
				 int flags);
	size_t (*epp_write_addr) (struct parport *port, const void *buf,
				  size_t len, int flags);
	size_t (*epp_read_addr) (struct parport *port, void *buf, size_t len,
				 int flags);

	size_t (*ecp_write_data) (struct parport *port, const void *buf,
				  size_t len, int flags);
	size_t (*ecp_read_data) (struct parport *port, void *buf, size_t len,
				 int flags);
	size_t (*ecp_write_addr) (struct parport *port, const void *buf,
				  size_t len, int flags);

	size_t (*compat_write_data) (struct parport *port, const void *buf,
				     size_t len, int flags);
	size_t (*nibble_read_data) (struct parport *port, void *buf,
				    size_t len, int flags);
	size_t (*byte_read_data) (struct parport *port, void *buf,
				  size_t len, int flags);
	struct module *owner;
};
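
/*
 * A low-level port driver supplies this dispatch table when it registers a
 * port.  The sketch below is illustrative only: the foo_* functions and the
 * PC-style inb()/outb() register layout are assumptions, and a real driver
 * must fill in every operation the core may call.  The block transfer hooks
 * are commonly pointed at the parport_ieee1284_* helpers declared later in
 * this header.
 *
 *	static void foo_write_data(struct parport *p, unsigned char d)
 *	{
 *		outb(d, p->base);
 *	}
 *
 *	static unsigned char foo_read_status(struct parport *p)
 *	{
 *		return inb(p->base + 1);
 *	}
 *
 *	static struct parport_operations foo_ops = {
 *		.write_data        = foo_write_data,
 *		.read_status       = foo_read_status,
 *		.compat_write_data = parport_ieee1284_write_compat,
 *		.nibble_read_data  = parport_ieee1284_read_nibble,
 *		.owner             = THIS_MODULE,
 *	};
 */
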
struct parport_device_info {
	parport_device_class class;
	const char *class_name;
	const char *mfr;
	const char *model;
	const char *cmdset;
	const char *description;
};

/* Each device can have two callback functions:
 *  1) a preemption function, called by the resource manager to request
 *     that the driver relinquish control of the port.  The driver should
 *     return zero if it agrees to release the port, and nonzero if it
 *     refuses.  Do not call parport_release() - the kernel will do this
 *     implicitly.
 *
 *  2) a wake-up function, called by the resource manager to tell drivers
 *     that the port is available to be claimed.  If a driver wants to use
 *     the port, it should call parport_claim() here.
 */

/* A parallel port device */
struct pardevice {
	const char *name;
	struct parport *port;
	int daisy;
	int (*preempt)(void *);
	void (*wakeup)(void *);
	void *private;
	void (*irq_func)(void *);
	unsigned int flags;
	struct pardevice *next;
	struct pardevice *prev;
	struct device dev;
	bool devmodel;
	struct parport_state *state;	/* saved status over preemption */
	wait_queue_head_t wait_q;
	unsigned long int time;
	unsigned long int timeslice;
	volatile long int timeout;
	unsigned long waiting;		/* long req'd for set_bit --RR */
	struct pardevice *waitprev;
	struct pardevice *waitnext;
	void *sysctl_table;
};

#define to_pardevice(n) container_of(n, struct pardevice, dev)
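
/*
 * An illustrative sketch of the two callbacks described above.  foo_preempt(),
 * foo_wakeup() and struct foo_device are hypothetical; the void * argument is
 * the 'private' pointer supplied when the device was registered.  Returning
 * nonzero from the preemption callback refuses the request (parport_release()
 * must not be called from it); the wake-up callback may claim the port with
 * parport_claim(), declared further down in this header.
 *
 *	static int foo_preempt(void *handle)
 *	{
 *		struct foo_device *foo = handle;
 *
 *		return foo->busy ? 1 : 0;
 *	}
 *
 *	static void foo_wakeup(void *handle)
 *	{
 *		struct foo_device *foo = handle;
 *
 *		if (parport_claim(foo->pardev) == 0)
 *			foo->have_port = true;
 *	}
 */
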
/* IEEE1284 information */

/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL
 * PP[GS]ETPHASE, so do not change existing values. */
enum ieee1284_phase {
	IEEE1284_PH_FWD_DATA,
	IEEE1284_PH_FWD_IDLE,
	IEEE1284_PH_TERMINATE,
	IEEE1284_PH_NEGOTIATION,
	IEEE1284_PH_HBUSY_DNA,
	IEEE1284_PH_REV_IDLE,
	IEEE1284_PH_HBUSY_DAVAIL,
	IEEE1284_PH_REV_DATA,
	IEEE1284_PH_ECP_SETUP,
	IEEE1284_PH_ECP_FWD_TO_REV,
	IEEE1284_PH_ECP_REV_TO_FWD,
	IEEE1284_PH_ECP_DIR_UNKNOWN,
};
struct ieee1284_info {
	int mode;
	volatile enum ieee1284_phase phase;
	struct semaphore irq;
};

/* A parallel port */
struct parport {
	unsigned long base;	/* base address */
	unsigned long base_hi;	/* base address (hi - ECR) */
	unsigned int size;	/* IO extent */
	const char *name;
	unsigned int modes;
	int irq;		/* interrupt (or -1 for none) */
	int dma;
	int muxport;		/* which muxport (if any) this is */
	int portnum;		/* which physical parallel port (not mux) */
	struct device *dev;	/* Physical device associated with IO/DMA.
				 * This may unfortunately be null if the
				 * port has a legacy driver.
				 */
	struct device bus_dev;	/* to link with the bus */
	struct parport *physport;
				/* If this is a non-default mux
				   parport, i.e. we're a clone of a real
				   physical port, this is a pointer to that
				   port. The locking is only done in the
				   real port.  For a clone port, the
				   following structure members are
				   meaningless: devices, cad, muxsel,
				   waithead, waittail, flags, pdir,
				   dev, ieee1284, *_lock.

				   If this is a default mux parport, or
				   there is no mux involved, this points to
				   ourself. */

	struct pardevice *devices;
	struct pardevice *cad;	/* port owner */
	int daisy;		/* currently selected daisy addr */
	int muxsel;		/* currently selected mux port */

	struct pardevice *waithead;
	struct pardevice *waittail;

	struct list_head list;
	struct timer_list timer;
	unsigned int flags;

	void *sysctl_table;
	struct parport_device_info probe_info[5]; /* 0-3 + non-IEEE1284.3 */
	struct ieee1284_info ieee1284;

	struct parport_operations *ops;
	void *private_data;	/* for lowlevel driver */

	int number;		/* port index - the `n' in `parportn' */
	spinlock_t pardevice_lock;
	spinlock_t waitlist_lock;
	rwlock_t cad_lock;

	int spintime;
	atomic_t ref_count;

	unsigned long devflags;
#define PARPORT_DEVPROC_REGISTERED	0
	struct pardevice *proc_device;	/* Currently registered proc device */

	struct list_head full_list;
	struct parport *slaves[3];
};

#define to_parport_dev(n) container_of(n, struct parport, bus_dev)

#define DEFAULT_SPIN_TIME 500 /* us */

struct parport_driver {
	const char *name;
	void (*detach) (struct parport *);
	void (*match_port)(struct parport *);
	int (*probe)(struct pardevice *);
	struct device_driver driver;
};

#define to_parport_driver(n) container_of(n, struct parport_driver, driver)

int parport_bus_init(void);
void parport_bus_exit(void);

/* parport_register_port registers a new parallel port at the given
   address (if one does not already exist) and returns a pointer to it.
   This entails claiming the I/O region, IRQ and DMA.  NULL is returned
   if initialisation fails. */
struct parport *parport_register_port(unsigned long base, int irq, int dma,
				      struct parport_operations *ops);

/* Once a registered port is ready for high-level drivers to use, the
   low-level driver that registered it should announce it.  This will
   call the high-level drivers' attach() functions (after things like
   determining the IEEE 1284.3 topology of the port and collecting
   DeviceIDs). */
void parport_announce_port (struct parport *port);

/* Unregister a port. */
extern void parport_remove_port(struct parport *port);
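
/*
 * A hedged sketch of the registration sequence a low-level driver might use.
 * The base address 0x378 and the foo_ops table are illustrative assumptions;
 * PARPORT_IRQ_NONE and PARPORT_DMA_NONE come from <uapi/linux/parport.h>.
 *
 *	struct parport *port;
 *
 *	port = parport_register_port(0x378, PARPORT_IRQ_NONE,
 *				     PARPORT_DMA_NONE, &foo_ops);
 *	if (!port)
 *		return -ENODEV;
 *	parport_announce_port(port);
 *
 *	...
 *
 *	parport_remove_port(port);
 */
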
/* Register a new high-level driver. */

int __must_check __parport_register_driver(struct parport_driver *,
					    struct module *,
					    const char *mod_name);
/*
 * parport_register_driver must be a macro so that KBUILD_MODNAME can
 * be expanded
 */

/**
 * parport_register_driver - register a parallel port device driver
 * @driver: structure describing the driver
 *
 * This can be called by a parallel port device driver in order
 * to receive notifications about ports being found in the
 * system, as well as ports no longer available.
 *
 * The @driver structure is allocated by the caller and must not be
 * deallocated until after calling parport_unregister_driver().
 *
 * If using the non device model:
 * The driver's attach() function may block.  The port that
 * attach() is given will be valid for the duration of the
 * callback, but if the driver wants to take a copy of the
 * pointer it must call parport_get_port() to do so.  Calling
 * parport_register_device() on that port will do this for you.
 *
 * The driver's detach() function may block.  The port that
 * detach() is given will be valid for the duration of the
 * callback, but if the driver wants to take a copy of the
 * pointer it must call parport_get_port() to do so.
 *
 * Returns 0 on success.  The non device model will always succeed,
 * but the new device model can fail and will return the error code.
 **/
#define parport_register_driver(driver)             \
	__parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)

/* Unregister a high-level driver. */
void parport_unregister_driver(struct parport_driver *);

/**
 * module_parport_driver() - Helper macro for registering a modular parport driver
 * @__parport_driver: struct parport_driver to be used
 *
 * Helper macro for parport drivers which do not do anything special in module
 * init and exit. This eliminates a lot of boilerplate. Each module may only
 * use this macro once, and calling it replaces module_init() and module_exit().
 */
#define module_parport_driver(__parport_driver) \
	module_driver(__parport_driver, parport_register_driver, parport_unregister_driver)

/* If parport_register_driver doesn't fit your needs, perhaps
 * parport_find_xxx does. */
extern struct parport *parport_find_number (int);
extern struct parport *parport_find_base (unsigned long);

/* generic irq handler, if it suits your needs */
extern irqreturn_t parport_irq_handler(int irq, void *dev_id);

/* Reference counting for ports. */
extern struct parport *parport_get_port (struct parport *);
extern void parport_put_port (struct parport *);
void parport_del_port(struct parport *);

struct pardev_cb {
	int (*preempt)(void *);
	void (*wakeup)(void *);
	void *private;
	void (*irq_func)(void *);
	unsigned int flags;
};

/*
 * parport_register_dev_model declares that a device is connected to a
 * port, and tells the kernel all it needs to know.
 */
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int cnt);

/* parport_unregister unlinks a device from the chain. */
extern void parport_unregister_device(struct pardevice *dev);
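
/*
 * A sketch of a high-level driver using the device model interface.  All
 * foo_* names are hypothetical; foo_preempt()/foo_wakeup() are the callbacks
 * sketched earlier and foo is a driver-private object.  match_port() is
 * called for every known port, and the driver attaches to the ports it cares
 * about by registering a pardevice on them; cnt is a number used to
 * distinguish multiple devices registered with the same name.
 *
 *	static void foo_match_port(struct parport *port)
 *	{
 *		struct pardev_cb cb = {
 *			.preempt = foo_preempt,
 *			.wakeup  = foo_wakeup,
 *			.private = &foo,
 *		};
 *		struct pardevice *dev;
 *
 *		dev = parport_register_dev_model(port, "foo", &cb, 0);
 *		if (!dev)
 *			return;
 *		foo.pardev = dev;
 *	}
 *
 *	static void foo_detach(struct parport *port)
 *	{
 *		if (foo.pardev && foo.pardev->port == port) {
 *			parport_unregister_device(foo.pardev);
 *			foo.pardev = NULL;
 *		}
 *	}
 *
 *	static struct parport_driver foo_driver = {
 *		.name	    = "foo",
 *		.match_port = foo_match_port,
 *		.detach	    = foo_detach,
 *	};
 *	module_parport_driver(foo_driver);
 */
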
/* parport_claim tries to gain ownership of the port for a particular
   driver.  This may fail (return non-zero) if another driver is busy.
   If this driver has registered an interrupt handler, it will be
   enabled. */
extern int parport_claim(struct pardevice *dev);

/* parport_claim_or_block is the same, but sleeps if the port cannot
   be claimed.  Return value is 1 if it slept, 0 normally and -errno
   on error. */
extern int parport_claim_or_block(struct pardevice *dev);

/* parport_release reverses a previous parport_claim.  This can never
   fail, though the effects are undefined (except that they are bad)
   if you didn't previously own the port.  Once you have released the
   port you should make sure that neither your code nor the hardware
   on the port tries to initiate any communication without first
   re-claiming the port.  If you mess with the port state (enabling
   ECP for example) you should clean up before releasing the port. */

extern void parport_release(struct pardevice *dev);

/**
 * parport_yield - relinquish a parallel port temporarily
 * @dev: a device on the parallel port
 *
 * This function relinquishes the port if it would be helpful to other
 * drivers to do so.  Afterwards it tries to reclaim the port using
 * parport_claim(), and the return value is the same as for
 * parport_claim().  If it fails, the port is left unclaimed and it is
 * the driver's responsibility to reclaim the port.
 *
 * The parport_yield() and parport_yield_blocking() functions are for
 * marking points in the driver at which other drivers may claim the
 * port and use their devices.  Yielding the port is similar to
 * releasing it and reclaiming it, but is more efficient because no
 * action is taken if there are no other devices needing the port.  In
 * fact, nothing is done even if there are other devices waiting but
 * the current device is still within its "timeslice".  The default
 * timeslice is half a second, but it can be adjusted via the /proc
 * interface.
 **/
static __inline__ int parport_yield(struct pardevice *dev)
{
	unsigned long int timeslip = (jiffies - dev->time);
	if ((dev->port->waithead == NULL) || (timeslip < dev->timeslice))
		return 0;
	parport_release(dev);
	return parport_claim(dev);
}

/**
 * parport_yield_blocking - relinquish a parallel port temporarily
 * @dev: a device on the parallel port
 *
 * This function relinquishes the port if it would be helpful to other
 * drivers to do so.  Afterwards it tries to reclaim the port using
 * parport_claim_or_block(), and the return value is the same as for
 * parport_claim_or_block().
 **/
static __inline__ int parport_yield_blocking(struct pardevice *dev)
{
	unsigned long int timeslip = (jiffies - dev->time);
	if ((dev->port->waithead == NULL) || (timeslip < dev->timeslice))
		return 0;
	parport_release(dev);
	return parport_claim_or_block(dev);
}
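
/*
 * A sketch of a typical claim/transfer/release sequence, where dev is a
 * struct pardevice obtained from parport_register_dev_model() and
 * have_more_work(), do_some_io() and the lost_port label are placeholders.
 * A nonzero return from parport_yield() means the port was given up and not
 * reclaimed, so the driver must claim it again before touching the hardware.
 *
 *	int err = parport_claim_or_block(dev);
 *	if (err < 0)
 *		return err;
 *
 *	while (have_more_work()) {
 *		do_some_io(dev->port);
 *		if (parport_yield(dev))
 *			goto lost_port;
 *	}
 *
 *	parport_release(dev);
 */
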
/* Flags used to identify what a device does. */
#define PARPORT_DEV_TRAN	0	/* WARNING !! DEPRECATED !! */
#define PARPORT_DEV_LURK	(1<<0)	/* WARNING !! DEPRECATED !! */
#define PARPORT_DEV_EXCL	(1<<1)	/* Need exclusive access. */

#define PARPORT_FLAG_EXCL	(1<<1)	/* EXCL driver registered. */

/* IEEE1284 functions */
extern void parport_ieee1284_interrupt (void *);
extern int parport_negotiate (struct parport *, int mode);
extern ssize_t parport_write (struct parport *, const void *buf, size_t len);
extern ssize_t parport_read (struct parport *, void *buf, size_t len);

#define PARPORT_INACTIVITY_O_NONBLOCK 1
extern long parport_set_timeout (struct pardevice *, long inactivity);

extern int parport_wait_event (struct parport *, long timeout);
extern int parport_wait_peripheral (struct parport *port,
				    unsigned char mask,
				    unsigned char val);
extern int parport_poll_peripheral (struct parport *port,
				    unsigned char mask,
				    unsigned char val,
				    int usec);

/* For architectural drivers */
extern size_t parport_ieee1284_write_compat (struct parport *,
					     const void *, size_t, int);
extern size_t parport_ieee1284_read_nibble (struct parport *,
					    void *, size_t, int);
extern size_t parport_ieee1284_read_byte (struct parport *,
					  void *, size_t, int);
extern size_t parport_ieee1284_ecp_read_data (struct parport *,
					      void *, size_t, int);
extern size_t parport_ieee1284_ecp_write_data (struct parport *,
					       const void *, size_t, int);
extern size_t parport_ieee1284_ecp_write_addr (struct parport *,
					       const void *, size_t, int);
extern size_t parport_ieee1284_epp_write_data (struct parport *,
					       const void *, size_t, int);
extern size_t parport_ieee1284_epp_read_data (struct parport *,
					      void *, size_t, int);
extern size_t parport_ieee1284_epp_write_addr (struct parport *,
					       const void *, size_t, int);
extern size_t parport_ieee1284_epp_read_addr (struct parport *,
					      void *, size_t, int);

/* IEEE1284.3 functions */
#define daisy_dev_name "Device ID probe"
extern int parport_daisy_init (struct parport *port);
extern void parport_daisy_fini (struct parport *port);
extern struct pardevice *parport_open (int devnum, const char *name);
extern void parport_close (struct pardevice *dev);
extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
extern void parport_daisy_deselect_all (struct parport *port);
extern int parport_daisy_select (struct parport *port, int daisy, int mode);

/* Lowlevel drivers _can_ call this support function to handle irqs. */
static inline void parport_generic_irq(struct parport *port)
{
	parport_ieee1284_interrupt (port);
	read_lock(&port->cad_lock);
	if (port->cad && port->cad->irq_func)
		port->cad->irq_func(port->cad->private);
	read_unlock(&port->cad_lock);
}

/* Prototypes from parport_procfs */
extern int parport_proc_register(struct parport *pp);
extern int parport_proc_unregister(struct parport *pp);
extern int parport_device_proc_register(struct pardevice *device);
extern int parport_device_proc_unregister(struct pardevice *device);
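
/*
 * A sketch of using the IEEE 1284 helpers while the port is claimed.  dev,
 * buf and len are assumed from the surrounding driver; IEEE1284_MODE_ECP and
 * IEEE1284_MODE_COMPAT come from <uapi/linux/parport.h>.  parport_negotiate()
 * returns 0 when the peripheral accepts the requested mode; if it refuses,
 * the port is left in compatibility mode and parport_write() transfers data
 * using whatever mode is currently in effect.  The final negotiation back to
 * IEEE1284_MODE_COMPAT terminates the session.
 *
 *	ssize_t written;
 *
 *	parport_set_timeout(dev, msecs_to_jiffies(500));
 *
 *	parport_negotiate(dev->port, IEEE1284_MODE_ECP);
 *	written = parport_write(dev->port, buf, len);
 *	parport_negotiate(dev->port, IEEE1284_MODE_COMPAT);
 */
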
/* If PC hardware is the only type supported, we can optimise a bit. */
#if !defined(CONFIG_PARPORT_NOT_PC) && defined(CONFIG_PARPORT_PC)

#include <linux/parport_pc.h>
#define parport_write_data(p,x)		parport_pc_write_data(p,x)
#define parport_read_data(p)		parport_pc_read_data(p)
#define parport_write_control(p,x)	parport_pc_write_control(p,x)
#define parport_read_control(p)		parport_pc_read_control(p)
#define parport_frob_control(p,m,v)	parport_pc_frob_control(p,m,v)
#define parport_read_status(p)		parport_pc_read_status(p)
#define parport_enable_irq(p)		parport_pc_enable_irq(p)
#define parport_disable_irq(p)		parport_pc_disable_irq(p)
#define parport_data_forward(p)		parport_pc_data_forward(p)
#define parport_data_reverse(p)		parport_pc_data_reverse(p)

#else /* !CONFIG_PARPORT_NOT_PC */

/* Generic operations vector through the dispatch table. */
#define parport_write_data(p,x)		(p)->ops->write_data(p,x)
#define parport_read_data(p)		(p)->ops->read_data(p)
#define parport_write_control(p,x)	(p)->ops->write_control(p,x)
#define parport_read_control(p)		(p)->ops->read_control(p)
#define parport_frob_control(p,m,v)	(p)->ops->frob_control(p,m,v)
#define parport_read_status(p)		(p)->ops->read_status(p)
#define parport_enable_irq(p)		(p)->ops->enable_irq(p)
#define parport_disable_irq(p)		(p)->ops->disable_irq(p)
#define parport_data_forward(p)		(p)->ops->data_forward(p)
#define parport_data_reverse(p)		(p)->ops->data_reverse(p)

#endif /* !CONFIG_PARPORT_NOT_PC */

extern unsigned long parport_default_timeslice;
extern int parport_default_spintime;

#endif /* _PARPORT_H_ */