include/asm-i386/dma.h
/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $
 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
 * Written by Hennus Bergman, 1992.
 * High DMA channel support & info by Hannu Savolainen
 * and John Boyd, Nov. 1992.
 */

#ifndef _ASM_DMA_H
#define _ASM_DMA_H

#include <linux/config.h>
#include <asm/io.h>		/* need byte IO */
#include <asm/spinlock.h>	/* and spinlocks */
#include <linux/delay.h>


#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb	outb_p
#else
#define dma_outb	outb
#endif

#define dma_inb		inb

/*
 * NOTES about DMA transfers:
 *
 *  controller 1: channels 0-3, byte operations, ports 00-1F
 *  controller 2: channels 4-7, word operations, ports C0-DF
 *
 *  - ALL registers are 8 bits only, regardless of transfer size
 *  - channel 4 is not used - cascades 1 into 2
 *  - channels 0-3 are byte - addresses/counts are for physical bytes
 *  - channels 5-7 are word - addresses/counts are for physical words
 *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
 *  - transfer count loaded to registers is 1 less than actual count
 *  - controller 2 offsets are all even (2x offsets for controller 1)
 *  - page registers for 5-7 don't use data bit 0, represent 128K pages
 *  - page registers for 0-3 use bit 0, represent 64K pages
 *
 * DMA transfers are limited to the lower 16MB of _physical_ memory.
 * Note that addresses loaded into registers must be _physical_ addresses,
 * not logical addresses (which may differ if paging is active).
 *
 *  Address mapping for channels 0-3:
 *
 *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *   P7  ...  P0  A7 ... A0  A7 ... A0
 * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
 *
 *  Address mapping for channels 5-7:
 *
 *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
 *    |  ...  |   \   \   ... \  \  \  ... \  \
 *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
 *    |  ...  |     \   \   ... \  \  \  ... \
 *   P7  ...  P1 (0) A7 A6 ... A0 A7 A6 ... A0
 * |      Page      |  Addr MSB  |  Addr LSB  |   (DMA registers)
 *
 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
 * and counts _must_ be word-aligned (the lowest address bit is _ignored_
 * at the hardware level, so odd-byte transfers aren't possible).
 *
 * Transfer count (_not # bytes_) is limited to 64K, represented as the
 * actual count - 1: 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always
 * 1 or more, and up to 128K bytes may be transferred on channels 5-7
 * in one operation.
 */
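
/*
 * Illustrative sketch (not part of the original header): for a word
 * channel (5-7), the mapping above splits a 24-bit physical address
 * exactly as set_dma_page()/set_dma_addr() below do.  The helper name
 * is hypothetical and only meant to make the diagram concrete.
 */
static __inline__ void split_dma16_address(unsigned int phys,
	unsigned char *page, unsigned char *msb, unsigned char *lsb)
{
	*page = (phys >> 16) & 0xfe;	/* A23..A17; page bit 0 unused */
	*msb  = (phys >> 9) & 0xff;	/* A16..A9 */
	*lsb  = (phys >> 1) & 0xff;	/* A8..A1; A0 is ignored */
}
/* e.g. phys 0x123456 gives page 0x12, MSB 0x1A, LSB 0x2B (word 0x91A2B) */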

#define MAX_DMA_CHANNELS	8

/* The maximum address that we can perform a DMA transfer to on this platform */
#define MAX_DMA_ADDRESS		(PAGE_OFFSET+0x1000000)
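
/*
 * Illustrative sketch (not part of the original header): lowmem is
 * linearly mapped at PAGE_OFFSET, so a driver can check whether a
 * kernel-virtual buffer is reachable by the ISA DMA controller by
 * comparing against MAX_DMA_ADDRESS.  The helper name is hypothetical.
 */
static __inline__ int isa_dma_buffer_ok(void *buf, unsigned long size)
{
	return ((unsigned long) buf + size) <= MAX_DMA_ADDRESS;
}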

/* 8237 DMA controllers */
#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */

/* DMA controller registers */
#define DMA1_CMD_REG		0x08	/* command register (w) */
#define DMA1_STAT_REG		0x08	/* status register (r) */
#define DMA1_REQ_REG		0x09	/* request register (w) */
#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
#define DMA1_MODE_REG		0x0B	/* mode register (w) */
#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG		0x0D	/* Temporary Register (r) */
#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
#define DMA1_CLR_MASK_REG	0x0E	/* Clear Mask */
#define DMA1_MASK_ALL_REG	0x0F	/* all-channels mask (w) */

#define DMA2_CMD_REG		0xD0	/* command register (w) */
#define DMA2_STAT_REG		0xD0	/* status register (r) */
#define DMA2_REQ_REG		0xD2	/* request register (w) */
#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
#define DMA2_MODE_REG		0xD6	/* mode register (w) */
#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG		0xDA	/* Temporary Register (r) */
#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
#define DMA2_CLR_MASK_REG	0xDC	/* Clear Mask */
#define DMA2_MASK_ALL_REG	0xDE	/* all-channels mask (w) */

#define DMA_ADDR_0	0x00	/* DMA address registers */
#define DMA_ADDR_1	0x02
#define DMA_ADDR_2	0x04
#define DMA_ADDR_3	0x06
#define DMA_ADDR_4	0xC0
#define DMA_ADDR_5	0xC4
#define DMA_ADDR_6	0xC8
#define DMA_ADDR_7	0xCC

#define DMA_CNT_0	0x01	/* DMA count registers */
#define DMA_CNT_1	0x03
#define DMA_CNT_2	0x05
#define DMA_CNT_3	0x07
#define DMA_CNT_4	0xC2
#define DMA_CNT_5	0xC6
#define DMA_CNT_6	0xCA
#define DMA_CNT_7	0xCE

#define DMA_PAGE_0	0x87	/* DMA page registers */
#define DMA_PAGE_1	0x83
#define DMA_PAGE_2	0x81
#define DMA_PAGE_3	0x82
#define DMA_PAGE_5	0x8B
#define DMA_PAGE_6	0x89
#define DMA_PAGE_7	0x8A

#define DMA_MODE_READ	0x44	/* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE	0x48	/* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0	/* pass thru DREQ->HRQ, DACK<-HLDA only */

#define DMA_AUTOINIT	0x10
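
/*
 * Illustrative note (not part of the original header): DMA_AUTOINIT is
 * ORed into one of the single-cycle modes above when the controller
 * should keep cycling over the same buffer (sound drivers typically do
 * this), e.g. set_dma_mode(chan, DMA_MODE_WRITE | DMA_AUTOINIT).
 */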

extern spinlock_t dma_spin_lock;

static __inline__ unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}

static __inline__ void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}

/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(dmanr, DMA1_MASK_REG);
	else
		dma_outb(dmanr & 3, DMA2_MASK_REG);
}

static __inline__ void disable_dma(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(dmanr | 4, DMA1_MASK_REG);
	else
		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}

/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should only be used
 * --- while holding the DMA lock!
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(0, DMA1_CLEAR_FF_REG);
	else
		dma_outb(0, DMA2_CLEAR_FF_REG);
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
	if (dmanr <= 3)
		dma_outb(mode | dmanr, DMA1_MODE_REG);
	else
		dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
}

/* Set only the page register bits of the transfer address.  This is
 * used for successive transfers when we know the contents of the lower
 * 16 bits of the DMA current address register, but a 64k boundary may
 * have been crossed.
 */
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
	switch (dmanr) {
	case 0:
		dma_outb(pagenr, DMA_PAGE_0);
		break;
	case 1:
		dma_outb(pagenr, DMA_PAGE_1);
		break;
	case 2:
		dma_outb(pagenr, DMA_PAGE_2);
		break;
	case 3:
		dma_outb(pagenr, DMA_PAGE_3);
		break;
	case 5:
		dma_outb(pagenr & 0xfe, DMA_PAGE_5);
		break;
	case 6:
		dma_outb(pagenr & 0xfe, DMA_PAGE_6);
		break;
	case 7:
		dma_outb(pagenr & 0xfe, DMA_PAGE_7);
		break;
	}
}


/* Set transfer address & page bits for specific DMA channel.
 * Assumes dma flipflop is clear.
 */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	set_dma_page(dmanr, a >> 16);
	if (dmanr <= 3) {
		dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
		dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
	} else {
		dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
		dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
	}
}


/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for a
 * specific DMA channel.  You must ensure the parameters are valid.
 * NOTE: from a manual: "the number of transfers is one more than the
 * initial word count"!  This is taken into account.  Assumes the dma
 * flip-flop is clear.
 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
	count--;
	if (dmanr <= 3) {
		dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
		dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
	} else {
		dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
		dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
	}
}
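
/*
 * Illustrative sketch (not part of the original header): the usual way a
 * driver programs a single-cycle transfer with the helpers above, holding
 * the DMA spinlock as required by the flip-flop rule.  The function name,
 * channel, address and length are hypothetical; "phys" must be a physical
 * address below the 16MB ISA limit and "len" is a byte count.
 */
static __inline__ void example_setup_isa_dma_read(unsigned int chan,
	unsigned int phys, unsigned int len)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);
	set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory */
	set_dma_addr(chan, phys);
	set_dma_count(chan, len);
	enable_dma(chan);

	release_dma_lock(flags);
}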

/* Get DMA residue count.  After a DMA transfer, this should return zero.
 * Reading this while a DMA transfer is still in progress will return
 * unpredictable results.  If called before the channel has been used, it
 * may return 1.  Otherwise, it returns the number of _bytes_ left to
 * transfer.
 *
 * Assumes DMA flip-flop is clear.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
	unsigned int io_port = (dmanr <= 3)
		? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
		: ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;

	/* using short to get 16-bit wrap around */
	unsigned short count;

	count = 1 + dma_inb(io_port);
	count += dma_inb(io_port) << 8;

	return (dmanr <= 3) ? count : (count << 1);
}
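
/*
 * Illustrative sketch (not part of the original header): reading the
 * residue once the channel has stopped (reading mid-transfer is
 * unreliable, as noted above), under the DMA lock so the flip-flop
 * state stays consistent.  The function name is hypothetical.
 */
static __inline__ int example_isa_dma_bytes_left(unsigned int chan)
{
	unsigned long flags = claim_dma_lock();
	int residue;

	clear_dma_ff(chan);
	residue = get_dma_residue(chan);
	release_dma_lock(flags);

	return residue;
}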

/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char *device_id);	/* reserve a DMA channel */
extern void free_dma(unsigned int dmanr);	/* release it again */
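
/*
 * Illustrative sketch (not part of the original header): a driver claims
 * its channel with request_dma() before touching the controller and gives
 * it back with free_dma() when finished.  The channel number and device
 * name are hypothetical; request_dma() returns 0 on success.
 */
static __inline__ int example_claim_channel(void)
{
	int err = request_dma(3, "exampledev");
	if (err)
		return err;
	/* ... program the channel and run the transfer ... */
	free_dma(3);
	return 0;
}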

/* From PCI */

#ifdef CONFIG_PCI_QUIRKS
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif /* _ASM_DMA_H */