00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047
00048
00049
00050
00051
00052
00053
00054
00055
00056
00057
00058
00059
00060
00061
00062
00063
00064
00065
00066
00067
00068
00069
00070
00071
00072
00073
00074
00075
00076
00077 #include <linux/module.h>
00078 #include <linux/types.h>
00079 #include <linux/kernel.h>
00080 #include <linux/timer.h>
00081 #include <linux/mm.h>
00082 #include <linux/interrupt.h>
00083 #include <linux/pci.h>
00084 #include <linux/init.h>
00085 #include <linux/ide.h>
00086 #include <linux/delay.h>
00087 #include <linux/scatterlist.h>
00088
00089 #include <asm/io.h>
00090 #include <asm/irq.h>
00091
/*
 * Drives for which DMA is forced on by __ide_dma_good_drive() even when
 * the usual IDENTIFY mode checks don't enable it.  "ALL" in the firmware
 * column matches any firmware revision (see ide_in_drive_list()).
 */
static const struct drive_list_entry drive_whitelist [] = {

	{ "Micropolis 2112A" , "ALL" },
	{ "CONNER CTMA 4000" , "ALL" },
	{ "CONNER CTT8000-A" , "ALL" },
	{ "ST34342A" , "ALL" },
	{ NULL , NULL }
};
00100
00101 static const struct drive_list_entry drive_blacklist [] = {
00102
00103 { "WDC AC11000H" , "ALL" },
00104 { "WDC AC22100H" , "ALL" },
00105 { "WDC AC32500H" , "ALL" },
00106 { "WDC AC33100H" , "ALL" },
00107 { "WDC AC31600H" , "ALL" },
00108 { "WDC AC32100H" , "24.09P07" },
00109 { "WDC AC23200L" , "21.10N21" },
00110 { "Compaq CRD-8241B" , "ALL" },
00111 { "CRD-8400B" , "ALL" },
00112 { "CRD-8480B", "ALL" },
00113 { "CRD-8482B", "ALL" },
00114 { "CRD-84" , "ALL" },
00115 { "SanDisk SDP3B" , "ALL" },
00116 { "SanDisk SDP3B-64" , "ALL" },
00117 { "SANYO CD-ROM CRD" , "ALL" },
00118 { "HITACHI CDR-8" , "ALL" },
00119 { "HITACHI CDR-8335" , "ALL" },
00120 { "HITACHI CDR-8435" , "ALL" },
00121 { "Toshiba CD-ROM XM-6202B" , "ALL" },
00122 { "CD-532E-A" , "ALL" },
00123 { "E-IDE CD-ROM CR-840", "ALL" },
00124 { "CD-ROM Drive/F5A", "ALL" },
00125 { "WPI CDD-820", "ALL" },
00126 { "SAMSUNG CD-ROM SC-148C", "ALL" },
00127 { "SAMSUNG CD-ROM SC", "ALL" },
00128 { "SanDisk SDP3B-64" , "ALL" },
00129 { "ATAPI CD-ROM DRIVE 40X MAXIMUM", "ALL" },
00130
00131 { "CF 16GB", "20070131" },
00132 { "CF2GHS", "20070504" },
00133 { "TRANSCEND", "20070831" },
00134
00135 { "", "ALL" },
00136 { NULL , NULL }
00137
00138 };
00139
00149 int ide_in_drive_list(struct hd_driveid *id, const struct drive_list_entry *drive_table)
00150 {
00151 for ( ; drive_table->id_model ; drive_table++)
00152 if ((!strcmp(drive_table->id_model, id->model)) &&
00153 ((strstr(id->fw_rev, drive_table->id_firmware)) ||
00154 (!strcmp(drive_table->id_firmware, "ALL"))))
00155 return 1;
00156 return 0;
00157 }
00158
/*
 * ide_dma_intr - IDE DMA completion interrupt handler
 *
 * Stops the busmaster engine, reads the drive status and, when both the
 * engine and the drive report success, completes the current request.
 * Any failure is routed through ide_error().
 */
ide_startstop_t ide_dma_intr (ide_drive_t *drive)
{
	u8 stat = 0, dma_stat = 0;

	/* stop the engine first; its return value is the DMA outcome */
	dma_stat = HWIF(drive)->ide_dma_end(drive);
	stat = HWIF(drive)->INB(IDE_STATUS_REG);	/* get drive status */
	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
		if (!dma_stat) {
			struct request *rq = HWGROUP(drive)->rq;

			if (rq->rq_disk) {
				ide_driver_t *drv;

				/* let the owning driver complete the request */
				drv = *(ide_driver_t **)rq->rq_disk->private_data;
				drv->end_request(drive, 1, rq->nr_sectors);
			} else
				ide_end_request(drive, 1, rq->nr_sectors);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
			drive->name, dma_stat);
	}
	return ide_error(drive, "dma_intr", stat);
}

EXPORT_SYMBOL_GPL(ide_dma_intr);
00193
00194 #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
00195
00206 int ide_build_sglist(ide_drive_t *drive, struct request *rq)
00207 {
00208 ide_hwif_t *hwif = HWIF(drive);
00209 struct scatterlist *sg = hwif->sg_table;
00210
00211 BUG_ON((rq->cmd_type == REQ_TYPE_ATA_TASKFILE) && rq->nr_sectors > 256);
00212
00213 ide_map_sg(drive, rq);
00214
00215 if (rq_data_dir(rq) == READ)
00216 hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
00217 else
00218 hwif->sg_dma_direction = PCI_DMA_TODEVICE;
00219
00220 return pci_map_sg(hwif->pci_dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
00221 }
00222
00223 EXPORT_SYMBOL_GPL(ide_build_sglist);
00224
/*
 * ide_build_dmatable - build the PRD (Physical Region Descriptor) table
 * @drive: drive the request is for
 * @rq: request whose data is to be transferred
 *
 * Maps the request's scatter/gather list and fills the interface's PRD
 * table with address/count pairs for the busmaster engine.  A segment
 * is split whenever it crosses a 64KB boundary, since a PRD byte count
 * is only 16 bits wide.  Returns the number of PRD entries built, or 0
 * on failure — the caller must then fall back to PIO.
 */
int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int *table = hwif->dmatable_cpu;
	/* the TRM-290 encodes PRD counts differently (see below) */
	unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;

	sg = hwif->sg_table;
	while (i) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/*
		 * Emit one or more PRD entries for this segment.  bcount is
		 * the distance to the next 64KB boundary, so no single entry
		 * ever crosses one.
		 */

		while (cur_len) {
			if (count++ >= PRD_ENTRIES) {
				printk(KERN_ERR "%s: DMA table too small\n", drive->name);
				goto use_pio_instead;
			} else {
				u32 xcount, bcount = 0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;
				*table++ = cpu_to_le32(cur_addr);
				xcount = bcount & 0xffff;
				if (is_trm290)
					xcount = ((xcount >> 2) - 1) << 16;
				if (xcount == 0x0000) {
					/*
					 * A count field of 0x0000 would mean a
					 * full 64KB transfer; it is split here
					 * into two 0x8000-byte entries instead
					 * — NOTE(review): presumably to dodge
					 * chipsets that mishandle 0x0000.
					 */
					if (count++ >= PRD_ENTRIES) {
						printk(KERN_ERR "%s: DMA table too small\n", drive->name);
						goto use_pio_instead;
					}
					*table++ = cpu_to_le32(0x8000);
					*table++ = cpu_to_le32(cur_addr + 0x8000);
					xcount = 0x8000;
				}
				*table++ = cpu_to_le32(xcount);
				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg++;
		i--;
	}

	if (count) {
		/* non-TRM290: mark the last entry as end-of-table */
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}
	printk(KERN_ERR "%s: empty DMA table?\n", drive->name);
use_pio_instead:
	/* undo the scatter/gather mapping before falling back to PIO */
	pci_unmap_sg(hwif->pci_dev,
		     hwif->sg_table,
		     hwif->sg_nents,
		     hwif->sg_dma_direction);
	return 0;
}

EXPORT_SYMBOL_GPL(ide_build_dmatable);
00319
00331 void ide_destroy_dmatable (ide_drive_t *drive)
00332 {
00333 struct pci_dev *dev = HWIF(drive)->pci_dev;
00334 struct scatterlist *sg = HWIF(drive)->sg_table;
00335 int nents = HWIF(drive)->sg_nents;
00336
00337 pci_unmap_sg(dev, sg, nents, HWIF(drive)->sg_dma_direction);
00338 }
00339
00340 EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
00341
00353 static int config_drive_for_dma (ide_drive_t *drive)
00354 {
00355 struct hd_driveid *id = drive->id;
00356 ide_hwif_t *hwif = HWIF(drive);
00357
00358 if ((id->capability & 1) && hwif->autodma) {
00359
00360
00361
00362
00363 if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
00364 return hwif->ide_dma_on(drive);
00365
00366
00367
00368
00369 if (id->field_valid & 2)
00370 if ((id->dma_mword & 0x404) == 0x404 ||
00371 (id->dma_1word & 0x404) == 0x404)
00372 return hwif->ide_dma_on(drive);
00373
00374
00375 if (__ide_dma_good_drive(drive))
00376 return hwif->ide_dma_on(drive);
00377 }
00378
00379 return hwif->ide_dma_off_quietly(drive);
00380 }
00381
/*
 * dma_timer_expiry - decide what to do when a DMA command times out
 *
 * Inspects the busmaster status register (SFF-8038i: bit 0 = engine
 * active, bit 1 = error, bit 2 = interrupt).  Returns a positive value
 * to re-arm the timer, -1 or 0 to let the ide core's timeout/error
 * handling take over.
 */
static int dma_timer_expiry (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 dma_stat = hwif->INB(hwif->dma_status);

	printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
		drive->name, dma_stat);

	/* NOTE(review): 0x18 tests bits 3+4 (reserved in SFF-8038i) —
	 * presumably a chipset quirk meaning "still busy"; keep waiting */
	if ((dma_stat & 0x18) == 0x18)
		return WAIT_CMD;

	/* from here on, a further expiry goes through the normal path */
	HWGROUP(drive)->expiry = NULL;

	/* error bit set: abort immediately */
	if (dma_stat & 2)
		return -1;

	/* engine still transferring: grant another WAIT_CMD */
	if (dma_stat & 1)
		return WAIT_CMD;

	/* interrupt asserted: give the handler time to run */
	if (dma_stat & 4)
		return WAIT_CMD;

	return 0;
}
00421
00430 int __ide_dma_host_off (ide_drive_t *drive)
00431 {
00432 ide_hwif_t *hwif = HWIF(drive);
00433 u8 unit = (drive->select.b.unit & 0x01);
00434 u8 dma_stat = hwif->INB(hwif->dma_status);
00435
00436 hwif->OUTB((dma_stat & ~(1<<(5+unit))), hwif->dma_status);
00437 return 0;
00438 }
00439
00440 EXPORT_SYMBOL(__ide_dma_host_off);
00441
00449 int __ide_dma_off_quietly (ide_drive_t *drive)
00450 {
00451 drive->using_dma = 0;
00452 ide_toggle_bounce(drive, 0);
00453
00454 if (HWIF(drive)->ide_dma_host_off(drive))
00455 return 1;
00456
00457 return 0;
00458 }
00459
00460 EXPORT_SYMBOL(__ide_dma_off_quietly);
00461 #endif
00462
00471 int __ide_dma_off (ide_drive_t *drive)
00472 {
00473 printk(KERN_INFO "%s: DMA disabled\n", drive->name);
00474 return HWIF(drive)->ide_dma_off_quietly(drive);
00475 }
00476
00477 EXPORT_SYMBOL(__ide_dma_off);
00478
00479 #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
00480
00488 int __ide_dma_host_on (ide_drive_t *drive)
00489 {
00490 if (drive->using_dma) {
00491 ide_hwif_t *hwif = HWIF(drive);
00492 u8 unit = (drive->select.b.unit & 0x01);
00493 u8 dma_stat = hwif->INB(hwif->dma_status);
00494
00495 hwif->OUTB((dma_stat|(1<<(5+unit))), hwif->dma_status);
00496 return 0;
00497 }
00498 return 1;
00499 }
00500
00501 EXPORT_SYMBOL(__ide_dma_host_on);
00502
00510 int __ide_dma_on (ide_drive_t *drive)
00511 {
00512
00513 if (__ide_dma_bad_drive(drive))
00514 return 1;
00515
00516 drive->using_dma = 1;
00517 ide_toggle_bounce(drive, 1);
00518
00519 if (HWIF(drive)->ide_dma_host_on(drive))
00520 return 1;
00521
00522 return 0;
00523 }
00524
00525 EXPORT_SYMBOL(__ide_dma_on);
00526
/*
 * __ide_dma_check - default "configure DMA for this drive" hook
 *
 * Thin wrapper: delegates entirely to config_drive_for_dma(), which
 * turns DMA on or quietly off based on the IDENTIFY data.
 */
int __ide_dma_check (ide_drive_t *drive)
{
	return config_drive_for_dma(drive);
}

EXPORT_SYMBOL(__ide_dma_check);
00540
/*
 * ide_dma_setup - prepare (but do not start) a busmaster DMA transfer
 *
 * Builds the PRD table for the current request, programs the PRD table
 * address and transfer direction into the busmaster registers and
 * clears stale interrupt/error status.  The engine itself is started
 * later by ide_dma_start().  Returns 0 on success, 1 when the PRD
 * table could not be built (caller falls back to PIO).
 */
int ide_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int reading;
	u8 dma_stat;

	/* command-register bit 3 selects device->memory (a read) */
	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to PIO if we cannot map the request */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* tell the hardware where the PRD table lives */
	hwif->OUTL(hwif->dmatable_dma, hwif->dma_prdtable);

	/* program the direction; the start bit stays clear for now */
	hwif->OUTB(reading, hwif->dma_command);

	/* read status ... */
	dma_stat = hwif->INB(hwif->dma_status);

	/* ... and acknowledge any stale INTR (bit 2) / ERROR (bit 1) */
	hwif->OUTB(dma_stat|6, hwif->dma_status);
	drive->waiting_for_dma = 1;
	return 0;
}

EXPORT_SYMBOL_GPL(ide_dma_setup);
00588
/*
 * ide_dma_exec_cmd - issue the taskfile command for a DMA transfer
 *
 * Issues @command to the drive with ide_dma_intr as the completion
 * handler, a 2*WAIT_CMD timeout and dma_timer_expiry supervising it.
 */
static void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
}
00594
/*
 * ide_dma_start - kick off the busmaster engine
 *
 * Sets the start bit (bit 0) in the DMA command register.  The
 * read-modify-write preserves the direction bit that ide_dma_setup()
 * programmed earlier.
 */
void ide_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 dma_cmd = hwif->INB(hwif->dma_command);

	hwif->OUTB(dma_cmd|1, hwif->dma_command);
	hwif->dma = 1;
	/* ensure the start is visible before anyone observes hwif->dma */
	wmb();
}

EXPORT_SYMBOL_GPL(ide_dma_start);
00612
00613
/*
 * __ide_dma_end - stop the busmaster engine and collect its status
 *
 * Clears the start bit, reads and acknowledges the status register and
 * unmaps the scatter/gather table.  Returns 0 for a clean completion
 * (low three status bits == 4, i.e. interrupt only), otherwise
 * 0x10 | status so callers can distinguish and log the failure.
 */
int __ide_dma_end (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 dma_stat = 0, dma_cmd = 0;

	drive->waiting_for_dma = 0;
	/* get the current DMA command register value */
	dma_cmd = hwif->INB(hwif->dma_command);
	/* stop the engine: clear the start bit */
	hwif->OUTB(dma_cmd&~1, hwif->dma_command);
	/* read the final DMA status */
	dma_stat = hwif->INB(hwif->dma_status);
	/* acknowledge the INTR (bit 2) and ERROR (bit 1) bits */
	hwif->OUTB(dma_stat|6, hwif->dma_status);
	/* purge the DMA mappings */
	ide_destroy_dmatable(drive);

	hwif->dma = 0;
	wmb();
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

EXPORT_SYMBOL(__ide_dma_end);
00637
00638
/*
 * __ide_dma_test_irq - check whether the busmaster raised an interrupt
 *
 * Returns 1 when the interrupt bit (bit 2) is set in the DMA status
 * register, 0 otherwise.  The status is only read here, never
 * acknowledged — clearing happens in __ide_dma_end().
 */
static int __ide_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 dma_stat = hwif->INB(hwif->dma_status);

#if 0
	if (dma_stat & 4) {
		u8 stat = hwif->INB(IDE_STATUS_REG);
		/* NOTE(review): arguments look swapped — every other call
		 * site uses OUTB(value, port).  Harmless while #if 0'd,
		 * but fix before ever enabling this block. */
		hwif->OUTB(hwif->dma_status, dma_stat & 0xE4);
	}
#endif
	/* return 1 if the INTR bit is asserted */
	if ((dma_stat & 4) == 4)
		return 1;
	if (!drive->waiting_for_dma)
		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
			drive->name, __FUNCTION__);
	return 0;
}
00658 #endif
00659
00660 int __ide_dma_bad_drive (ide_drive_t *drive)
00661 {
00662 struct hd_driveid *id = drive->id;
00663
00664 int blacklist = ide_in_drive_list(id, drive_blacklist);
00665 if (blacklist) {
00666 printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
00667 drive->name, id->model);
00668 return blacklist;
00669 }
00670 return 0;
00671 }
00672
00673 EXPORT_SYMBOL(__ide_dma_bad_drive);
00674
00675 int __ide_dma_good_drive (ide_drive_t *drive)
00676 {
00677 struct hd_driveid *id = drive->id;
00678 return ide_in_drive_list(id, drive_whitelist);
00679 }
00680
00681 EXPORT_SYMBOL(__ide_dma_good_drive);
00682
00683 int ide_use_dma(ide_drive_t *drive)
00684 {
00685 struct hd_driveid *id = drive->id;
00686 ide_hwif_t *hwif = drive->hwif;
00687
00688
00689 if (__ide_dma_bad_drive(drive))
00690 return 0;
00691
00692
00693 if (id->field_valid & 4) {
00694 if (hwif->ultra_mask & id->dma_ultra)
00695 return 1;
00696 }
00697
00698
00699 if (id->field_valid & 2) {
00700 if (hwif->mwdma_mask & id->dma_mword)
00701 return 1;
00702 if (hwif->swdma_mask & id->dma_1word)
00703 return 1;
00704 }
00705
00706
00707 if (__ide_dma_good_drive(drive) && id->eide_dma_time < 150)
00708 return 1;
00709
00710 return 0;
00711 }
00712
00713 EXPORT_SYMBOL_GPL(ide_use_dma);
00714
/*
 * ide_dma_verbose - print the best DMA mode the drive has enabled
 *
 * Appends a ", UDMA(...)" / ", DMA" tag to the printk line currently
 * being built by the caller.  When the IDENTIFY data is inconsistent
 * (more than one mode class claims to be enabled), DMA is switched off
 * and ", BUG DMA OFF" is printed instead.
 */
void ide_dma_verbose(ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;
	ide_hwif_t *hwif = HWIF(drive);

	if (id->field_valid & 4) {
		/* UDMA and MWDMA both claim an enabled mode: bogus ID data */
		if ((id->dma_ultra >> 8) && (id->dma_mword >> 8))
			goto bug_dma_off;
		/* enabled bits (>>8) masked by what this host supports */
		if (id->dma_ultra & ((id->dma_ultra >> 8) & hwif->ultra_mask)) {
			/* modes 3..7 (bits 11..15) require an 80-wire cable */
			if (((id->dma_ultra >> 11) & 0x1F) &&
			    eighty_ninty_three(drive)) {
				if ((id->dma_ultra >> 15) & 1) {
					printk(", UDMA(mode 7)");
				} else if ((id->dma_ultra >> 14) & 1) {
					printk(", UDMA(133)");
				} else if ((id->dma_ultra >> 13) & 1) {
					printk(", UDMA(100)");
				} else if ((id->dma_ultra >> 12) & 1) {
					printk(", UDMA(66)");
				} else if ((id->dma_ultra >> 11) & 1) {
					printk(", UDMA(44)");
				} else
					/* jumps into the else-branch below */
					goto mode_two;
			} else {
				/* modes 0..2 work on a 40-wire cable too */
		mode_two:
				if ((id->dma_ultra >> 10) & 1) {
					printk(", UDMA(33)");
				} else if ((id->dma_ultra >> 9) & 1) {
					printk(", UDMA(25)");
				} else if ((id->dma_ultra >> 8) & 1) {
					printk(", UDMA(16)");
				}
			}
		} else {
			/* UDMA words valid but no supported mode enabled */
			printk(", (U)DMA");
		}
	} else if (id->field_valid & 2) {
		/* MWDMA and SWDMA both claim an enabled mode: bogus */
		if ((id->dma_mword >> 8) && (id->dma_1word >> 8))
			goto bug_dma_off;
		printk(", DMA");
	} else if (id->field_valid & 1) {
		goto bug_dma_off;
	}
	return;
bug_dma_off:
	printk(", BUG DMA OFF");
	hwif->ide_dma_off_quietly(drive);
	return;
}

EXPORT_SYMBOL(ide_dma_verbose);
00766
00767 #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
00768 int __ide_dma_lostirq (ide_drive_t *drive)
00769 {
00770 printk("%s: DMA interrupt recovery\n", drive->name);
00771 return 1;
00772 }
00773
00774 EXPORT_SYMBOL(__ide_dma_lostirq);
00775
/*
 * __ide_dma_timeout - default handler for a DMA command timeout
 *
 * If the controller did raise an interrupt we simply missed it, so
 * return 0 and let normal completion proceed; otherwise stop the
 * engine via ide_dma_end and propagate its status.
 */
int __ide_dma_timeout (ide_drive_t *drive)
{
	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
	if (HWIF(drive)->ide_dma_test_irq(drive))
		return 0;

	return HWIF(drive)->ide_dma_end(drive);
}

EXPORT_SYMBOL(__ide_dma_timeout);
00786
00787
00788
00789
00790 static int ide_release_dma_engine(ide_hwif_t *hwif)
00791 {
00792 if (hwif->dmatable_cpu) {
00793 pci_free_consistent(hwif->pci_dev,
00794 PRD_ENTRIES * PRD_BYTES,
00795 hwif->dmatable_cpu,
00796 hwif->dmatable_dma);
00797 hwif->dmatable_cpu = NULL;
00798 }
00799 return 1;
00800 }
00801
00802 static int ide_release_iomio_dma(ide_hwif_t *hwif)
00803 {
00804 release_region(hwif->dma_base, 8);
00805 if (hwif->extra_ports)
00806 release_region(hwif->extra_base, hwif->extra_ports);
00807 return 1;
00808 }
00809
00810
00811
00812
00813 int ide_release_dma(ide_hwif_t *hwif)
00814 {
00815 ide_release_dma_engine(hwif);
00816
00817 if (hwif->mmio == 2)
00818 return 1;
00819 else
00820 return ide_release_iomio_dma(hwif);
00821 }
00822
00823 static int ide_allocate_dma_engine(ide_hwif_t *hwif)
00824 {
00825 hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
00826 PRD_ENTRIES * PRD_BYTES,
00827 &hwif->dmatable_dma);
00828
00829 if (hwif->dmatable_cpu)
00830 return 0;
00831
00832 printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
00833 hwif->cds->name);
00834
00835 return 1;
00836 }
00837
00838 static int ide_mapped_mmio_dma(ide_hwif_t *hwif, unsigned long base, unsigned int ports)
00839 {
00840 printk(KERN_INFO " %s: MMIO-DMA ", hwif->name);
00841
00842 hwif->dma_base = base;
00843
00844 if(hwif->mate)
00845 hwif->dma_master = (hwif->channel) ? hwif->mate->dma_base : base;
00846 else
00847 hwif->dma_master = base;
00848 return 0;
00849 }
00850
/*
 * ide_iomio_dma - claim the port-I/O resources for a busmaster interface
 *
 * Reserves the busmaster register block at @base and, when the chipset
 * definition declares extra ports, that extra range too (only once per
 * channel pair).  Records dma_base and dma_master.  Returns 0 on
 * success, 1 when a region is already in use (everything claimed so
 * far is rolled back).
 */
static int ide_iomio_dma(ide_hwif_t *hwif, unsigned long base, unsigned int ports)
{
	printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx",
		hwif->name, base, base + ports - 1);

	if (!request_region(base, ports, hwif->name)) {
		printk(" -- Error, ports in use.\n");
		return 1;
	}

	hwif->dma_base = base;

	if (hwif->cds->extra) {
		/* NOTE(review): channel 0 -> base+16, channel 1 -> base+8 —
		 * presumably matches the chipset layout; confirm against
		 * the cds definitions */
		hwif->extra_base = base + (hwif->channel ? 8 : 16);

		/* only claim the shared extra range once per channel pair */
		if (!hwif->mate || !hwif->mate->extra_ports) {
			if (!request_region(hwif->extra_base,
					hwif->cds->extra, hwif->cds->name)) {
				printk(" -- Error, extra ports in use.\n");
				/* roll back the base region claimed above */
				release_region(base, ports);
				return 1;
			}
			hwif->extra_ports = hwif->cds->extra;
		}
	}

	/* secondary channel of a pair shares the primary's master base */
	if(hwif->mate)
		hwif->dma_master = (hwif->channel) ? hwif->mate->dma_base:base;
	else
		hwif->dma_master = base;
	return 0;
}
00883
00884 static int ide_dma_iobase(ide_hwif_t *hwif, unsigned long base, unsigned int ports)
00885 {
00886 if (hwif->mmio == 2)
00887 return ide_mapped_mmio_dma(hwif, base,ports);
00888 BUG_ON(hwif->mmio == 1);
00889 return ide_iomio_dma(hwif, base, ports);
00890 }
00891
00892
00893
00894
/*
 * ide_setup_dma - initialize busmaster DMA for an interface
 *
 * Claims the DMA register resources, allocates the PRD table, derives
 * the individual register addresses from dma_base and installs generic
 * defaults for every DMA method the chipset driver left unset.
 * Finally reports the BIOS's per-drive DMA/PIO setting.  On resource
 * or allocation failure everything claimed so far is released and the
 * interface is left without DMA.
 */
void ide_setup_dma (ide_hwif_t *hwif, unsigned long dma_base, unsigned int num_ports)
{
	if (ide_dma_iobase(hwif, dma_base, num_ports))
		return;

	if (ide_allocate_dma_engine(hwif)) {
		ide_release_dma(hwif);
		return;
	}

	/* standard SFF-8038i register layout, unless already overridden */
	if (!(hwif->dma_command))
		hwif->dma_command = hwif->dma_base;
	if (!(hwif->dma_vendor1))
		hwif->dma_vendor1 = (hwif->dma_base + 1);
	if (!(hwif->dma_status))
		hwif->dma_status = (hwif->dma_base + 2);
	if (!(hwif->dma_vendor3))
		hwif->dma_vendor3 = (hwif->dma_base + 3);
	if (!(hwif->dma_prdtable))
		hwif->dma_prdtable = (hwif->dma_base + 4);

	/* install generic defaults where the driver supplied no hook */
	if (!hwif->ide_dma_off_quietly)
		hwif->ide_dma_off_quietly = &__ide_dma_off_quietly;
	if (!hwif->ide_dma_host_off)
		hwif->ide_dma_host_off = &__ide_dma_host_off;
	if (!hwif->ide_dma_on)
		hwif->ide_dma_on = &__ide_dma_on;
	if (!hwif->ide_dma_host_on)
		hwif->ide_dma_host_on = &__ide_dma_host_on;
	if (!hwif->ide_dma_check)
		hwif->ide_dma_check = &__ide_dma_check;
	if (!hwif->dma_setup)
		hwif->dma_setup = &ide_dma_setup;
	if (!hwif->dma_exec_cmd)
		hwif->dma_exec_cmd = &ide_dma_exec_cmd;
	if (!hwif->dma_start)
		hwif->dma_start = &ide_dma_start;
	if (!hwif->ide_dma_end)
		hwif->ide_dma_end = &__ide_dma_end;
	if (!hwif->ide_dma_test_irq)
		hwif->ide_dma_test_irq = &__ide_dma_test_irq;
	if (!hwif->ide_dma_timeout)
		hwif->ide_dma_timeout = &__ide_dma_timeout;
	if (!hwif->ide_dma_lostirq)
		hwif->ide_dma_lostirq = &__ide_dma_lostirq;

	if (hwif->chipset != ide_trm290) {
		/* bits 5/6 of the status register: BIOS enabled DMA */
		u8 dma_stat = hwif->INB(hwif->dma_status);
		printk(", BIOS settings: %s:%s, %s:%s",
			hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "pio",
			hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio");
	}
	printk("\n");

	BUG_ON(!hwif->dma_master);
}

EXPORT_SYMBOL_GPL(ide_setup_dma);
00953 #endif