/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *   linit.c
 *
 * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
 */

#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/kthread.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>

#include "aacraid.h"

#define AAC_DRIVER_VERSION "1.2-1"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
#define AAC_DRIVERNAME "aacraid"

#ifdef AAC_DRIVER_BUILD
#define _str(x) #x
#define str(x) _str(x)
#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
#else
#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
#endif

MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
                   "Adaptec Advanced Raid Products, "
                   "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(AAC_DRIVER_FULL_VERSION);

static DEFINE_MUTEX(aac_mutex);
static LIST_HEAD(aac_devices);
static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED;
char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;

/*
 * Because of the way Linux names scsi devices, the order in this table has
 * become important.  Check for on-board Raid first, add-in cards second.
 *
 * Note: The last field is used to index into aac_drivers below.
 */
static const struct pci_device_id aac_pci_tbl[] = {
    { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
    { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
    { 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si) */
    { 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
    { 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
    { 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
    { 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
    { 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
    { 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
    { 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
    { 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
    { 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
    { 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
    { 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
    { 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
    { 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
    { 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
    { 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
    { 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
    { 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
    { 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
    { 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
    { 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
    { 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
    { 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
    { 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
    { 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
    { 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
    { 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
    { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
    { 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
    { 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
    { 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
    { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
    { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
    { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
    { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
    { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
    { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
    { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
    { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
    { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
    { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
    { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
    { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
    { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
    { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
    { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
    { 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
    { 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
    { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
    { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
    { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
    { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
    { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
    { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
    { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
    { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
    { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
    { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
    { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
    { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
    { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
    { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
    { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
    { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
    { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
    { 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);

/*
 * dmb - For now we add the number of channels to this structure.
 * In the future we should add a fib that reports the number of channels
 * for the card.  At that time we can remove the channels from here
 */
static struct aac_driver_ident aac_drivers[] = {
    { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
    { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
    { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si) */
    { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
    { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
    { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
    { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
    { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
    { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
    { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
    { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
    { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
    { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020ZCR ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025ZCR ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
    { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
    { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
    { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */
    { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */
    { aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */
    { aac_rkt_init, "aacraid", "ICP ", "ICP9024RO ", 2 }, /* ICP9024RO (Lancer) */
    { aac_rkt_init, "aacraid", "ICP ", "ICP9014RO ", 1 }, /* ICP9014RO (Lancer) */
    { aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */
    { aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */
    { aac_rkt_init, "aacraid", "ICP ", "ICP5445AU ", 1 }, /* ICP5445AU (Hurricane44) */
    { aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
    { aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
    { aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */
    { NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
    { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
    { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
    { aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005 ", 1 }, /* ASR-4005 */
    { aac_rx_init, "ServeRAID", "IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
    { aac_rkt_init, "ServeRAID", "IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
    { aac_rkt_init, "ServeRAID", "IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000 ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
    { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
    { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-3800 ", 1 }, /* ASR-3800 (Hurricane44) */
    { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
    { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
    { aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
    { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
    { aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
    { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
    { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
    { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
    { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
    { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
    { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
    { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
    { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
};

/**
 * aac_queuecommand - queue a SCSI command
 * @shost: Scsi host owning the command
 * @cmd: SCSI command to queue
 *
 * Queues a command for execution by the associated Host Adapter.
 *
 * TODO: unify with aac_scsi_cmd().
 */
static int aac_queuecommand(struct Scsi_Host *shost,
                            struct scsi_cmnd *cmd)
{
    int r = 0;
    cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
    r = (aac_scsi_cmd(cmd) ? FAILED : 0);
    return r;
}

/**
 * aac_info - Returns the host adapter name
 * @shost: Scsi host to report on
 *
 * Returns a static string describing the device in question
 */
static const char *aac_info(struct Scsi_Host *shost)
{
    struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
    return aac_drivers[dev->cardtype].name;
}

/**
 * aac_get_driver_ident
 * @devtype: index into lookup table
 *
 * Returns a pointer to the entry in the driver lookup table.
 */
struct aac_driver_ident* aac_get_driver_ident(int devtype)
{
    return &aac_drivers[devtype];
}

/**
 * aac_biosparm - return BIOS parameters for disk
 * @sdev: The scsi device corresponding to the disk
 * @bdev: the block device corresponding to the disk
 * @capacity: the sector capacity of the disk
 * @geom: geometry block to fill in
 *
 * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
 * The default disk geometry is 64 heads, 32 sectors, and the appropriate
 * number of cylinders so as not to exceed drive capacity.  In order for
 * disks equal to or larger than 1 GB to be addressable by the BIOS
 * without exceeding the BIOS limitation of 1024 cylinders, Extended
 * Translation should be enabled.  With Extended Translation enabled,
 * drives between 1 GB inclusive and 2 GB exclusive are given a disk
 * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
 * are given a disk geometry of 255 heads and 63 sectors.  However, if
 * the BIOS detects that the Extended Translation setting does not match
 * the geometry in the partition table, then the translation inferred
 * from the partition table will be used by the BIOS, and a warning may
 * be displayed.
 */
static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
                        sector_t capacity, int *geom)
{
    struct diskparm *param = (struct diskparm *)geom;
    unsigned char *buf;

    dprintk((KERN_DEBUG "aac_biosparm.\n"));

    /*
     * Assuming extended translation is enabled - #REVISIT#
     */
    if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
        if (capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
            param->heads = 255;
            param->sectors = 63;
        } else {
            param->heads = 128;
            param->sectors = 32;
        }
    } else {
        param->heads = 64;
        param->sectors = 32;
    }
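    /*
     * cap_to_cyls() divides the capacity by heads * sectors.  For example,
     * a 1 GB disk of 2*1024*1024 sectors falls in the 128/32 bucket above,
     * giving 2097152 / (128 * 32) = 512 cylinders.
     */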
    param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);

    /*
     * Read the first 1024 bytes from the disk device, if the boot
     * sector partition table is valid, search for a partition table
     * entry whose end_head matches one of the standard geometry
     * translations ( 64/32, 128/32, 255/63 ).
     */
    buf = scsi_bios_ptable(bdev);
    if (!buf)
        return 0;
    if (*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
        struct partition *first = (struct partition *)buf;
        struct partition *entry = first;
        int saved_cylinders = param->cylinders;
        int num;
        unsigned char end_head, end_sec;

        for (num = 0; num < 4; num++) {
            end_head = entry->end_head;
            end_sec = entry->end_sector & 0x3f;

            if (end_head == 63) {
                param->heads = 64;
                param->sectors = 32;
                break;
            } else if (end_head == 127) {
                param->heads = 128;
                param->sectors = 32;
                break;
            } else if (end_head == 254) {
                param->heads = 255;
                param->sectors = 63;
                break;
            }
            entry++;
        }

        if (num == 4) {
            end_head = first->end_head;
            end_sec = first->end_sector & 0x3f;
        }

        param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
        if (num < 4 && end_sec == param->sectors) {
            if (param->cylinders != saved_cylinders)
                dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
                        param->heads, param->sectors, num));
        } else if (end_head > 0 || end_sec > 0) {
            dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
                    end_head + 1, end_sec, num));
            dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
                    param->heads, param->sectors));
        }
    }
    kfree(buf);
    return 0;
}

/**
 * aac_slave_configure - compute queue depths
 * @sdev: SCSI device we are considering
 *
 * Selects queue depths for each target device based on the host adapter's
 * total capacity and the queue depth supported by the target device.
 * A queue depth of one automatically disables tagged queueing.
 */
static int aac_slave_configure(struct scsi_device *sdev)
{
    struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;

    if (aac->jbod && (sdev->type == TYPE_DISK))
        sdev->removable = 1;

    if ((sdev->type == TYPE_DISK) &&
        (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
        (!aac->jbod || sdev->inq_periph_qual) &&
        (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
        if (expose_physicals == 0)
            return -ENXIO;
        if (expose_physicals < 0)
            sdev->no_uld_attach = 1;
    }

    if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
        (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
        !sdev->no_uld_attach) {
        struct scsi_device *dev;
        struct Scsi_Host *host = sdev->host;
        unsigned num_lsu = 0;
        unsigned num_one = 0;
        unsigned depth;
        unsigned cid;

        /*
         * Firmware has an individual device recovery time typically
         * of 35 seconds, give us a margin.
         */
        if (sdev->request_queue->rq_timeout < (45 * HZ))
            blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
        for (cid = 0; cid < aac->maximum_num_containers; ++cid)
            if (aac->fsa_dev[cid].valid)
                ++num_lsu;
        __shost_for_each_device(dev, host) {
            if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
                (!aac->raid_scsi_mode ||
                 (sdev_channel(sdev) != 2)) &&
                !dev->no_uld_attach) {
                if ((sdev_channel(dev) != CONTAINER_CHANNEL)
                    || !aac->fsa_dev[sdev_id(dev)].valid)
                    ++num_lsu;
            } else
                ++num_one;
        }
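        /*
         * Share the host queue between the logical/physical units that can
         * take multiple commands (num_lsu); devices limited to a single
         * command (num_one) are taken off the top, and the result is
         * clamped to the range 2..256 below.
         */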
        if (num_lsu == 0)
            ++num_lsu;
        depth = (host->can_queue - num_one) / num_lsu;
        if (depth > 256)
            depth = 256;
        else if (depth < 2)
            depth = 2;
        scsi_change_queue_depth(sdev, depth);
    } else {
        scsi_change_queue_depth(sdev, 1);
        sdev->tagged_supported = 1;
    }
    return 0;
}

/**
 * aac_change_queue_depth - alter queue depths
 * @sdev: SCSI device we are considering
 * @depth: desired queue depth
 *
 * Alters queue depths for target device based on the host adapter's
 * total capacity and the queue depth supported by the target device.
 */
static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
{
    if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
        (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
        struct scsi_device *dev;
        struct Scsi_Host *host = sdev->host;
        unsigned num = 0;

        __shost_for_each_device(dev, host) {
            if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
                (sdev_channel(dev) == CONTAINER_CHANNEL))
                ++num;
            ++num;
        }
        if (num >= host->can_queue)
            num = host->can_queue - 1;
        if (depth > (host->can_queue - num))
            depth = host->can_queue - num;
        if (depth > 256)
            depth = 256;
        else if (depth < 2)
            depth = 2;
        return scsi_change_queue_depth(sdev, depth);
    }
    return scsi_change_queue_depth(sdev, 1);
}

static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct scsi_device *sdev = to_scsi_device(dev);
    struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
    if (sdev_channel(sdev) != CONTAINER_CHANNEL)
        return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
            ? "Hidden\n" :
            ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
    return snprintf(buf, PAGE_SIZE, "%s\n",
        get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
}

static struct device_attribute aac_raid_level_attr = {
    .attr = {
        .name = "level",
        .mode = S_IRUGO,
    },
    .show = aac_show_raid_level
};

static struct device_attribute *aac_dev_attrs[] = {
    &aac_raid_level_attr,
    NULL,
};

static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
    struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
    if (!capable(CAP_SYS_RAWIO))
        return -EPERM;
    return aac_do_ioctl(dev, cmd, arg);
}

static int aac_eh_abort(struct scsi_cmnd *cmd)
{
    struct scsi_device *dev = cmd->device;
    struct Scsi_Host *host = dev->host;
    struct aac_dev *aac = (struct aac_dev *)host->hostdata;
    int count;
    int ret = FAILED;

    printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%llu)\n",
        AAC_DRIVERNAME,
        host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
    switch (cmd->cmnd[0]) {
    case SERVICE_ACTION_IN_16:
        if (!(aac->raw_io_interface) ||
            !(aac->raw_io_64) ||
            ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
            break;
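        /*
         * Otherwise this is a READ CAPACITY(16) on a raw-I/O capable
         * adapter: fall through and handle it like INQUIRY and
         * READ CAPACITY.
         */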
    case INQUIRY:
    case READ_CAPACITY:
        /* Mark associated FIB to not complete, eh handler does this */
        for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
            struct fib *fib = &aac->fibs[count];
            if (fib->hw_fib_va->header.XferState &&
                (fib->flags & FIB_CONTEXT_FLAG) &&
                (fib->callback_data == cmd)) {
                fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
                cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
                ret = SUCCESS;
            }
        }
        break;
    case TEST_UNIT_READY:
        /* Mark associated FIB to not complete, eh handler does this */
        for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
            struct scsi_cmnd *command;
            struct fib *fib = &aac->fibs[count];
            if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
                (fib->flags & FIB_CONTEXT_FLAG) &&
                ((command = fib->callback_data)) &&
                (command->device == cmd->device)) {
                fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
                command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
                if (command == cmd)
                    ret = SUCCESS;
            }
        }
    }
    return ret;
}

/*
 * aac_eh_reset - Reset command handling
 * @cmd: SCSI command block causing the reset
 *
 */
static int aac_eh_reset(struct scsi_cmnd *cmd)
{
    struct scsi_device *dev = cmd->device;
    struct Scsi_Host *host = dev->host;
    struct scsi_cmnd *command;
    int count;
    struct aac_dev *aac = (struct aac_dev *)host->hostdata;
    unsigned long flags;

    /* Mark the associated FIB to not complete, eh handler does this */
    for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
        struct fib *fib = &aac->fibs[count];
        if (fib->hw_fib_va->header.XferState &&
            (fib->flags & FIB_CONTEXT_FLAG) &&
            (fib->callback_data == cmd)) {
            fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
            cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
        }
    }
    printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
        AAC_DRIVERNAME);

    if ((count = aac_check_health(aac)))
        return count;
    /*
     * Wait for all commands to complete to this specific
     * target (block maximum 60 seconds).
     */
    for (count = 60; count; --count) {
        int active = aac->in_reset;

        if (active == 0)
            __shost_for_each_device(dev, host) {
                spin_lock_irqsave(&dev->list_lock, flags);
                list_for_each_entry(command, &dev->cmd_list, list) {
                    if ((command != cmd) &&
                        (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
                        active++;
                        break;
                    }
                }
                spin_unlock_irqrestore(&dev->list_lock, flags);
                if (active)
                    break;
            }
        /*
         * We can exit if all the commands are complete
         */
        if (active == 0)
            return SUCCESS;
        ssleep(1);
    }
    printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
    /*
     * This adapter needs a blind reset, only do so for Adapters that
     * support a register, instead of a commanded, reset.
     */
    if (((aac->supplement_adapter_info.SupportedOptions2 &
          AAC_OPTION_MU_RESET) ||
         (aac->supplement_adapter_info.SupportedOptions2 &
          AAC_OPTION_DOORBELL_RESET)) &&
        aac_check_reset &&
        ((aac_check_reset != 1) ||
         !(aac->supplement_adapter_info.SupportedOptions2 &
           AAC_OPTION_IGNORE_RESET)))
        aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
    return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
}

/**
 * aac_cfg_open - open a configuration file
 * @inode: inode being opened
 * @file: file handle attached
 *
 * Called when the configuration device is opened.  Does the needed
 * set up on the handle and then returns
 *
 * Bugs: This needs extending to check a given adapter is present
 * so we can support hot plugging, and to ref count adapters.
 */
static int aac_cfg_open(struct inode *inode, struct file *file)
{
    struct aac_dev *aac;
    unsigned minor_number = iminor(inode);
    int err = -ENODEV;

    mutex_lock(&aac_mutex);  /* BKL pushdown: nothing else protects this list */
    list_for_each_entry(aac, &aac_devices, entry) {
        if (aac->id == minor_number) {
            file->private_data = aac;
            err = 0;
            break;
        }
    }
    mutex_unlock(&aac_mutex);

    return err;
}

/**
 * aac_cfg_ioctl - AAC configuration request
 * @inode: inode of device
 * @file: file handle
 * @cmd: ioctl command code
 * @arg: argument
 *
 * Handles a configuration ioctl.  Currently this involves wrapping it
 * up and feeding it into the nasty windowsalike glue layer.
 *
 * Bugs: Needs locking against parallel ioctls lower down
 * Bugs: Needs to handle hot plugging
 */
static long aac_cfg_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
    struct aac_dev *aac = (struct aac_dev *)file->private_data;

    if (!capable(CAP_SYS_RAWIO))
        return -EPERM;

    return aac_do_ioctl(aac, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
{
    long ret;
    switch (cmd) {
    case FSACTL_MINIPORT_REV_CHECK:
    case FSACTL_SENDFIB:
    case FSACTL_OPEN_GET_ADAPTER_FIB:
    case FSACTL_CLOSE_GET_ADAPTER_FIB:
    case FSACTL_SEND_RAW_SRB:
    case FSACTL_GET_PCI_INFO:
    case FSACTL_QUERY_DISK:
    case FSACTL_DELETE_DISK:
    case FSACTL_FORCE_DELETE_DISK:
    case FSACTL_GET_CONTAINERS:
    case FSACTL_SEND_LARGE_FIB:
        ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
        break;
    case FSACTL_GET_NEXT_ADAPTER_FIB: {
        struct fib_ioctl __user *f;
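        /*
         * A compat caller's fib_ioctl ends in a 32-bit pointer.  Build a
         * zeroed native copy and copy in everything but the final u32 so
         * the 32-bit pointer lands in the (zeroed) 64-bit pointer field.
         */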
        f = compat_alloc_user_space(sizeof(*f));
        ret = 0;
        if (clear_user(f, sizeof(*f)))
            ret = -EFAULT;
        if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
            ret = -EFAULT;
        if (!ret)
            ret = aac_do_ioctl(dev, cmd, f);
        break;
    }
    default:
        ret = -ENOIOCTLCMD;
        break;
    }
    return ret;
}

static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
    struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
    if (!capable(CAP_SYS_RAWIO))
        return -EPERM;
    return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
}

static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
    if (!capable(CAP_SYS_RAWIO))
        return -EPERM;
    return aac_compat_do_ioctl(file->private_data, cmd, arg);
}
#endif

static ssize_t aac_show_model(struct device *device,
                              struct device_attribute *attr, char *buf)
{
    struct aac_dev *dev = (struct aac_dev *)class_to_shost(device)->hostdata;
    int len;

    if (dev->supplement_adapter_info.AdapterTypeText[0]) {
        char *cp = dev->supplement_adapter_info.AdapterTypeText;
        while (*cp && *cp != ' ')
            ++cp;
        while (*cp == ' ')
            ++cp;
        len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
    } else
        len = snprintf(buf, PAGE_SIZE, "%s\n",
            aac_drivers[dev->cardtype].model);
    return len;
}

static ssize_t aac_show_vendor(struct device *device,
                               struct device_attribute *attr, char *buf)
{
    struct aac_dev *dev = (struct aac_dev *)class_to_shost(device)->hostdata;
    int len;

    if (dev->supplement_adapter_info.AdapterTypeText[0]) {
        char *cp = dev->supplement_adapter_info.AdapterTypeText;
        while (*cp && *cp != ' ')
            ++cp;
        len = snprintf(buf, PAGE_SIZE, "%.*s\n",
            (int)(cp - (char *)dev->supplement_adapter_info.AdapterTypeText),
            dev->supplement_adapter_info.AdapterTypeText);
    } else
        len = snprintf(buf, PAGE_SIZE, "%s\n",
            aac_drivers[dev->cardtype].vname);
    return len;
}

static ssize_t aac_show_flags(struct device *cdev,
                              struct device_attribute *attr, char *buf)
{
    int len = 0;
    struct aac_dev *dev = (struct aac_dev *)class_to_shost(cdev)->hostdata;

    if (nblank(dprintk(x)))
        len = snprintf(buf, PAGE_SIZE, "dprintk\n");
#ifdef AAC_DETAILED_STATUS_INFO
    len += snprintf(buf + len, PAGE_SIZE - len,
            "AAC_DETAILED_STATUS_INFO\n");
#endif
    if (dev->raw_io_interface && dev->raw_io_64)
        len += snprintf(buf + len, PAGE_SIZE - len,
                "SAI_READ_CAPACITY_16\n");
    if (dev->jbod)
        len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
    if (dev->supplement_adapter_info.SupportedOptions2 &
        AAC_OPTION_POWER_MANAGEMENT)
        len += snprintf(buf + len, PAGE_SIZE - len,
                "SUPPORTED_POWER_MANAGEMENT\n");
    if (dev->msi)
        len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
    return len;
}

static ssize_t aac_show_kernel_version(struct device *device,
                                       struct device_attribute *attr,
                                       char *buf)
{
    struct aac_dev *dev = (struct aac_dev *)class_to_shost(device)->hostdata;
    int len, tmp;

    tmp = le32_to_cpu(dev->adapter_info.kernelrev);
    len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
        tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
        le32_to_cpu(dev->adapter_info.kernelbuild));
    return len;
}

static ssize_t aac_show_monitor_version(struct device *device,
                                        struct device_attribute *attr,
                                        char *buf)
{
    struct aac_dev *dev = (struct aac_dev *)class_to_shost(device)->hostdata;
    int len, tmp;

    tmp = le32_to_cpu(dev->adapter_info.monitorrev);
    len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
        tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
        le32_to_cpu(dev->adapter_info.monitorbuild));
    return len;
}

static ssize_t aac_show_bios_version(struct device *device,
                                     struct device_attribute *attr,
                                     char *buf)
{
    struct aac_dev *dev = (struct aac_dev *)class_to_shost(device)->hostdata;
    int len, tmp;

    tmp = le32_to_cpu(dev->adapter_info.biosrev);
    len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
        tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
        le32_to_cpu(dev->adapter_info.biosbuild));
    return len;
}

static ssize_t aac_show_serial_number(struct device *device,
                                      struct device_attribute *attr, char *buf)
{
    struct aac_dev *dev = (struct aac_dev *)class_to_shost(device)->hostdata;
    int len = 0;
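    /*
     * Report the six hex digit adapter serial number, unless it also
     * appears at the tail of the manufacturing PCBA serial number, in
     * which case the full PCBA serial number is reported instead.
     */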
    if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
        len = snprintf(buf, 16, "%06X\n",
            le32_to_cpu(dev->adapter_info.serial[0]));
    if (len &&
        !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
            sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
            buf, len-1))
        len = snprintf(buf, 16, "%.*s\n",
            (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
            dev->supplement_adapter_info.MfgPcbaSerialNo);
    return min(len, 16);
}

static ssize_t aac_show_max_channel(struct device *device,
                                    struct device_attribute *attr, char *buf)
{
    return snprintf(buf, PAGE_SIZE, "%d\n",
        class_to_shost(device)->max_channel);
}

static ssize_t aac_show_max_id(struct device *device,
                               struct device_attribute *attr, char *buf)
{
    return snprintf(buf, PAGE_SIZE, "%d\n",
        class_to_shost(device)->max_id);
}

static ssize_t aac_store_reset_adapter(struct device *device,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
    int retval = -EACCES;

    if (!capable(CAP_SYS_ADMIN))
        return retval;
    retval = aac_reset_adapter((struct aac_dev *)class_to_shost(device)->hostdata, buf[0] == '!');
    if (retval >= 0)
        retval = count;
    return retval;
}

static ssize_t aac_show_reset_adapter(struct device *device,
                                      struct device_attribute *attr,
                                      char *buf)
{
    struct aac_dev *dev = (struct aac_dev *)class_to_shost(device)->hostdata;
    int len, tmp;

    tmp = aac_adapter_check_health(dev);
    if ((tmp == 0) && dev->in_reset)
        tmp = -EBUSY;
    len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
    return len;
}

static struct device_attribute aac_model = {
    .attr = {
        .name = "model",
        .mode = S_IRUGO,
    },
    .show = aac_show_model,
};
static struct device_attribute aac_vendor = {
    .attr = {
        .name = "vendor",
        .mode = S_IRUGO,
    },
    .show = aac_show_vendor,
};
static struct device_attribute aac_flags = {
    .attr = {
        .name = "flags",
        .mode = S_IRUGO,
    },
    .show = aac_show_flags,
};
static struct device_attribute aac_kernel_version = {
    .attr = {
        .name = "hba_kernel_version",
        .mode = S_IRUGO,
    },
    .show = aac_show_kernel_version,
};
static struct device_attribute aac_monitor_version = {
    .attr = {
        .name = "hba_monitor_version",
        .mode = S_IRUGO,
    },
    .show = aac_show_monitor_version,
};
static struct device_attribute aac_bios_version = {
    .attr = {
        .name = "hba_bios_version",
        .mode = S_IRUGO,
    },
    .show = aac_show_bios_version,
};
static struct device_attribute aac_serial_number = {
    .attr = {
        .name = "serial_number",
        .mode = S_IRUGO,
    },
    .show = aac_show_serial_number,
};
static struct device_attribute aac_max_channel = {
    .attr = {
        .name = "max_channel",
        .mode = S_IRUGO,
    },
    .show = aac_show_max_channel,
};
static struct device_attribute aac_max_id = {
    .attr = {
        .name = "max_id",
        .mode = S_IRUGO,
    },
    .show = aac_show_max_id,
};
static struct device_attribute aac_reset = {
    .attr = {
        .name = "reset_host",
        .mode = S_IWUSR|S_IRUGO,
    },
    .store = aac_store_reset_adapter,
    .show = aac_show_reset_adapter,
};

static struct device_attribute *aac_attrs[] = {
    &aac_model,
    &aac_vendor,
    &aac_flags,
    &aac_kernel_version,
    &aac_monitor_version,
    &aac_bios_version,
    &aac_serial_number,
    &aac_max_channel,
    &aac_max_id,
    &aac_reset,
    NULL
};

ssize_t aac_get_serial_number(struct device *device, char *buf)
{
    return aac_show_serial_number(device, &aac_serial_number, buf);
}

static const struct file_operations aac_cfg_fops = {
    .owner = THIS_MODULE,
    .unlocked_ioctl = aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = aac_compat_cfg_ioctl,
#endif
    .open = aac_cfg_open,
    .llseek = noop_llseek,
};

static struct scsi_host_template aac_driver_template = {
    .module = THIS_MODULE,
    .name = "AAC",
    .proc_name = AAC_DRIVERNAME,
    .info = aac_info,
    .ioctl = aac_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = aac_compat_ioctl,
#endif
    .queuecommand = aac_queuecommand,
    .bios_param = aac_biosparm,
    .shost_attrs = aac_attrs,
    .slave_configure = aac_slave_configure,
    .change_queue_depth = aac_change_queue_depth,
    .sdev_attrs = aac_dev_attrs,
    .eh_abort_handler = aac_eh_abort,
    .eh_host_reset_handler = aac_eh_reset,
    .can_queue = AAC_NUM_IO_FIB,
    .this_id = MAXIMUM_NUM_CONTAINERS,
    .sg_tablesize = 16,
    .max_sectors = 128,
#if (AAC_NUM_IO_FIB > 256)
    .cmd_per_lun = 256,
#else
    .cmd_per_lun = AAC_NUM_IO_FIB,
#endif
    .use_clustering = ENABLE_CLUSTERING,
    .emulated = 1,
    .no_write_same = 1,
};

static void __aac_shutdown(struct aac_dev *aac)
{
    int i;
    int cpu;
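    /*
     * Shutdown order: tell the firmware we are going away, wake any waiters
     * on outstanding synchronous FIBs so the AIF thread can be stopped,
     * then disable and release the adapter interrupts (per vector when
     * MSI-X is in use).
     */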
    aac_send_shutdown(aac);

    if (aac->aif_thread) {
        int i;
        /* Clear out events first */
        for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
            struct fib *fib = &aac->fibs[i];
            if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
                (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
                up(&fib->event_wait);
        }
        kthread_stop(aac->thread);
    }
    aac_adapter_disable_int(aac);
    cpu = cpumask_first(cpu_online_mask);
    if (aac->pdev->device == PMC_DEVICE_S6 ||
        aac->pdev->device == PMC_DEVICE_S7 ||
        aac->pdev->device == PMC_DEVICE_S8 ||
        aac->pdev->device == PMC_DEVICE_S9) {
        if (aac->max_msix > 1) {
            for (i = 0; i < aac->max_msix; i++) {
                if (irq_set_affinity_hint(
                        aac->msixentry[i].vector,
                        NULL)) {
                    printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
                        aac->name,
                        aac->id,
                        cpu);
                }
                cpu = cpumask_next(cpu,
                        cpu_online_mask);
                free_irq(aac->msixentry[i].vector,
                        &(aac->aac_msix[i]));
            }
        } else {
            free_irq(aac->pdev->irq,
                &(aac->aac_msix[0]));
        }
    } else {
        free_irq(aac->pdev->irq, aac);
    }
    if (aac->msi)
        pci_disable_msi(aac->pdev);
    else if (aac->max_msix > 1)
        pci_disable_msix(aac->pdev);
}

static void aac_init_char(void)
{
    aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
    if (aac_cfg_major < 0) {
        pr_err("aacraid: unable to register \"aac\" device.\n");
    }
}

static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
    unsigned index = id->driver_data;
    struct Scsi_Host *shost;
    struct aac_dev *aac;
    struct list_head *insert = &aac_devices;
    int error = -ENODEV;
    int unique_id = 0;
    u64 dmamask;
    extern int aac_sync_mode;

    /*
     * Only series 7 needs freset.
     */
    if (pdev->device == PMC_DEVICE_S7)
        pdev->needs_freset = 1;

    list_for_each_entry(aac, &aac_devices, entry) {
        if (aac->id > unique_id)
            break;
        insert = &aac->entry;
        unique_id++;
    }
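    /*
     * The walk above picks the lowest free adapter id and remembers the
     * list position after which the new adapter keeps aac_devices sorted
     * by id.
     */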
  1053. pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
  1054. PCIE_LINK_STATE_CLKPM);
  1055. error = pci_enable_device(pdev);
  1056. if (error)
  1057. goto out;
  1058. error = -ENODEV;
  1059. /*
  1060. * If the quirk31 bit is set, the adapter needs adapter
  1061. * to driver communication memory to be allocated below 2gig
  1062. */
  1063. if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
  1064. dmamask = DMA_BIT_MASK(31);
  1065. else
  1066. dmamask = DMA_BIT_MASK(32);
  1067. if (pci_set_dma_mask(pdev, dmamask) ||
  1068. pci_set_consistent_dma_mask(pdev, dmamask))
  1069. goto out_disable_pdev;
  1070. pci_set_master(pdev);
  1071. shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
  1072. if (!shost)
  1073. goto out_disable_pdev;
  1074. shost->irq = pdev->irq;
  1075. shost->unique_id = unique_id;
  1076. shost->max_cmd_len = 16;
  1077. shost->use_cmd_list = 1;
  1078. if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
  1079. aac_init_char();
  1080. aac = (struct aac_dev *)shost->hostdata;
  1081. aac->base_start = pci_resource_start(pdev, 0);
  1082. aac->scsi_host_ptr = shost;
  1083. aac->pdev = pdev;
  1084. aac->name = aac_driver_template.name;
  1085. aac->id = shost->unique_id;
  1086. aac->cardtype = index;
  1087. INIT_LIST_HEAD(&aac->entry);
  1088. aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
  1089. if (!aac->fibs)
  1090. goto out_free_host;
  1091. spin_lock_init(&aac->fib_lock);
  1092. mutex_init(&aac->ioctl_mutex);
  1093. /*
  1094. * Map in the registers from the adapter.
  1095. */
  1096. aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
  1097. if ((*aac_drivers[index].init)(aac))
  1098. goto out_unmap;
  1099. if (aac->sync_mode) {
  1100. if (aac_sync_mode)
  1101. printk(KERN_INFO "%s%d: Sync. mode enforced "
  1102. "by driver parameter. This will cause "
  1103. "a significant performance decrease!\n",
  1104. aac->name,
  1105. aac->id);
  1106. else
  1107. printk(KERN_INFO "%s%d: Async. mode not supported "
  1108. "by current driver, sync. mode enforced."
  1109. "\nPlease update driver to get full performance.\n",
  1110. aac->name,
  1111. aac->id);
  1112. }
  1113. /*
  1114. * Start any kernel threads needed
  1115. */
  1116. aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
  1117. if (IS_ERR(aac->thread)) {
  1118. printk(KERN_ERR "aacraid: Unable to create command thread.\n");
  1119. error = PTR_ERR(aac->thread);
  1120. aac->thread = NULL;
  1121. goto out_deinit;
  1122. }
  1123. /*
  1124. * If we had set a smaller DMA mask earlier, set it to 4gig
  1125. * now since the adapter can dma data to at least a 4gig
  1126. * address space.
  1127. */
  1128. if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
  1129. if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
  1130. goto out_deinit;
  1131. aac->maximum_num_channels = aac_drivers[index].channels;
  1132. error = aac_get_adapter_info(aac);
  1133. if (error < 0)
  1134. goto out_deinit;
  1135. /*
  1136. * Lets override negotiations and drop the maximum SG limit to 34
  1137. */
  1138. if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
  1139. (shost->sg_tablesize > 34)) {
  1140. shost->sg_tablesize = 34;
  1141. shost->max_sectors = (shost->sg_tablesize * 8) + 112;
  1142. }
  1143. if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
  1144. (shost->sg_tablesize > 17)) {
  1145. shost->sg_tablesize = 17;
  1146. shost->max_sectors = (shost->sg_tablesize * 8) + 112;
  1147. }
  1148. error = pci_set_dma_max_seg_size(pdev,
  1149. (aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
  1150. (shost->max_sectors << 9) : 65536);
  1151. if (error)
  1152. goto out_deinit;
  1153. /*
  1154. * Firmware printf works only with older firmware.
  1155. */
  1156. if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
  1157. aac->printf_enabled = 1;
  1158. else
  1159. aac->printf_enabled = 0;
  1160. /*
  1161. * max channel will be the physical channels plus 1 virtual channel
  1162. * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
  1163. * physical channels are address by their actual physical number+1
  1164. */
  1165. if (aac->nondasd_support || expose_physicals || aac->jbod)
  1166. shost->max_channel = aac->maximum_num_channels;
  1167. else
  1168. shost->max_channel = 0;
  1169. aac_get_config_status(aac, 0);
  1170. aac_get_containers(aac);
  1171. list_add(&aac->entry, insert);
  1172. shost->max_id = aac->maximum_num_containers;
  1173. if (shost->max_id < aac->maximum_num_physicals)
  1174. shost->max_id = aac->maximum_num_physicals;
  1175. if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
  1176. shost->max_id = MAXIMUM_NUM_CONTAINERS;
  1177. else
  1178. shost->this_id = shost->max_id;
  1179. if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
  1180. aac_intr_normal(aac, 0, 2, 0, NULL);
	/*
	 * dmb - we may need to move the setting of these parms somewhere else once
	 * we get a fib that can report the actual numbers
	 */
	shost->max_lun = AAC_MAX_LUN;

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_deinit;
	scsi_scan_host(shost);

	pci_enable_pcie_error_reporting(pdev);
	pci_save_state(pdev);

	return 0;
 out_deinit:
	__aac_shutdown(aac);
 out_unmap:
	aac_fib_map_free(aac);
	if (aac->comm_addr)
		pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
				    aac->comm_phys);
	kfree(aac->queues);
	aac_adapter_ioremap(aac, 0);
	kfree(aac->fibs);
	kfree(aac->fsa_dev);
 out_free_host:
	scsi_host_put(shost);
 out_disable_pdev:
	pci_disable_device(pdev);
 out:
	return error;
}
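
/*
 * aac_release_resources - free the adapter's interrupt resources
 *
 * Frees the per-vector MSI-X interrupts (or the single legacy/MSI
 * interrupt) on PMC Series-6/7/8/9 controllers, the plain legacy IRQ
 * otherwise, and then disables MSI/MSI-X on the device.  Used by the
 * suspend and PCI error-recovery paths before the controller is
 * reinitialised.
 */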
static void aac_release_resources(struct aac_dev *aac)
{
	int i;

	aac_adapter_disable_int(aac);
	if (aac->pdev->device == PMC_DEVICE_S6 ||
	    aac->pdev->device == PMC_DEVICE_S7 ||
	    aac->pdev->device == PMC_DEVICE_S8 ||
	    aac->pdev->device == PMC_DEVICE_S9) {
		if (aac->max_msix > 1) {
			for (i = 0; i < aac->max_msix; i++)
				free_irq(aac->msixentry[i].vector,
					 &(aac->aac_msix[i]));
		} else {
			free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
		}
	} else {
		free_irq(aac->pdev->irq, aac);
	}

	if (aac->msi)
		pci_disable_msi(aac->pdev);
	else if (aac->max_msix > 1)
		pci_disable_msix(aac->pdev);
}
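
/*
 * aac_acquire_resources - bring the adapter's interrupts back up
 *
 * Waits for the firmware to report KERNEL_UP_AND_RUNNING, re-selects the
 * interrupt mode (MSI-X, MSI or legacy INTx), re-registers the interrupt
 * handlers, re-assigns vectors to the FIBs and restarts the adapter.
 * Counterpart of aac_release_resources() for the resume and PCI
 * error-recovery paths.
 */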
static int aac_acquire_resources(struct aac_dev *dev)
{
	int i, j;
	int instance = dev->id;
	const char *name = dev->name;
	unsigned long status;
	/*
	 * First clear out all interrupts.  Then enable the ones that we
	 * can handle.
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)
		|| status == 0xffffffff)
		msleep(20);

	aac_adapter_disable_int(dev);
	aac_adapter_enable_int(dev);

	if ((dev->pdev->device == PMC_DEVICE_S7 ||
	     dev->pdev->device == PMC_DEVICE_S8 ||
	     dev->pdev->device == PMC_DEVICE_S9))
		aac_define_int_mode(dev);

	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;

			if (request_irq(dev->msixentry[i].vector,
					dev->a_ops.adapter_intr,
					0, "aacraid", &(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
						name, instance, i);
				for (j = 0 ; j < i ; j++)
					free_irq(dev->msixentry[j].vector,
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				goto error_iounmap;
			}
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
				IRQF_SHARED, "aacraid",
				&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);

			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
					name, instance);
			goto error_iounmap;
		}
	}

	aac_adapter_enable_int(dev);

	/*
	 * max msix may change after EEH
	 * Re-assign vectors to fibs
	 */
	aac_fib_vector_assign(dev);

	if (!dev->sync_mode) {
		/* After EEH recovery or suspend resume, max_msix count
		 * may change, therefore updating in init as well.
		 */
		dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
		aac_adapter_start(dev);
	}
	return 0;

error_iounmap:
	return -1;
}
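
/*
 * Legacy PCI power-management hooks.  Suspend quiesces the SCSI host,
 * sends a shutdown to the firmware and releases interrupts; resume
 * re-enables the device and rebuilds the interrupt setup via
 * aac_acquire_resources().
 */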
#if (defined(CONFIG_PM))
static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	scsi_block_requests(shost);
	aac_send_shutdown(aac);

	aac_release_resources(aac);

	pci_set_drvdata(pdev, shost);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int aac_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
	int r;

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	r = pci_enable_device(pdev);

	if (r)
		goto fail_device;

	pci_set_master(pdev);
	if (aac_acquire_resources(aac))
		goto fail_device;
	/*
	 * reset this flag to unblock ioctl() as it was set at
	 * aac_send_shutdown() to block ioctls from upperlayer
	 */
	aac->adapter_shutdown = 0;
	scsi_unblock_requests(shost);

	return 0;

fail_device:
	printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id);
	scsi_host_put(shost);
	pci_disable_device(pdev);
	return -ENODEV;
}
#endif
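
/*
 * aac_shutdown - PCI shutdown callback
 *
 * Blocks further SCSI requests and tells the firmware to shut down so
 * the controller is quiescent before reboot or power-off.
 */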
static void aac_shutdown(struct pci_dev *dev)
{
	struct Scsi_Host *shost = pci_get_drvdata(dev);

	scsi_block_requests(shost);
	__aac_shutdown((struct aac_dev *)shost->hostdata);
}
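
/*
 * aac_remove_one - PCI remove callback
 *
 * Tears down the SCSI host, shuts the adapter down, frees the FIBs, the
 * communication area and the register mapping, and unregisters the "aac"
 * character device once the last adapter has been removed.
 */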
static void aac_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	scsi_remove_host(shost);

	__aac_shutdown(aac);
	aac_fib_map_free(aac);
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
			    aac->comm_phys);
	kfree(aac->queues);

	aac_adapter_ioremap(aac, 0);

	kfree(aac->fibs);
	kfree(aac->fsa_dev);

	list_del(&aac->entry);
	scsi_host_put(shost);
	pci_disable_device(pdev);
	if (list_empty(&aac_devices)) {
		unregister_chrdev(aac_cfg_major, "aac");
		aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT;
	}
}
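
/*
 * aac_flush_ios - complete all commands still owned by the firmware
 *
 * Walks the FIB table and finishes every outstanding command with
 * DID_NO_CONNECT (fatal PCI error) or DID_RESET (recoverable error) so
 * the midlayer does not wait on I/O the controller can no longer answer.
 */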
static void aac_flush_ios(struct aac_dev *aac)
{
	int i;
	struct scsi_cmnd *cmd;

	for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
		cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
		if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
			scsi_dma_unmap(cmd);

			if (aac->handle_pci_error)
				cmd->result = DID_NO_CONNECT << 16;
			else
				cmd->result = DID_RESET << 16;

			cmd->scsi_done(cmd);
		}
	}
}
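
/*
 * PCI AER (Advanced Error Reporting) recovery callbacks.  On a frozen
 * channel the driver flushes outstanding I/O and releases its resources;
 * after a successful slot reset, aac_pci_resume() remaps the adapter and
 * brings the SCSI host back online.
 */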
static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
					enum pci_channel_state error)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = shost_priv(shost);

	dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error);

	switch (error) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		aac->handle_pci_error = 1;

		scsi_block_requests(aac->scsi_host_ptr);
		aac_flush_ios(aac);
		aac_release_resources(aac);

		pci_disable_pcie_error_reporting(pdev);
		aac_adapter_ioremap(aac, 0);

		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		aac->handle_pci_error = 1;

		aac_flush_ios(aac);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}
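
/*
 * MMIO access has been re-enabled by the PCI core, but the adapter still
 * requires a full slot reset before it can be used again.
 */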
static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n");
	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n");
	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		dev_warn(&pdev->dev,
			"aacraid: failed to enable slave\n");
		goto fail_device;
	}

	pci_set_master(pdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev, "pci_enable_device_mem failed\n");
		goto fail_device;
	}

	return PCI_ERS_RESULT_RECOVERED;

fail_device:
	dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n");
	return PCI_ERS_RESULT_DISCONNECT;
}
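
/*
 * aac_pci_resume - final step of AER recovery
 *
 * Remaps the adapter registers (falling back to the minimal footprint if
 * the full mapping fails), reacquires interrupts, clears the shutdown
 * flag set by aac_send_shutdown(), puts offlined devices back online and
 * rescans the host.
 */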
static void aac_pci_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct scsi_device *sdev = NULL;
	struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);

	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (aac_adapter_ioremap(aac, aac->base_size)) {
		dev_err(&pdev->dev, "aacraid: ioremap failed\n");
		/* remap failed, go back ... */
		aac->comm_interface = AAC_COMM_PRODUCER;
		if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) {
			dev_warn(&pdev->dev,
				"aacraid: unable to map adapter.\n");
			return;
		}
	}

	msleep(10000);

	aac_acquire_resources(aac);

	/*
	 * reset this flag to unblock ioctl() as it was set
	 * at aac_send_shutdown() to block ioctls from upperlayer
	 */
	aac->adapter_shutdown = 0;
	aac->handle_pci_error = 0;

	shost_for_each_device(sdev, shost)
		if (sdev->sdev_state == SDEV_OFFLINE)
			sdev->sdev_state = SDEV_RUNNING;
	scsi_unblock_requests(aac->scsi_host_ptr);
	scsi_scan_host(aac->scsi_host_ptr);
	pci_save_state(pdev);

	dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
}
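
/*
 * Error handlers and driver entry points registered with the PCI core.
 */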
static struct pci_error_handlers aac_pci_err_handler = {
	.error_detected		= aac_pci_error_detected,
	.mmio_enabled		= aac_pci_mmio_enabled,
	.slot_reset		= aac_pci_slot_reset,
	.resume			= aac_pci_resume,
};

static struct pci_driver aac_pci_driver = {
	.name		= AAC_DRIVERNAME,
	.id_table	= aac_pci_tbl,
	.probe		= aac_probe_one,
	.remove		= aac_remove_one,
#if (defined(CONFIG_PM))
	.suspend	= aac_suspend,
	.resume		= aac_resume,
#endif
	.shutdown	= aac_shutdown,
	.err_handler	= &aac_pci_err_handler,
};
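
/*
 * Module init/exit: register the PCI driver and the "aac" control
 * character device on load; unregister both on unload.
 */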
static int __init aac_init(void)
{
	int error;

	printk(KERN_INFO "Adaptec %s driver %s\n",
	       AAC_DRIVERNAME, aac_driver_version);

	error = pci_register_driver(&aac_pci_driver);
	if (error < 0)
		return error;

	aac_init_char();

	return 0;
}

static void __exit aac_exit(void)
{
	if (aac_cfg_major > -1)
		unregister_chrdev(aac_cfg_major, "aac");
	pci_unregister_driver(&aac_pci_driver);
}

module_init(aac_init);
module_exit(aac_exit);