port.c

/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "port.h"
#include "request.h"

#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
#define SCU_DUMMY_INDEX (0xFFFF)

#undef C
#define C(a) (#a)
static const char *port_state_name(enum sci_port_states state)
{
	static const char * const strings[] = PORT_STATES;

	return strings[state];
}
#undef C

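/*
 * Resolve the struct device used for log messages. Normal ports use their
 * physical index to walk back to ihost->ports[0]; the dummy port gets a
 * fixed offset since SCIC_SDS_DUMMY_PORT is not a valid array index.
 */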
static struct device *sciport_to_dev(struct isci_port *iport)
{
	int i = iport->physical_port_index;
	struct isci_port *table;
	struct isci_host *ihost;

	if (i == SCIC_SDS_DUMMY_PORT)
		i = SCI_MAX_PORTS + 1;

	table = iport - i;
	ihost = container_of(table, typeof(*ihost), ports[0]);

	return &ihost->pdev->dev;
}

static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
{
	u8 index;

	proto->all = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		struct isci_phy *iphy = iport->phy_table[index];

		if (!iphy)
			continue;
		sci_phy_get_protocols(iphy, proto);
	}
}

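/* Return a bitmask of the phy indexes currently assigned to this port. */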
static u32 sci_port_get_phys(struct isci_port *iport)
{
	u32 index;
	u32 mask;

	mask = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index])
			mask |= (1 << index);

	return mask;
}

/**
 * sci_port_get_properties() - This method simply returns the properties
 *    regarding the port, such as: physical index, protocols, sas address, etc.
 * @iport: this parameter specifies the port for which to retrieve the physical
 *    index.
 * @prop: This parameter specifies the properties structure into which to
 *    copy the requested information.
 *
 * Indicate if the user specified a valid port. SCI_SUCCESS This value is
 * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This
 * value is returned if the specified port is not valid. When this value is
 * returned, no data is copied to the properties output parameter.
 */
enum sci_status sci_port_get_properties(struct isci_port *iport,
					struct sci_port_properties *prop)
{
	if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
		return SCI_FAILURE_INVALID_PORT;

	prop->index = iport->logical_port_index;
	prop->phy_mask = sci_port_get_phys(iport);
	sci_port_get_sas_address(iport, &prop->local.sas_address);
	sci_port_get_protocols(iport, &prop->local.protocols);
	sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);

	return SCI_SUCCESS;
}

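/*
 * Re-arm broadcast change notifications for each phy in the port: the
 * pending BCN bit in link_layer_control is cleared by writing back the
 * value that was just read.
 */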
static void sci_port_bcn_enable(struct isci_port *iport)
{
	struct isci_phy *iphy;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
		iphy = iport->phy_table[i];
		if (!iphy)
			continue;
		val = readl(&iphy->link_layer_registers->link_layer_control);
		/* clear the bit by writing 1. */
		writel(val, &iphy->link_layer_registers->link_layer_control);
	}
}

static void isci_port_bc_change_received(struct isci_host *ihost,
					 struct isci_port *iport,
					 struct isci_phy *iphy)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_phy = %p, sas_phy = %p\n",
		__func__, iphy, &iphy->sas_phy);

	sas_notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
	sci_port_bcn_enable(iport);
}

static void isci_port_link_up(struct isci_host *isci_host,
			      struct isci_port *iport,
			      struct isci_phy *iphy)
{
	unsigned long flags;
	struct sci_port_properties properties;
	unsigned long success = true;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n",
		__func__, iport);

	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);

	sci_port_get_properties(iport, &properties);

	if (iphy->protocol == SAS_PROTOCOL_SATA) {
		u64 attached_sas_address;

		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);

		/*
		 * For direct-attached SATA devices, the SCI core will
		 * automagically assign a SAS address to the end device
		 * for the purpose of creating a port. This SAS address
		 * will not be the same as assigned to the PHY and needs
		 * to be obtained from struct sci_port_properties properties.
		 */
		attached_sas_address = properties.remote.sas_address.high;
		attached_sas_address <<= 32;
		attached_sas_address |= properties.remote.sas_address.low;
		swab64s(&attached_sas_address);

		memcpy(&iphy->sas_phy.attached_sas_addr,
		       &attached_sas_address, sizeof(attached_sas_address));
	} else if (iphy->protocol == SAS_PROTOCOL_SSP) {
		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);

		/* Copy the attached SAS address from the IAF */
		memcpy(iphy->sas_phy.attached_sas_addr,
		       iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
	} else {
		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
		success = false;
	}

	iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);

	spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);

	/* Notify libsas that we have an address frame, if indeed
	 * we've found an SSP, SMP, or STP target */
	if (success)
		sas_notify_port_event(&iphy->sas_phy,
				      PORTE_BYTES_DMAED, GFP_ATOMIC);
}

/**
 * isci_port_link_down() - This function is called by the sci core when a link
 *    becomes inactive.
 * @isci_host: This parameter specifies the isci host object.
 * @isci_phy: This parameter specifies the isci phy with the active link.
 * @isci_port: This parameter specifies the isci port with the active link.
 */
static void isci_port_link_down(struct isci_host *isci_host,
				struct isci_phy *isci_phy,
				struct isci_port *isci_port)
{
	struct isci_remote_device *isci_device;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n", __func__, isci_port);

	if (isci_port) {
		/* check to see if this is the last phy on this port. */
		if (isci_phy->sas_phy.port &&
		    isci_phy->sas_phy.port->num_phys == 1) {
			/* change the state for all devices on this port. The
			 * next task sent to this device will be returned as
			 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
			 * remove the target
			 */
			list_for_each_entry(isci_device,
					    &isci_port->remote_dev_list,
					    node) {
				dev_dbg(&isci_host->pdev->dev,
					"%s: isci_device = %p\n",
					__func__, isci_device);
				set_bit(IDEV_GONE, &isci_device->flags);
			}
		}
	}

	/* Notify libsas of the broken link; this will trigger calls to our
	 * isci_port_deformed and isci_dev_gone functions.
	 */
	sas_phy_disconnected(&isci_phy->sas_phy);
	sas_notify_phy_event(&isci_phy->sas_phy,
			     PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p - Done\n", __func__, isci_port);
}

static bool is_port_ready_state(enum sci_port_states state)
{
	switch (state) {
	case SCI_PORT_READY:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
	case SCI_PORT_SUB_CONFIGURING:
		return true;
	default:
		return false;
	}
}

/* flag dummy rnc handling when exiting a ready state */
static void port_state_machine_change(struct isci_port *iport,
				      enum sci_port_states state)
{
	struct sci_base_state_machine *sm = &iport->sm;
	enum sci_port_states old_state = sm->current_state_id;

	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
		iport->ready_exit = true;

	sci_change_state(sm, state);
	iport->ready_exit = false;
}

/**
 * isci_port_hard_reset_complete() - This function is called by the sci core
 *    when the hard reset complete notification has been received.
 * @isci_port: This parameter specifies the sci port with the active link.
 * @completion_status: This parameter specifies the core status for the reset
 *    process.
 */
static void isci_port_hard_reset_complete(struct isci_port *isci_port,
					  enum sci_status completion_status)
{
	struct isci_host *ihost = isci_port->owning_controller;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_port = %p, completion_status=%x\n",
		__func__, isci_port, completion_status);

	/* Save the status of the hard reset from the port. */
	isci_port->hard_reset_status = completion_status;

	if (completion_status != SCI_SUCCESS) {

		/* The reset failed. The port state is now SCI_PORT_FAILED. */
		if (isci_port->active_phy_mask == 0) {
			int phy_idx = isci_port->last_active_phy;
			struct isci_phy *iphy = &ihost->phys[phy_idx];

			/* Generate the link down now to the host, since it
			 * was intercepted by the hard reset state machine when
			 * it really happened.
			 */
			isci_port_link_down(ihost, iphy, isci_port);
		}
		/* Advance the port state so that link state changes will be
		 * noticed.
		 */
		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
	}
	clear_bit(IPORT_RESET_PENDING, &isci_port->state);
	wake_up(&ihost->eventq);
}

/* This method will return a true value if the specified phy can be assigned
 * to this port.  The following is a list of phys for each port that are
 * allowed:
 *
 *   Port 0 - 3 2 1 0
 *   Port 1 - 1
 *   Port 2 - 3 2
 *   Port 3 - 3
 *
 * This method doesn't preclude all configurations.  It merely ensures that
 * a phy is part of the allowable set of phy identifiers for that port.  For
 * example, one could assign phy 3 to port 0 and no other phys.  Please refer
 * to sci_port_is_phy_mask_valid() for information regarding whether the
 * phy_mask for a port can be supported.
 *
 * Returns true if this is a valid phy assignment for the port, false if it
 * is not.
 */
bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
{
	struct isci_host *ihost = iport->owning_controller;
	struct sci_user_parameters *user = &ihost->user_parameters;

	/* Initialize to invalid value. */
	u32 existing_phy_index = SCI_MAX_PHYS;
	u32 index;

	if ((iport->physical_port_index == 1) && (phy_index != 1))
		return false;

	if (iport->physical_port_index == 3 && phy_index != 3)
		return false;

	if (iport->physical_port_index == 2 &&
	    (phy_index == 0 || phy_index == 1))
		return false;

	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index] && index != phy_index)
			existing_phy_index = index;

	/* Ensure that all of the phys in the port are capable of
	 * operating at the same maximum link rate.
	 */
	if (existing_phy_index < SCI_MAX_PHYS &&
	    user->phys[phy_index].max_speed_generation !=
	    user->phys[existing_phy_index].max_speed_generation)
		return false;

	return true;
}

/**
 * sci_port_is_phy_mask_valid()
 * @iport: This is the port object for which to determine if the phy mask
 *    can be supported.
 * @phy_mask: Phy mask belonging to this port
 *
 * This method will return a true value if the port's phy mask can be supported
 * by the SCU.  The following is a list of valid PHY mask configurations for
 * each port:
 *
 *   Port 0 - [[3 2] 1] 0
 *   Port 1 -        [1]
 *   Port 2 -    [[3] 2]
 *   Port 3 -        [3]
 *
 * Returns true if this is a valid phy mask for the port, false if it is not.
 */
static bool sci_port_is_phy_mask_valid(
	struct isci_port *iport,
	u32 phy_mask)
{
	if (iport->physical_port_index == 0) {
		if (((phy_mask & 0x0F) == 0x0F)
		    || ((phy_mask & 0x03) == 0x03)
		    || ((phy_mask & 0x01) == 0x01)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 1) {
		if (((phy_mask & 0x02) == 0x02)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 2) {
		if (((phy_mask & 0x0C) == 0x0C)
		    || ((phy_mask & 0x04) == 0x04)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 3) {
		if (((phy_mask & 0x08) == 0x08)
		    || (phy_mask == 0))
			return true;
	}

	return false;
}

/*
 * This method retrieves a currently active (i.e. connected) phy contained in
 * the port.  Currently, the lowest order phy that is connected is returned.
 * This method returns a pointer to a struct isci_phy object.  NULL is
 * returned if there are no currently active (i.e. connected to a remote end
 * point) phys contained in the port.  All other values specify a struct
 * isci_phy object that is active in the port.
 */
static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
{
	u32 index;
	struct isci_phy *iphy;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Ensure that the phy is both part of the port and currently
		 * connected to the remote end-point.
		 */
		iphy = iport->phy_table[index];
		if (iphy && sci_port_active_phy(iport, iphy))
			return iphy;
	}

	return NULL;
}

static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	/* Check to see if we can add this phy to a port;
	 * that means that the phy is not part of a port and that the port does
	 * not already have a phy assigned to the phy index.
	 */
	if (!iport->phy_table[iphy->phy_index] &&
	    !phy_get_non_dummy_port(iphy) &&
	    sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
		/* Phy is being added in the stopped state so we are in MPC mode;
		 * make logical port index = physical port index
		 */
		iport->logical_port_index = iport->physical_port_index;
		iport->phy_table[iphy->phy_index] = iphy;
		sci_phy_set_port(iphy, iport);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE;
}

static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	/* Make sure that this phy is part of this port */
	if (iport->phy_table[iphy->phy_index] == iphy &&
	    phy_get_non_dummy_port(iphy) == iport) {
		struct isci_host *ihost = iport->owning_controller;

		/* Yep it is assigned to this port so remove it */
		sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
		iport->phy_table[iphy->phy_index] = NULL;
		return SCI_SUCCESS;
	}

	return SCI_FAILURE;
}

void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
	u32 index;

	sas->high = 0;
	sas->low = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index])
			sci_phy_get_sas_address(iport->phy_table[index], sas);
}

void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
	struct isci_phy *iphy;

	/*
	 * Ensure that the phy is both part of the port and currently
	 * connected to the remote end-point.
	 */
	iphy = sci_port_get_a_connected_phy(iport);
	if (iphy) {
		if (iphy->protocol != SAS_PROTOCOL_SATA) {
			sci_phy_get_attached_sas_address(iphy, sas);
		} else {
			sci_phy_get_sas_address(iphy, sas);
			sas->low += iphy->phy_index;
		}
	} else {
		sas->high = 0;
		sas->low = 0;
	}
}

/**
 * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
 * @iport: logical port on which we need to create the remote node context
 * @rni: remote node index for this remote node context.
 *
 * This routine will construct a dummy remote node context data structure.
 * This structure will be posted to the hardware to work around a scheduler
 * error in the hardware.
 */
static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
{
	union scu_remote_node_context *rnc;

	rnc = &iport->owning_controller->remote_node_context_table[rni];

	memset(rnc, 0, sizeof(union scu_remote_node_context));

	rnc->ssp.remote_sas_address_hi = 0;
	rnc->ssp.remote_sas_address_lo = 0;

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = 1;
	rnc->ssp.logical_port_index = iport->physical_port_index;

	rnc->ssp.nexus_loss_timer_enable = false;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = true;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;
	rnc->ssp.arbitration_wait_time = 0;
}

/*
 * construct a dummy task context data structure.  This structure will be
 * posted to the hardware to work around a scheduler error in the hardware.
 */
static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
{
	struct isci_host *ihost = iport->owning_controller;
	struct scu_task_context *task_context;

	task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	memset(task_context, 0, sizeof(struct scu_task_context));

	task_context->initiator_request = 1;
	task_context->connection_rate = 1;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->task_index = ISCI_TAG_TCI(tag);
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
	task_context->remote_node_index = iport->reserved_rni;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->task_phase = 0x01;
}

static void sci_port_destroy_dummy_resources(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
		isci_free_tag(ihost, iport->reserved_tag);

	if (iport->reserved_rni != SCU_DUMMY_INDEX)
		sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
								1, iport->reserved_rni);

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
}

void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
{
	u8 index;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->active_phy_mask & (1 << index))
			sci_phy_setup_transport(iport->phy_table[index], device_id);
	}
}

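/* Resume the phy and record it as enabled in the port's enabled_phy_mask. */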
static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	sci_phy_resume(iphy);
	iport->enabled_phy_mask |= 1 << iphy->phy_index;
}

static void sci_port_activate_phy(struct isci_port *iport,
				  struct isci_phy *iphy,
				  u8 flags)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
		sci_phy_resume(iphy);

	iport->active_phy_mask |= 1 << iphy->phy_index;

	sci_controller_clear_invalid_phy(ihost, iphy);

	if (flags & PF_NOTIFY)
		isci_port_link_up(ihost, iport, iphy);
}

void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
			     bool do_notify_user)
{
	struct isci_host *ihost = iport->owning_controller;

	iport->active_phy_mask &= ~(1 << iphy->phy_index);
	iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
	if (!iport->active_phy_mask)
		iport->last_active_phy = iphy->phy_index;

	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;

	/* Re-assign the phy back to the LP as if it were a narrow port for APC
	 * mode. For MPC mode, the phy will remain in the port.
	 */
	if (iport->owning_controller->oem_parameters.controller.mode_type ==
	    SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
		writel(iphy->phy_index,
		       &iport->port_pe_configuration_register[iphy->phy_index]);

	if (do_notify_user == true)
		isci_port_link_down(ihost, iphy, iport);
}

static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Check to see if we have already reported this link as bad and if
	 * not go ahead and tell the SCI_USER that we have discovered an
	 * invalid link.
	 */
	if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
		ihost->invalid_phy_mask |= 1 << iphy->phy_index;
		dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
	}
}

/**
 * sci_port_general_link_up_handler - phy can be assigned to port?
 * @iport: sci_port object that has a phy which has gone link up.
 * @iphy: This is the struct isci_phy object that has gone link up.
 * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
 *
 * Determine if this phy can be assigned to this port.  If the phy is
 * not a valid PHY for this port then the function will notify the user.
 * A PHY can only be part of a port if its attached SAS address is the
 * same as all other PHYs in the same port.
 */
static void sci_port_general_link_up_handler(struct isci_port *iport,
					     struct isci_phy *iphy,
					     u8 flags)
{
	struct sci_sas_address port_sas_address;
	struct sci_sas_address phy_sas_address;

	sci_port_get_attached_sas_address(iport, &port_sas_address);
	sci_phy_get_attached_sas_address(iphy, &phy_sas_address);

	/* If the SAS address of the new phy matches the SAS address of
	 * other phys in the port OR this is the first phy in the port,
	 * then activate the phy and allow it to be used for operations
	 * in this port.
	 */
	if ((phy_sas_address.high == port_sas_address.high &&
	     phy_sas_address.low == port_sas_address.low) ||
	    iport->active_phy_mask == 0) {
		struct sci_base_state_machine *sm = &iport->sm;

		sci_port_activate_phy(iport, iphy, flags);
		if (sm->current_state_id == SCI_PORT_RESETTING)
			port_state_machine_change(iport, SCI_PORT_READY);
	} else
		sci_port_invalid_link_up(iport, iphy);
}

/**
 * sci_port_is_wide()
 * This method returns false if the port only has a single phy object assigned.
 * If there are no phys or more than one phy then the method will return true.
 * @iport: The port for which the wide port condition is to be checked.
 *
 * Returns true if this is a wide ported port, false if it is a narrow port.
 */
static bool sci_port_is_wide(struct isci_port *iport)
{
	u32 index;
	u32 phy_count = 0;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index] != NULL) {
			phy_count++;
		}
	}

	return phy_count != 1;
}

/**
 * sci_port_link_detected()
 * This method is called by the PHY object when the link is detected.  If the
 * port wants the PHY to continue on to the link up state then the port
 * layer must return true.  If the port object returns false the phy object
 * must halt its attempt to go link up.
 * @iport: The port associated with the phy object.
 * @iphy: The phy object that is trying to go link up.
 *
 * Returns true if the phy can continue on to the link up (ready) state,
 * false if it cannot.  This notification is in place for wide ports and
 * direct attached phys.  Since there are no wide ported SATA devices this
 * could become an invalid port configuration.
 */
bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
{
	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
	    (iphy->protocol == SAS_PROTOCOL_SATA)) {
		if (sci_port_is_wide(iport)) {
			sci_port_invalid_link_up(iport, iphy);
			return false;
		} else {
			struct isci_host *ihost = iport->owning_controller;
			struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);

			writel(iphy->phy_index,
			       &dst_port->port_pe_configuration_register[iphy->phy_index]);
		}
	}

	return true;
}

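/*
 * Callback for the port's timer (armed, for example, when a hard reset is
 * issued); the action taken depends on the port state when the timeout fires.
 */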
static void port_timeout(struct timer_list *t)
{
	struct sci_timer *tmr = from_timer(tmr, t, timer);
	struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
	struct isci_host *ihost = iport->owning_controller;
	unsigned long flags;
	u32 current_state;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	current_state = iport->sm.current_state_id;

	if (current_state == SCI_PORT_RESETTING) {
		/* if the port is still in the resetting state then the timeout
		 * fired before the reset completed.
		 */
		port_state_machine_change(iport, SCI_PORT_FAILED);
	} else if (current_state == SCI_PORT_STOPPED) {
		/* if the port is stopped then the start request failed.  In
		 * this case stay in the stopped state.
		 */
		dev_err(sciport_to_dev(iport),
			"%s: SCIC Port 0x%p failed to stop before timeout.\n",
			__func__,
			iport);
	} else if (current_state == SCI_PORT_STOPPING) {
		dev_dbg(sciport_to_dev(iport),
			"%s: port%d: stop complete timeout\n",
			__func__, iport->physical_port_index);
	} else {
		/* The port is in the ready state and we have a timer
		 * reporting a timeout; this should not happen.
		 */
		dev_err(sciport_to_dev(iport),
			"%s: SCIC Port 0x%p is processing a timeout operation "
			"in state %d.\n", __func__, iport, current_state);
	}

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

/* --------------------------------------------------------------------------- */

/*
 * This function updates the hardware's VIIT entry for this port.
 */
static void sci_port_update_viit_entry(struct isci_port *iport)
{
	struct sci_sas_address sas_address;

	sci_port_get_sas_address(iport, &sas_address);

	writel(sas_address.high,
	       &iport->viit_registers->initiator_sas_address_hi);
	writel(sas_address.low,
	       &iport->viit_registers->initiator_sas_address_lo);

	/* This value gets cleared just in case it's not already cleared */
	writel(0, &iport->viit_registers->reserved);

	/* We are required to update the status register last */
	writel(SCU_VIIT_ENTRY_ID_VIIT |
	       SCU_VIIT_IPPT_INITIATOR |
	       ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
	       SCU_VIIT_STATUS_ALL_VALID,
	       &iport->viit_registers->status);
}

enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
{
	u16 index;
	struct isci_phy *iphy;
	enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;

	/*
	 * Loop through all of the phys in this port and find the phy with the
	 * lowest maximum link rate.
	 */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		iphy = iport->phy_table[index];
		if (iphy && sci_port_active_phy(iport, iphy) &&
		    iphy->max_negotiated_speed < max_allowed_speed)
			max_allowed_speed = iphy->max_negotiated_speed;
	}

	return max_allowed_speed;
}

static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

/**
 * sci_port_post_dummy_request() - post dummy/workaround request
 * @iport: port to post task
 *
 * Prevent the hardware scheduler from posting new requests to the front
 * of the scheduler queue causing a starvation problem for currently
 * ongoing requests.
 */
static void sci_port_post_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 0;

	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}

/**
 * sci_port_abort_dummy_request()
 * This routine will abort the dummy request.  This will allow the hardware to
 * power down parts of the silicon to save power.
 *
 * @iport: The port on which the task must be aborted.
 */
static void sci_port_abort_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 1;

	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}

/**
 * sci_port_resume_port_task_scheduler()
 * @iport: This is the struct isci_port object to resume.
 *
 * This method will resume the port task scheduler for this port object.
 */
static void
sci_port_resume_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_port_suspend_port_task_scheduler(iport);

	iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;

	if (iport->active_phy_mask != 0) {
		/* At least one of the phys on the port is ready */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
	}
}

static void scic_sds_port_ready_substate_waiting_exit(
	struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	sci_port_resume_port_task_scheduler(iport);
}

static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
	u32 index;
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
		__func__, iport->physical_port_index);

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index]) {
			writel(iport->physical_port_index,
			       &iport->port_pe_configuration_register[
					iport->phy_table[index]->phy_index]);
			if (((iport->active_phy_mask ^ iport->enabled_phy_mask) &
			     (1 << index)) != 0)
				sci_port_resume_phy(iport, iport->phy_table[index]);
		}
	}

	sci_port_update_viit_entry(iport);

	/*
	 * Post the dummy task for the port so the hardware can schedule
	 * io correctly
	 */
	sci_port_post_dummy_request(iport);
}

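/*
 * Mark the port's dummy remote node context invalid and post the RNC
 * invalidate request to the controller (part of the scheduler workaround).
 */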
static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];

	rnc->ssp.is_valid = false;

	/* ensure the preceding tc abort request has reached the
	 * controller and give it ample time to act before posting the rnc
	 * invalidate
	 */
	readl(&ihost->smu_registers->interrupt_status);	/* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}

/**
 * sci_port_ready_substate_operational_exit()
 * @sm: This is the object which is cast to a struct isci_port object.
 *
 * This method will perform the actions required by the struct isci_port on
 * exiting the SCI_PORT_SUB_OPERATIONAL state.  This function reports the
 * port not ready and suspends the port task scheduler.
 */
static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Kill the dummy task for this port if it has not yet posted; the
	 * hardware will treat this as a NOP and just return abort complete.
	 */
	sci_port_abort_dummy_request(iport);

	dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
		__func__, iport->physical_port_index);

	if (iport->ready_exit)
		sci_port_invalidate_dummy_remote_node(iport);
}

static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	if (iport->active_phy_mask == 0) {
		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
			__func__, iport->physical_port_index);

		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
	} else
		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
}

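/*
 * Start the port: reserve the dummy remote node context and dummy task
 * context used for the scheduler workaround, verify that the assigned phy
 * mask is supported, and move the port to the READY state.
 */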
enum sci_status sci_port_start(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	enum sci_port_states state;
	u32 phy_mask;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_STOPPED) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	if (iport->assigned_device_count > 0) {
		/* TODO This is a start failure operation because
		 * there are still devices assigned to this port.
		 * There must be no devices assigned to a port on a
		 * start operation.
		 */
		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
	}

	if (iport->reserved_rni == SCU_DUMMY_INDEX) {
		u16 rni = sci_remote_node_table_allocate_remote_node(
				&ihost->available_remote_nodes, 1);

		if (rni != SCU_DUMMY_INDEX)
			sci_port_construct_dummy_rnc(iport, rni);
		else
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		iport->reserved_rni = rni;
	}

	if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		u16 tag;

		tag = isci_alloc_tag(ihost);
		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		else
			sci_port_construct_dummy_task(iport, tag);
		iport->reserved_tag = tag;
	}

	if (status == SCI_SUCCESS) {
		phy_mask = sci_port_get_phys(iport);

		/*
		 * There are one or more phys assigned to this port.  Make sure
		 * the port's phy mask is in fact legal and supported by the
		 * silicon.
		 */
		if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
			port_state_machine_change(iport,
						  SCI_PORT_READY);

			return SCI_SUCCESS;
		}
		status = SCI_FAILURE;
	}

	if (status != SCI_SUCCESS)
		sci_port_destroy_dummy_resources(iport);

	return status;
}

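/*
 * Request that the port stop.  A port that is already stopped succeeds
 * immediately; a ready or resetting port transitions to SCI_PORT_STOPPING.
 */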
enum sci_status sci_port_stop(struct isci_port *iport)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		return SCI_SUCCESS;
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
	case SCI_PORT_SUB_CONFIGURING:
	case SCI_PORT_RESETTING:
		port_state_machine_change(iport,
					  SCI_PORT_STOPPING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

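/*
 * Issue a hard reset on one of the port's connected phys, arm the port
 * timer with the caller's timeout, and move the port to SCI_PORT_RESETTING.
 */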
static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
{
	enum sci_status status = SCI_FAILURE_INVALID_PHY;
	struct isci_phy *iphy = NULL;
	enum sci_port_states state;
	u32 phy_index;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_SUB_OPERATIONAL) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Select a phy on which we can send the hard reset request. */
	for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
		iphy = iport->phy_table[phy_index];
		if (iphy && !sci_port_active_phy(iport, iphy)) {
			/*
			 * We found a phy but it is not ready; select a
			 * different phy.
			 */
			iphy = NULL;
		}
	}

	/* If we have a phy then go ahead and start the reset procedure */
	if (!iphy)
		return status;

	status = sci_phy_reset(iphy);
	if (status != SCI_SUCCESS)
		return status;

	sci_mod_timer(&iport->timer, timeout);
	iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;

	port_state_machine_change(iport, SCI_PORT_RESETTING);
	return SCI_SUCCESS;
}

/**
 * sci_port_add_phy()
 * @iport: This parameter specifies the port in which the phy will be added.
 * @iphy: This parameter is the phy which is to be added to the port.
 *
 * This method will add a PHY to the selected port.  This method returns an
 * enum sci_status.  SCI_SUCCESS the phy has been added to the port.  Any
 * other status is a failure to add the phy to the port.
 */
enum sci_status sci_port_add_phy(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	sci_port_bcn_enable(iport);

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED: {
		struct sci_sas_address port_sas_address;

		/* Read the port assigned SAS Address if there is one */
		sci_port_get_sas_address(iport, &port_sas_address);

		if (port_sas_address.high != 0 && port_sas_address.low != 0) {
			struct sci_sas_address phy_sas_address;

			/* Make sure that the PHY SAS Address matches the SAS Address
			 * for this port
			 */
			sci_phy_get_sas_address(iphy, &phy_sas_address);

			if (port_sas_address.high != phy_sas_address.high ||
			    port_sas_address.low != phy_sas_address.low)
				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
		}
		return sci_port_set_phy(iport, iphy);
	}
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;

		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);

		return status;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port.
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

/**
 * sci_port_remove_phy()
 * @iport: This parameter specifies the port from which the phy will be removed.
 * @iphy: This parameter is the phy which is to be removed from the port.
 *
 * This method will remove the PHY from the selected PORT.  This method returns
 * an enum sci_status.  SCI_SUCCESS the phy has been removed from the port.
 * Any other status is a failure to remove the phy from the port.
 */
enum sci_status sci_port_remove_phy(struct isci_port *iport,
				    struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;

	switch (state) {
	case SCI_PORT_STOPPED:
		return sci_port_clear_phy(iport, iphy);
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_clear_phy(iport, iphy);
		if (status != SCI_SUCCESS)
			return status;

		sci_port_deactivate_phy(iport, iphy, true);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_clear_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_deactivate_phy(iport, iphy, true);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

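/*
 * A phy assigned to this port has gone link up; handling depends on the
 * current port state (first phy up, wide port join, or a link up seen while
 * a hard reset is in flight).
 */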
enum sci_status sci_port_link_up(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		/* Since this is the first phy going link up for the port we
		 * can just enable it and continue
		 */
		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);

		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* TODO We should make sure that the phy that has gone
		 * link up is the same one on which we sent the reset.  It is
		 * possible that the phy on which we sent the reset is not the
		 * one that has gone link up and we want to make sure that
		 * phy being reset comes back.  Consider the case where a
		 * reset is sent but before the hardware processes the reset it
		 * gets a link up on the port because of a hot plug event.
		 * Because of the reset request this phy will go link down
		 * almost immediately.
		 */

		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications.
		 */
		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

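/*
 * A phy in this port has gone link down: deactivate it and, if it was the
 * last active phy, drop back to the WAITING substate.
 */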
enum sci_status sci_port_link_down(struct isci_port *iport,
				   struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_deactivate_phy(iport, iphy, true);

		/* If there are no active phys left in the port, then
		 * transition the port to the WAITING state until such time
		 * as a phy goes link up
		 */
		if (iport->active_phy_mask == 0)
			port_state_machine_change(iport,
						  SCI_PORT_SUB_WAITING);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications. */
		sci_port_deactivate_phy(iport, iphy, false);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

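/* Account for a new request started on this port; only legal while the
 * port is in the OPERATIONAL substate.
 */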
enum sci_status sci_port_start_io(struct isci_port *iport,
				  struct isci_remote_device *idev,
				  struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_SUB_OPERATIONAL:
		iport->started_request_count++;
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

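/*
 * A request on this port has completed; drop the started request count and
 * finish any pending STOPPING or CONFIGURING transition once the count
 * reaches zero.
 */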
enum sci_status sci_port_complete_io(struct isci_port *iport,
				     struct isci_remote_device *idev,
				     struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_STOPPING:
		sci_port_decrement_request_count(iport);

		if (iport->started_request_count == 0)
			port_state_machine_change(iport,
						  SCI_PORT_STOPPED);
		break;
	case SCI_PORT_READY:
	case SCI_PORT_RESETTING:
	case SCI_PORT_FAILED:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_decrement_request_count(iport);
		break;
	case SCI_PORT_SUB_CONFIGURING:
		sci_port_decrement_request_count(iport);
		if (iport->started_request_count == 0) {
			port_state_machine_change(iport,
						  SCI_PORT_SUB_OPERATIONAL);
		}
		break;
	}
	return SCI_SUCCESS;
}

static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	/* enable the port task scheduler in a suspended state */
	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &=
		~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}
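
/* Post the reserved (dummy) remote node context for this port and then
 * immediately suspend it.  The flush read plus short delay gives the
 * hardware time to process the post before the suspend request is issued.
 */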
static void sci_port_post_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];
	rnc->ssp.is_valid = true;

	command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);

	/* ensure hardware has seen the post rnc command and give it
	 * ample time to act before sending the suspend
	 */
	readl(&ihost->smu_registers->interrupt_status); /* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}

static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
		/*
		 * If we enter this state because of a request to stop
		 * the port then we want to disable the hardware's port
		 * task scheduler.
		 */
		sci_port_disable_port_task_scheduler(iport);
	}
}

static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	/* Enable and suspend the port task scheduler */
	sci_port_enable_port_task_scheduler(iport);
}

static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;
	u32 prev_state;

	prev_state = iport->sm.previous_state_id;
	if (prev_state == SCI_PORT_RESETTING)
		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
	else
		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
			__func__, iport->physical_port_index);

	/* Post and suspend the dummy remote node context for this port. */
	sci_port_post_dummy_remote_node(iport);

	/* Start the ready substate machine */
	port_state_machine_change(iport,
				  SCI_PORT_SUB_WAITING);
}

static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);
}

static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);

	sci_port_destroy_dummy_resources(iport);
}

static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
}
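
/* Program the link layer hang detection timeout on every active phy in the
 * port.  A non-zero timeout takes a reference on hang_detect_users and a
 * zero timeout drops one; the registers are only rewritten when enabling or
 * when the last user has released hang detection.
 */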
void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
{
	int phy_index;
	u32 phy_mask = iport->active_phy_mask;

	if (timeout)
		++iport->hang_detect_users;
	else if (iport->hang_detect_users > 1)
		--iport->hang_detect_users;
	else
		iport->hang_detect_users = 0;

	if (timeout || (iport->hang_detect_users == 0)) {
		for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
			if ((phy_mask >> phy_index) & 1) {
				writel(timeout,
				       &iport->phy_table[phy_index]
					 ->link_layer_registers
					 ->link_layer_hang_detection_timeout);
			}
		}
	}
}

/* --------------------------------------------------------------------------- */
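
/* State machine dispatch table for the port: enter/exit handlers for each
 * top-level state and each READY substate.
 */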
static const struct sci_base_state sci_port_state_table[] = {
	[SCI_PORT_STOPPED] = {
		.enter_state = sci_port_stopped_state_enter,
		.exit_state = sci_port_stopped_state_exit
	},
	[SCI_PORT_STOPPING] = {
		.exit_state = sci_port_stopping_state_exit
	},
	[SCI_PORT_READY] = {
		.enter_state = sci_port_ready_state_enter,
	},
	[SCI_PORT_SUB_WAITING] = {
		.enter_state = sci_port_ready_substate_waiting_enter,
		.exit_state = scic_sds_port_ready_substate_waiting_exit,
	},
	[SCI_PORT_SUB_OPERATIONAL] = {
		.enter_state = sci_port_ready_substate_operational_enter,
		.exit_state = sci_port_ready_substate_operational_exit
	},
	[SCI_PORT_SUB_CONFIGURING] = {
		.enter_state = sci_port_ready_substate_configuring_enter
	},
	[SCI_PORT_RESETTING] = {
		.exit_state = sci_port_resetting_state_exit
	},
	[SCI_PORT_FAILED] = {
		.enter_state = sci_port_failed_state_enter,
	}
};
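
/* One-time construction of an isci_port: start the state machine in
 * SCI_PORT_STOPPED and reset all bookkeeping (phy masks, request counts,
 * reserved dummy resources, timer, phy table).
 */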
void sci_port_construct(struct isci_port *iport, u8 index,
			struct isci_host *ihost)
{
	sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);

	iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
	iport->physical_port_index = index;
	iport->active_phy_mask = 0;
	iport->enabled_phy_mask = 0;
	iport->last_active_phy = 0;
	iport->ready_exit = false;

	iport->owning_controller = ihost;
	iport->started_request_count = 0;
	iport->assigned_device_count = 0;
	iport->hang_detect_users = 0;

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;

	sci_init_timer(&iport->timer, port_timeout);

	iport->port_task_scheduler_registers = NULL;

	for (index = 0; index < SCI_MAX_PHYS; index++)
		iport->phy_table[index] = NULL;
}

void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/* notify the user. */
	isci_port_bc_change_received(ihost, iport, iphy);
}
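
/* Block until a previously started hard reset on this port has completed,
 * i.e. until IPORT_RESET_PENDING has been cleared.
 */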
static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
{
	wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
}
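
/* Issue a hard reset on the port and wait for it to complete.  Returns
 * TMF_RESP_FUNC_COMPLETE on success, or TMF_RESP_FUNC_FAILED if the reset
 * could not be started or completed with an error status.
 */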
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
				 struct isci_phy *iphy)
{
	unsigned long flags;
	enum sci_status status;
	int ret = TMF_RESP_FUNC_COMPLETE;

	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
		__func__, iport);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	set_bit(IPORT_RESET_PENDING, &iport->state);

	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status == SCI_SUCCESS) {
		wait_port_reset(ihost, iport);

		dev_dbg(&ihost->pdev->dev,
			"%s: iport = %p; hard reset completion\n",
			__func__, iport);

		if (iport->hard_reset_status != SCI_SUCCESS) {
			ret = TMF_RESP_FUNC_FAILED;

			dev_err(&ihost->pdev->dev,
				"%s: iport = %p; hard reset failed (0x%x)\n",
				__func__, iport, iport->hard_reset_status);
		}
	} else {
		clear_bit(IPORT_RESET_PENDING, &iport->state);
		wake_up(&ihost->eventq);
		ret = TMF_RESP_FUNC_FAILED;

		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; sci_port_hard_reset call"
			" failed 0x%x\n",
			__func__, iport, status);
	}
	return ret;
}
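
/* Report whether the ATA device's port looks ready: the device is still
 * known to the driver, no port hard reset is pending, and at least one phy
 * in the port is active.  Returns 1 when ready, 0 otherwise.
 */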
int isci_ata_check_ready(struct domain_device *dev)
{
	struct isci_port *iport = dev->port->lldd_port;
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(dev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (!idev)
		goto out;

	if (test_bit(IPORT_RESET_PENDING, &iport->state))
		goto out;

	rc = !!iport->active_phy_mask;
 out:
	isci_put_device(idev);
	return rc;
}
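
/* Port-deformed notification from libsas.  Nothing needs to be torn down
 * here; the routine only emits a debug message once the last phy in the
 * port has gone inactive.
 */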
void isci_port_deformed(struct asd_sas_phy *phy)
{
	struct isci_host *ihost = phy->ha->lldd_ha;
	struct isci_port *iport = phy->port->lldd_port;
	unsigned long flags;
	int i;

	/* we got a port notification on a port that was subsequently
	 * torn down and libsas is just now catching up
	 */
	if (!iport)
		return;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		if (iport->active_phy_mask & 1 << i)
			break;
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (i >= SCI_MAX_PHYS)
		dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
			__func__, (long) (iport - &ihost->ports[0]));
}
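
/* Port-formed notification from libsas: find the isci_port whose active phy
 * mask contains this phy and record it as the asd_sas_port's lldd_port, or
 * NULL if no matching port is found.
 */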
void isci_port_formed(struct asd_sas_phy *phy)
{
	struct isci_host *ihost = phy->ha->lldd_ha;
	struct isci_phy *iphy = to_iphy(phy);
	struct asd_sas_port *port = phy->port;
	struct isci_port *iport = NULL;
	unsigned long flags;
	int i;

	/* initial ports are formed as the driver is still initializing,
	 * wait for that process to complete
	 */
	wait_for_start(ihost);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		iport = &ihost->ports[i];
		if (iport->active_phy_mask & 1 << iphy->phy_index)
			break;
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (i >= SCI_MAX_PORTS)
		iport = NULL;

	port->lldd_port = iport;
}