policy_engine.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/ipc_logging.h>
#include <linux/iio/consumer.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/power_supply.h>
#include <linux/qti_power_supply.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/extcon-provider.h>
#include <linux/usb/typec.h>
#include <linux/usb/usbpd.h>
#include "usbpd.h"

enum usbpd_state {
	PE_UNKNOWN,
	PE_ERROR_RECOVERY,
	PE_SRC_DISABLED,
	PE_SRC_STARTUP,
	PE_SRC_STARTUP_WAIT_FOR_VDM_RESP,
	PE_SRC_SEND_CAPABILITIES,
	PE_SRC_SEND_CAPABILITIES_WAIT, /* substate to wait for Request */
	PE_SRC_NEGOTIATE_CAPABILITY,
	PE_SRC_TRANSITION_SUPPLY,
	PE_SRC_READY,
	PE_SRC_HARD_RESET,
	PE_SRC_SOFT_RESET,
	PE_SRC_DISCOVERY,
	PE_SRC_TRANSITION_TO_DEFAULT,
	PE_SNK_STARTUP,
	PE_SNK_DISCOVERY,
	PE_SNK_WAIT_FOR_CAPABILITIES,
	PE_SNK_EVALUATE_CAPABILITY,
	PE_SNK_SELECT_CAPABILITY,
	PE_SNK_TRANSITION_SINK,
	PE_SNK_READY,
	PE_SNK_HARD_RESET,
	PE_SNK_SOFT_RESET,
	PE_SNK_TRANSITION_TO_DEFAULT,
	PE_DRS_SEND_DR_SWAP,
	PE_PRS_SNK_SRC_SEND_SWAP,
	PE_PRS_SNK_SRC_TRANSITION_TO_OFF,
	PE_PRS_SNK_SRC_SOURCE_ON,
	PE_PRS_SRC_SNK_SEND_SWAP,
	PE_PRS_SRC_SNK_TRANSITION_TO_OFF,
	PE_PRS_SRC_SNK_WAIT_SOURCE_ON,
	PE_SEND_SOFT_RESET,
	PE_VCS_WAIT_FOR_VCONN,
	PE_MAX_STATES,
};

static const char * const usbpd_state_strings[] = {
	"UNKNOWN",
	"ERROR_RECOVERY",
	"SRC_Disabled",
	"SRC_Startup",
	"SRC_Startup_Wait_for_VDM_Resp",
	"SRC_Send_Capabilities",
	"SRC_Send_Capabilities (Wait for Request)",
	"SRC_Negotiate_Capability",
	"SRC_Transition_Supply",
	"SRC_Ready",
	"SRC_Hard_Reset",
	"SRC_Soft_Reset",
	"SRC_Discovery",
	"SRC_Transition_to_default",
	"SNK_Startup",
	"SNK_Discovery",
	"SNK_Wait_for_Capabilities",
	"SNK_Evaluate_Capability",
	"SNK_Select_Capability",
	"SNK_Transition_Sink",
	"SNK_Ready",
	"SNK_Hard_Reset",
	"SNK_Soft_Reset",
	"SNK_Transition_to_default",
	"DRS_Send_DR_Swap",
	"PRS_SNK_SRC_Send_Swap",
	"PRS_SNK_SRC_Transition_to_off",
	"PRS_SNK_SRC_Source_on",
	"PRS_SRC_SNK_Send_Swap",
	"PRS_SRC_SNK_Transition_to_off",
	"PRS_SRC_SNK_Wait_Source_on",
	"Send_Soft_Reset",
	"VCS_Wait_for_VCONN",
};

enum usbpd_control_msg_type {
	MSG_RESERVED = 0,
	MSG_GOODCRC,
	MSG_GOTOMIN,
	MSG_ACCEPT,
	MSG_REJECT,
	MSG_PING,
	MSG_PS_RDY,
	MSG_GET_SOURCE_CAP,
	MSG_GET_SINK_CAP,
	MSG_DR_SWAP,
	MSG_PR_SWAP,
	MSG_VCONN_SWAP,
	MSG_WAIT,
	MSG_SOFT_RESET,
	MSG_NOT_SUPPORTED = 0x10,
	MSG_GET_SOURCE_CAP_EXTENDED,
	MSG_GET_STATUS,
	MSG_FR_SWAP,
	MSG_GET_PPS_STATUS,
	MSG_GET_COUNTRY_CODES,
};

static const char * const usbpd_control_msg_strings[] = {
	"", "GoodCRC", "GotoMin", "Accept", "Reject", "Ping", "PS_RDY",
	"Get_Source_Cap", "Get_Sink_Cap", "DR_Swap", "PR_Swap", "VCONN_Swap",
	"Wait", "Soft_Reset", "", "", "Not_Supported",
	"Get_Source_Cap_Extended", "Get_Status", "FR_Swap", "Get_PPS_Status",
	"Get_Country_Codes",
};

enum usbpd_data_msg_type {
	MSG_SOURCE_CAPABILITIES = 1,
	MSG_REQUEST,
	MSG_BIST,
	MSG_SINK_CAPABILITIES,
	MSG_BATTERY_STATUS,
	MSG_ALERT,
	MSG_GET_COUNTRY_INFO,
	MSG_VDM = 0xF,
};

static const char * const usbpd_data_msg_strings[] = {
	"", "Source_Capabilities", "Request", "BIST", "Sink_Capabilities",
	"Battery_Status", "Alert", "Get_Country_Info", "", "", "", "", "", "",
	"", "Vendor_Defined",
};

enum usbpd_ext_msg_type {
	MSG_SOURCE_CAPABILITIES_EXTENDED = 1,
	MSG_STATUS,
	MSG_GET_BATTERY_CAP,
	MSG_GET_BATTERY_STATUS,
	MSG_BATTERY_CAPABILITIES,
	MSG_GET_MANUFACTURER_INFO,
	MSG_MANUFACTURER_INFO,
	MSG_SECURITY_REQUEST,
	MSG_SECURITY_RESPONSE,
	MSG_FIRMWARE_UPDATE_REQUEST,
	MSG_FIRMWARE_UPDATE_RESPONSE,
	MSG_PPS_STATUS,
	MSG_COUNTRY_INFO,
	MSG_COUNTRY_CODES,
};

static const char * const usbpd_ext_msg_strings[] = {
	"", "Source_Capabilities_Extended", "Status", "Get_Battery_Cap",
	"Get_Battery_Status", "Get_Manufacturer_Info", "Manufacturer_Info",
	"Security_Request", "Security_Response", "Firmware_Update_Request",
	"Firmware_Update_Response", "PPS_Status", "Country_Info",
	"Country_Codes",
};

enum iio_psy_property {
	POWER_SUPPLY_PROP_PD_ACTIVE = 0,
	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
	POWER_SUPPLY_PROP_CONNECTOR_TYPE,
	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
	POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
	POWER_SUPPLY_PROP_TYPEC_SRC_RP,
	POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
	POWER_SUPPLY_PROP_PD_CURRENT_MAX,
	POWER_SUPPLY_PROP_PR_SWAP,
	POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_REAL_TYPE,
	POWER_SUPPLY_PROP_TYPEC_MODE,
	POWER_SUPPLY_PROP_PE_START,
	POWER_SUPPLY_IIO_PROP_MAX,
};
static const char * const usbpd_iio_channel_map[] = {
	"pd_active", "typec_cc_orientation", "connector_type",
	"typec_power_role", "pd_usb_suspend_supported", "typec_src_rp",
	"pd_in_hard_reset", "pd_current_max", "pr_swap", "pd_voltage_min",
	"pd_voltage_max", "real_type", "typec_mode", "pe_start",
};
static inline const char *msg_to_string(u8 id, bool is_data, bool is_ext)
{
	if (is_ext) {
		if (id < ARRAY_SIZE(usbpd_ext_msg_strings))
			return usbpd_ext_msg_strings[id];
	} else if (is_data) {
		if (id < ARRAY_SIZE(usbpd_data_msg_strings))
			return usbpd_data_msg_strings[id];
	} else if (id < ARRAY_SIZE(usbpd_control_msg_strings)) {
		return usbpd_control_msg_strings[id];
	}

	return "Invalid";
}

enum vdm_state {
	VDM_NONE,
	DISCOVERED_ID,
	DISCOVERED_SVIDS,
	DISCOVERED_MODES,
	MODE_ENTERED,
	MODE_EXITED,
};

static void *usbpd_ipc_log;

#define usbpd_dbg(dev, fmt, ...) do { \
	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
			##__VA_ARGS__); \
	dev_dbg(dev, fmt, ##__VA_ARGS__); \
	} while (0)

#define usbpd_info(dev, fmt, ...) do { \
	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
			##__VA_ARGS__); \
	dev_info(dev, fmt, ##__VA_ARGS__); \
	} while (0)

#define usbpd_warn(dev, fmt, ...) do { \
	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
			##__VA_ARGS__); \
	dev_warn(dev, fmt, ##__VA_ARGS__); \
	} while (0)

#define usbpd_err(dev, fmt, ...) do { \
	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
			##__VA_ARGS__); \
	dev_err(dev, fmt, ##__VA_ARGS__); \
	} while (0)

#define NUM_LOG_PAGES 10

/* Timeouts (in ms) */
#define ERROR_RECOVERY_TIME 25
#define SENDER_RESPONSE_TIME 26
#define SINK_WAIT_CAP_TIME 500
#define PS_TRANSITION_TIME 450
#define SRC_CAP_TIME 120
#define SRC_TRANSITION_TIME 25
#define SRC_RECOVER_TIME 750
#define PS_HARD_RESET_TIME 25
#define PS_SOURCE_ON 400
#define PS_SOURCE_OFF 750
#define FIRST_SOURCE_CAP_TIME 100
#define VDM_BUSY_TIME 50
#define VCONN_ON_TIME 100
#define SINK_TX_TIME 16

/* tPSHardReset + tSafe0V */
#define SNK_HARD_RESET_VBUS_OFF_TIME (35 + 650)

/* tSrcRecover + tSrcTurnOn */
#define SNK_HARD_RESET_VBUS_ON_TIME (1000 + 275)

#define PD_CAPS_COUNT 50

#define PD_MAX_MSG_ID 7

#define PD_MAX_DATA_OBJ 7

#define PD_SRC_CAP_EXT_DB_LEN 24
#define PD_STATUS_DB_LEN 6
#define PD_BATTERY_CAP_DB_LEN 9

#define PD_MAX_EXT_MSG_LEN 260
#define PD_MAX_EXT_MSG_LEGACY_LEN 26

#define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
	(((type) & 0x1F) | ((dr) << 5) | (rev << 6) | \
	((pr) << 8) | ((id) << 9) | ((cnt) << 12))
#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7)
#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0x1F)
#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7)
#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
#define PD_MSG_HDR_EXTENDED BIT(15)
#define PD_MSG_HDR_IS_EXTENDED(hdr) ((hdr) & PD_MSG_HDR_EXTENDED)

#define PD_MSG_EXT_HDR(chunked, num, req, size) \
	(((chunked) << 15) | (((num) & 0xF) << 11) | \
	((req) << 10) | ((size) & 0x1FF))
#define PD_MSG_EXT_HDR_IS_CHUNKED(ehdr) ((ehdr) & 0x8000)
#define PD_MSG_EXT_HDR_CHUNK_NUM(ehdr) (((ehdr) >> 11) & 0xF)
#define PD_MSG_EXT_HDR_REQ_CHUNK(ehdr) ((ehdr) & 0x400)
#define PD_MSG_EXT_HDR_DATA_SIZE(ehdr) ((ehdr) & 0x1FF)
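
/*
 * Illustrative sketch, not part of the original driver: a worked example of
 * packing a PD message header with PD_MSG_HDR() and an extended header with
 * PD_MSG_EXT_HDR(), then reading the fields back with the decode macros
 * above. The MessageID and data-size values are hypothetical.
 */
static inline bool pd_msg_hdr_example(void)
{
	/* Accept control message, MessageID 2, 0 data objects, PD rev 3.0 */
	u16 hdr = PD_MSG_HDR(MSG_ACCEPT, 0, 0, 2, 0, USBPD_REV_30);
	/* chunked extended header announcing 24 data bytes, chunk 0 */
	u16 ehdr = PD_MSG_EXT_HDR(1, 0, 0, 24);

	return PD_MSG_HDR_TYPE(hdr) == MSG_ACCEPT &&
		PD_MSG_HDR_ID(hdr) == 2 &&
		PD_MSG_HDR_COUNT(hdr) == 0 &&
		PD_MSG_HDR_REV(hdr) == USBPD_REV_30 &&
		!PD_MSG_HDR_IS_EXTENDED(hdr) &&
		PD_MSG_EXT_HDR_IS_CHUNKED(ehdr) &&
		PD_MSG_EXT_HDR_DATA_SIZE(ehdr) == 24;
}
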
#define PD_RDO_FIXED(obj, gb, mismatch, usb_comm, no_usb_susp, curr1, curr2) \
	(((obj) << 28) | ((gb) << 27) | ((mismatch) << 26) | \
	((usb_comm) << 25) | ((no_usb_susp) << 24) | \
	((curr1) << 10) | (curr2))

#define PD_RDO_AUGMENTED(obj, mismatch, usb_comm, no_usb_susp, volt, curr) \
	(((obj) << 28) | ((mismatch) << 26) | ((usb_comm) << 25) | \
	((no_usb_susp) << 24) | ((volt) << 9) | (curr))

#define PD_RDO_OBJ_POS(rdo) ((rdo) >> 28 & 7)
#define PD_RDO_GIVEBACK(rdo) ((rdo) >> 27 & 1)
#define PD_RDO_MISMATCH(rdo) ((rdo) >> 26 & 1)
#define PD_RDO_USB_COMM(rdo) ((rdo) >> 25 & 1)
#define PD_RDO_NO_USB_SUSP(rdo) ((rdo) >> 24 & 1)
#define PD_RDO_FIXED_CURR(rdo) ((rdo) >> 10 & 0x3FF)
#define PD_RDO_FIXED_CURR_MINMAX(rdo) ((rdo) & 0x3FF)
#define PD_RDO_PROG_VOLTAGE(rdo) ((rdo) >> 9 & 0x7FF)
#define PD_RDO_PROG_CURR(rdo) ((rdo) & 0x7F)
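
/*
 * Illustrative sketch, not part of the original driver: building a Fixed
 * Request Data Object with PD_RDO_FIXED() and decoding it again. Currents
 * are in the spec's 10 mA units, so 150 == 1.5 A; the chosen values are
 * hypothetical.
 */
static inline bool pd_rdo_fixed_example(void)
{
	/* object position 1, no GiveBack/mismatch, USB comm, no USB suspend */
	u32 rdo = PD_RDO_FIXED(1, 0, 0, 1, 1, 150, 150);

	return PD_RDO_OBJ_POS(rdo) == 1 &&
		PD_RDO_USB_COMM(rdo) &&
		PD_RDO_NO_USB_SUSP(rdo) &&
		PD_RDO_FIXED_CURR(rdo) == 150 &&
		PD_RDO_FIXED_CURR_MINMAX(rdo) == 150;
}
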
#define PD_SRC_PDO_TYPE(pdo) (((pdo) >> 30) & 3)
#define PD_SRC_PDO_TYPE_FIXED 0
#define PD_SRC_PDO_TYPE_BATTERY 1
#define PD_SRC_PDO_TYPE_VARIABLE 2
#define PD_SRC_PDO_TYPE_AUGMENTED 3

#define PD_SRC_PDO_FIXED_PR_SWAP(pdo) (((pdo) >> 29) & 1)
#define PD_SRC_PDO_FIXED_USB_SUSP(pdo) (((pdo) >> 28) & 1)
#define PD_SRC_PDO_FIXED_EXT_POWERED(pdo) (((pdo) >> 27) & 1)
#define PD_SRC_PDO_FIXED_USB_COMM(pdo) (((pdo) >> 26) & 1)
#define PD_SRC_PDO_FIXED_DR_SWAP(pdo) (((pdo) >> 25) & 1)
#define PD_SRC_PDO_FIXED_PEAK_CURR(pdo) (((pdo) >> 20) & 3)
#define PD_SRC_PDO_FIXED_VOLTAGE(pdo) (((pdo) >> 10) & 0x3FF)
#define PD_SRC_PDO_FIXED_MAX_CURR(pdo) ((pdo) & 0x3FF)

#define PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) (((pdo) >> 20) & 0x3FF)
#define PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) (((pdo) >> 10) & 0x3FF)
#define PD_SRC_PDO_VAR_BATT_MAX(pdo) ((pdo) & 0x3FF)

#define PD_APDO_PPS(pdo) (((pdo) >> 28) & 3)
#define PD_APDO_MAX_VOLT(pdo) (((pdo) >> 17) & 0xFF)
#define PD_APDO_MIN_VOLT(pdo) (((pdo) >> 8) & 0xFF)
#define PD_APDO_MAX_CURR(pdo) ((pdo) & 0x7F)
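
/*
 * Illustrative sketch, not part of the original driver: decoding a Fixed
 * Supply source PDO with the macros above. 0x36019096 is the same 5 V /
 * 1.5 A PDO used for default_src_caps further down; voltage is in 50 mV
 * units and current in 10 mA units.
 */
static inline bool pd_src_pdo_example(void)
{
	u32 pdo = 0x36019096;

	return PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_FIXED &&
		PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50 == 5000 &&	/* mV */
		PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10 == 1500;	/* mA */
}
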
/* Vendor Defined Messages */
#define MAX_CRC_RECEIVE_TIME 9 /* ~(2 * tReceive_max(1.1ms) * # retry 4) */
#define MAX_VDM_RESPONSE_TIME 60 /* 2 * tVDMSenderResponse_max(30ms) */
#define MAX_VDM_BUSY_TIME 100 /* 2 * tVDMBusy (50ms) */

#define PD_SNK_PDO_FIXED(prs, hc, uc, usb_comm, drs, volt, curr) \
	(((prs) << 29) | ((hc) << 28) | ((uc) << 27) | ((usb_comm) << 26) | \
	((drs) << 25) | ((volt) << 10) | (curr))

/* VDM header is the first 32-bit object following the 16-bit PD header */
#define VDM_HDR_SVID(hdr) ((hdr) >> 16)
#define VDM_IS_SVDM(hdr) ((hdr) & 0x8000)
#define SVDM_HDR_VER(hdr) (((hdr) >> 13) & 0x3)
#define SVDM_HDR_OBJ_POS(hdr) (((hdr) >> 8) & 0x7)
#define SVDM_HDR_CMD_TYPE(hdr) (((hdr) >> 6) & 0x3)
#define SVDM_HDR_CMD(hdr) ((hdr) & 0x1f)
#define SVDM_HDR(svid, ver, obj, cmd_type, cmd) \
	(((svid) << 16) | (1 << 15) | ((ver) << 13) \
	| ((obj) << 8) | ((cmd_type) << 6) | (cmd))
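
/*
 * Illustrative sketch, not part of the original driver: composing a
 * Structured VDM header with SVDM_HDR(). 0xFF00 is the PD Standard ID and
 * command 1 is Discover Identity per the PD specification; the SVDM version
 * (1) and command type (0, initiator request) are hypothetical choices for
 * this example.
 */
static inline bool svdm_hdr_example(void)
{
	u32 vdm_hdr = SVDM_HDR(0xFF00, 1, 0, 0, 1);

	return VDM_HDR_SVID(vdm_hdr) == 0xFF00 &&
		VDM_IS_SVDM(vdm_hdr) &&
		SVDM_HDR_VER(vdm_hdr) == 1 &&
		SVDM_HDR_CMD_TYPE(vdm_hdr) == 0 &&
		SVDM_HDR_CMD(vdm_hdr) == 1;
}
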
/* discover id response vdo bit fields */
#define ID_HDR_USB_HOST BIT(31)
#define ID_HDR_USB_DEVICE BIT(30)
#define ID_HDR_MODAL_OPR BIT(26)
#define ID_HDR_PRODUCT_TYPE(n) (((n) >> 27) & 0x7)
#define ID_HDR_PRODUCT_PER_MASK (2 << 27)
#define ID_HDR_PRODUCT_HUB 1
#define ID_HDR_PRODUCT_PER 2
#define ID_HDR_PRODUCT_AMA 5
#define ID_HDR_PRODUCT_VPD 6

#define PD_MIN_SINK_CURRENT 900

static const u32 default_src_caps[] = { 0x36019096 };	/* VSafe5V @ 1.5A */
static const u32 default_snk_caps[] = { 0x2601912C };	/* VSafe5V @ 3A */

struct vdm_tx {
	u32 data[PD_MAX_DATA_OBJ];
	int size;
};

struct rx_msg {
	u16 hdr;
	u16 data_len;	/* size of payload in bytes */
	struct list_head entry;
	u8 payload[];
};

#define IS_DATA(m, t) ((m) && !PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
		PD_MSG_HDR_COUNT((m)->hdr) && \
		(PD_MSG_HDR_TYPE((m)->hdr) == (t)))
#define IS_CTRL(m, t) ((m) && !PD_MSG_HDR_COUNT((m)->hdr) && \
		(PD_MSG_HDR_TYPE((m)->hdr) == (t)))
#define IS_EXT(m, t) ((m) && PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
		(PD_MSG_HDR_TYPE((m)->hdr) == (t)))
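
/*
 * Illustrative sketch, not part of the original driver: how IS_CTRL() and
 * IS_DATA() classify a message header. The Accept header below is synthetic;
 * in the driver these macros are applied to rx_msg entries pulled off
 * pd->rx_q.
 */
static inline bool rx_msg_classify_example(void)
{
	struct rx_msg msg = {
		.hdr = PD_MSG_HDR(MSG_ACCEPT, 0, 0, 0, 0, USBPD_REV_30),
	};

	/* a control message has no data objects and is not extended */
	return IS_CTRL(&msg, MSG_ACCEPT) && !IS_DATA(&msg, MSG_ACCEPT);
}
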
struct usbpd {
	struct device dev;
	struct workqueue_struct *wq;
	struct work_struct sm_work;
	struct work_struct start_periph_work;
	struct work_struct psy_chg_work;
	struct hrtimer timer;
	bool sm_queued;

	struct extcon_dev *extcon;

	enum usbpd_state current_state;
	bool hard_reset_recvd;
	ktime_t hard_reset_recvd_time;
	struct list_head rx_q;
	spinlock_t rx_lock;
	struct rx_msg *rx_ext_msg;

	u32 received_pdos[PD_MAX_DATA_OBJ];
	u16 src_cap_id;
	u8 selected_pdo;
	u8 requested_pdo;
	u32 rdo;	/* can be either source or sink */
	int current_voltage;	/* uV */
	int requested_voltage;	/* uV */
	int requested_current;	/* mA */
	bool pd_connected;
	bool in_explicit_contract;
	bool peer_usb_comm;
	bool peer_pr_swap;
	bool peer_dr_swap;

	u32 sink_caps[7];
	int num_sink_caps;

	struct power_supply *usb_psy;
	struct power_supply *bat_psy;
	struct notifier_block psy_nb;
	struct iio_channel *iio_channels[POWER_SUPPLY_IIO_PROP_MAX];
	int bat_charge_full;
	int bat_voltage_max;

	enum power_supply_typec_mode typec_mode;
	enum power_supply_typec_power_role forced_pr;
	bool vbus_present;

	enum pd_spec_rev spec_rev;
	enum data_role current_dr;
	enum power_role current_pr;
	bool in_pr_swap;
	bool pd_phy_opened;
	bool send_request;
	struct completion is_ready;
	struct completion tx_chunk_request;
	u8 next_tx_chunk;

	struct typec_capability typec_caps;
	struct typec_port *typec_port;
	struct typec_partner *partner;
	struct typec_partner_desc partner_desc;
	struct usb_pd_identity partner_identity;

	struct mutex swap_lock;
	bool send_pr_swap;
	bool send_dr_swap;

	struct regulator *vbus;
	struct regulator *vconn;
	bool vbus_enabled;
	bool vconn_enabled;

	u8 tx_msgid[SOPII_MSG + 1];
	u8 rx_msgid[SOPII_MSG + 1];
	int caps_count;
	int hard_reset_count;

	enum vdm_state vdm_state;
	u16 *discovered_svids;
	int num_svids;
	struct vdm_tx *vdm_tx;
	struct vdm_tx *vdm_tx_retry;
	struct mutex svid_handler_lock;
	struct list_head svid_handlers;
	ktime_t svdm_start_time;
	bool vdm_in_suspend;

	struct list_head instance;

	bool has_dp;
	u16 ss_lane_svid;

	/* ext msg support */
	bool send_get_src_cap_ext;
	u8 src_cap_ext_db[PD_SRC_CAP_EXT_DB_LEN];
	bool send_get_pps_status;
	u32 pps_status_db;
	bool send_get_status;
	u8 status_db[PD_STATUS_DB_LEN];
	bool send_get_battery_cap;
	u8 get_battery_cap_db;
	u8 battery_cap_db[PD_BATTERY_CAP_DB_LEN];
	u8 get_battery_status_db;
	bool send_get_battery_status;
	u32 battery_sts_dobj;

	struct pd_phy_ops *pdphy_ops;
};

static LIST_HEAD(_usbpd); /* useful for debugging */

static const unsigned int usbpd_extcon_cable[] = {
	EXTCON_USB,
	EXTCON_USB_HOST,
	EXTCON_DISP_DP,
	EXTCON_NONE,
};

struct usbpd_state_handler {
	void (*enter_state)(struct usbpd *pd);
	void (*handle_state)(struct usbpd *pd, struct rx_msg *msg);
};

static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type);
static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state);
static void handle_state_snk_wait_for_capabilities(struct usbpd *pd,
		struct rx_msg *rx_msg);
static void handle_state_prs_snk_src_source_on(struct usbpd *pd,
		struct rx_msg *rx_msg);

static const struct usbpd_state_handler state_handlers[];

int usbpd_get_psy_iio_property(struct usbpd *pd,
		enum iio_psy_property prop, union power_supply_propval *val)
{
	int ret, value;

	ret = iio_read_channel_processed(pd->iio_channels[prop], &value);
	if (ret < 0) {
		usbpd_err(&pd->dev, "failed to get IIO property: %d\n", prop);
		return ret;
	}

	val->intval = value;
	return 0;
}

int usbpd_set_psy_iio_property(struct usbpd *pd,
		enum iio_psy_property prop, union power_supply_propval *val)
{
	int ret;

	ret = iio_write_channel_raw(pd->iio_channels[prop], val->intval);
	if (ret < 0) {
		usbpd_err(&pd->dev, "failed to set IIO property: %d\n", prop);
		return ret;
	}

	return 0;
}

enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd)
{
	int ret;
	union power_supply_propval val;

	ret = usbpd_get_psy_iio_property(pd,
			POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, &val);
	if (ret)
		return ORIENTATION_NONE;

	return val.intval;
}
EXPORT_SYMBOL(usbpd_get_plug_orientation);

static unsigned int get_connector_type(struct usbpd *pd)
{
	int ret;
	union power_supply_propval val;

	ret = usbpd_get_psy_iio_property(pd,
			POWER_SUPPLY_PROP_CONNECTOR_TYPE, &val);
	if (ret) {
		dev_err(&pd->dev, "Unable to read CONNECTOR TYPE: %d\n", ret);
		return ret;
	}

	return val.intval;
}

static inline void stop_usb_host(struct usbpd *pd)
{
	extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 0);
}

static inline void start_usb_host(struct usbpd *pd, bool ss)
{
	enum plug_orientation cc = usbpd_get_plug_orientation(pd);
	union extcon_property_value val;

	val.intval = (cc == ORIENTATION_CC2);
	extcon_set_property(pd->extcon, EXTCON_USB_HOST,
			EXTCON_PROP_USB_TYPEC_POLARITY, val);

	val.intval = ss;
	extcon_set_property(pd->extcon, EXTCON_USB_HOST,
			EXTCON_PROP_USB_SS, val);

	extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 1);
}

static inline void stop_usb_peripheral(struct usbpd *pd)
{
	extcon_set_state_sync(pd->extcon, EXTCON_USB, 0);
}

static inline void start_usb_peripheral(struct usbpd *pd)
{
	enum plug_orientation cc = usbpd_get_plug_orientation(pd);
	union extcon_property_value val;

	val.intval = (cc == ORIENTATION_CC2);
	extcon_set_property(pd->extcon, EXTCON_USB,
			EXTCON_PROP_USB_TYPEC_POLARITY, val);

	val.intval = 1;
	extcon_set_property(pd->extcon, EXTCON_USB, EXTCON_PROP_USB_SS, val);

	extcon_set_state_sync(pd->extcon, EXTCON_USB, 1);
}

static void start_usb_peripheral_work(struct work_struct *w)
{
	struct usbpd *pd = container_of(w, struct usbpd, start_periph_work);

	pd->current_state = PE_SNK_STARTUP;
	pd->current_pr = PR_SINK;
	pd->current_dr = DR_UFP;
	start_usb_peripheral(pd);
	typec_set_data_role(pd->typec_port, TYPEC_DEVICE);
	typec_set_pwr_role(pd->typec_port, TYPEC_SINK);
	typec_set_pwr_opmode(pd->typec_port,
			pd->typec_mode - QTI_POWER_SUPPLY_TYPEC_SOURCE_DEFAULT);
	if (!pd->partner) {
		memset(&pd->partner_identity, 0, sizeof(pd->partner_identity));
		pd->partner_desc.usb_pd = false;
		pd->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
		pd->partner = typec_register_partner(pd->typec_port,
				&pd->partner_desc);
	}
}

static void start_usb_dp(struct usbpd *pd, bool ss)
{
	enum plug_orientation cc = usbpd_get_plug_orientation(pd);
	union extcon_property_value val;
	/* set the state to enabled so the client can get the polarity */
	extcon_set_state(pd->extcon, EXTCON_USB_HOST, 1);
	val.intval = (cc == ORIENTATION_CC2);
	extcon_set_property(pd->extcon, EXTCON_USB_HOST, EXTCON_PROP_USB_TYPEC_POLARITY, val);
	val.intval = ss ? 1 : 0;
	extcon_set_property(pd->extcon, EXTCON_USB_HOST, EXTCON_PROP_USB_SS, val);
	extcon_set_state(pd->extcon, EXTCON_DISP_DP, false);
	extcon_set_state_sync(pd->extcon, EXTCON_DISP_DP, true);
}

static void stop_usb_dp(struct usbpd *pd)
{
	extcon_set_state_sync(pd->extcon, EXTCON_DISP_DP, false);
}
/**
 * usbpd_release_ss_lane() - allow a client driver to request release of the
 * USB SS lanes. It should not be called from atomic context.
 *
 * @pd: USBPD handle
 * @hdlr: client's SVID handler
 *
 * Return: 0 on success, else negative error code
 */
static int usbpd_release_ss_lane(struct usbpd *pd,
		struct usbpd_svid_handler *hdlr)
{
	int ret = 0;

	if (!hdlr || !pd)
		return -EINVAL;

	usbpd_dbg(&pd->dev, "hdlr:%pK svid:%d", hdlr, hdlr->svid);
	/*
	 * If the USB SS lanes are already in use by one client and another
	 * client (or the same client again) requests them, return -EBUSY.
	 */
	if (pd->ss_lane_svid) {
		usbpd_dbg(&pd->dev, "-EBUSY: ss_lanes are already used by(%d)",
				pd->ss_lane_svid);
		ret = -EBUSY;
		goto err_exit;
	}

	pd->ss_lane_svid = hdlr->svid;
	start_usb_dp(pd, false);

err_exit:
	return ret;
}
static int set_power_role(struct usbpd *pd, enum power_role pr)
{
	union power_supply_propval val = {0};

	switch (pr) {
	case PR_NONE:
		val.intval = QTI_POWER_SUPPLY_TYPEC_PR_NONE;
		break;
	case PR_SINK:
		val.intval = QTI_POWER_SUPPLY_TYPEC_PR_SINK;
		break;
	case PR_SRC:
		val.intval = QTI_POWER_SUPPLY_TYPEC_PR_SOURCE;
		break;
	}

	return usbpd_set_psy_iio_property(pd,
			POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
}

static struct usbpd_svid_handler *_find_svid_handler(struct usbpd *pd, u16 svid)
{
	struct usbpd_svid_handler *handler;

	list_for_each_entry(handler, &pd->svid_handlers, entry) {
		if (svid == handler->svid)
			return handler;
	}

	return NULL;
}

static struct usbpd_svid_handler *find_svid_handler(struct usbpd *pd, u16 svid)
{
	struct usbpd_svid_handler *handler;

	/* in_interrupt() == true when handling VDM RX during suspend */
	if (!in_interrupt()) {
		mutex_lock(&pd->svid_handler_lock);
		handler = _find_svid_handler(pd, svid);
		mutex_unlock(&pd->svid_handler_lock);
	} else {
		handler = _find_svid_handler(pd, svid);
	}

	return handler;
}

/* Reset protocol layer */
static inline void pd_reset_protocol(struct usbpd *pd)
{
	/*
	 * first Rx ID should be 0; set this to a sentinel of -1 so that in
	 * phy_msg_received() we can check if we had seen it before.
	 */
	memset(pd->rx_msgid, -1, sizeof(pd->rx_msgid));
	memset(pd->tx_msgid, 0, sizeof(pd->tx_msgid));
	pd->send_request = false;
	pd->send_get_status = false;
	pd->send_pr_swap = false;
	pd->send_dr_swap = false;
}

static int pd_send_msg(struct usbpd *pd, u8 msg_type, const u32 *data,
		size_t num_data, enum pd_sop_type sop)
{
	unsigned long flags;
	int ret;
	u16 hdr;

	if (pd->hard_reset_recvd)
		return -EBUSY;

	if (sop == SOP_MSG)
		hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
				pd->tx_msgid[sop], num_data, pd->spec_rev);
	else
		/* sending SOP'/SOP'' to a cable, PR/DR fields should be 0 */
		hdr = PD_MSG_HDR(msg_type, 0, 0, pd->tx_msgid[sop], num_data,
				pd->spec_rev);

	/* bail out and try again later if a message just arrived */
	spin_lock_irqsave(&pd->rx_lock, flags);
	if (!list_empty(&pd->rx_q)) {
		spin_unlock_irqrestore(&pd->rx_lock, flags);
		usbpd_dbg(&pd->dev, "Abort send due to pending RX\n");
		return -EBUSY;
	}
	spin_unlock_irqrestore(&pd->rx_lock, flags);

	ret = pd->pdphy_ops->write(hdr, (u8 *)data,
			num_data * sizeof(u32), sop);
	if (ret) {
		if (pd->pd_connected)
			usbpd_err(&pd->dev, "Error sending %s: %d\n",
					msg_to_string(msg_type, num_data,
							false),
					ret);
		return ret;
	}

	pd->tx_msgid[sop] = (pd->tx_msgid[sop] + 1) & PD_MAX_MSG_ID;
	return 0;
}
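
/*
 * Illustrative sketch, not part of the original driver: typical pd_send_msg()
 * usage. A control message passes no data objects, while a data message
 * passes an array of 32-bit objects; the error handling shown is a minimal
 * hypothetical example.
 */
static inline int pd_send_msg_example(struct usbpd *pd)
{
	int ret;

	/* Accept: control message, zero data objects, SOP */
	ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
	if (ret)
		return ret;

	/* Source_Capabilities: data message carrying the default 5 V PDO */
	return pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps,
			ARRAY_SIZE(default_src_caps), SOP_MSG);
}
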
static int pd_send_ext_msg(struct usbpd *pd, u8 msg_type,
		const u8 *data, size_t data_len, enum pd_sop_type sop)
{
	int ret;
	size_t len_remain, chunk_len;
	u8 chunked_payload[PD_MAX_DATA_OBJ * sizeof(u32)] = {0};
	u16 hdr;
	u16 ext_hdr;
	u8 num_objs;

	if (data_len > PD_MAX_EXT_MSG_LEN) {
		usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
		data_len = PD_MAX_EXT_MSG_LEN;
	}

	pd->next_tx_chunk = 0;
	len_remain = data_len;
	do {
		ext_hdr = PD_MSG_EXT_HDR(1, pd->next_tx_chunk++, 0, data_len);
		memcpy(chunked_payload, &ext_hdr, sizeof(ext_hdr));

		chunk_len = min_t(size_t, len_remain,
				PD_MAX_EXT_MSG_LEGACY_LEN);
		memcpy(chunked_payload + sizeof(ext_hdr), data, chunk_len);

		num_objs = DIV_ROUND_UP(chunk_len + sizeof(u16), sizeof(u32));
		len_remain -= chunk_len;

		reinit_completion(&pd->tx_chunk_request);

		hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
				pd->tx_msgid[sop], num_objs, pd->spec_rev) |
			PD_MSG_HDR_EXTENDED;
		ret = pd->pdphy_ops->write(hdr, chunked_payload,
				num_objs * sizeof(u32), sop);
		if (ret) {
			usbpd_err(&pd->dev, "Error sending %s: %d\n",
					usbpd_ext_msg_strings[msg_type],
					ret);
			return ret;
		}

		pd->tx_msgid[sop] = (pd->tx_msgid[sop] + 1) & PD_MAX_MSG_ID;

		/* Wait for request chunk */
		if (len_remain &&
			!wait_for_completion_timeout(&pd->tx_chunk_request,
				msecs_to_jiffies(SENDER_RESPONSE_TIME))) {
			usbpd_err(&pd->dev, "Timed out waiting for chunk request\n");
			return -EPROTO;
		}
	} while (len_remain);

	return 0;
}
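
/*
 * Illustrative sketch, not part of the original driver: the chunking
 * arithmetic used by pd_send_ext_msg(). Each chunk carries at most
 * PD_MAX_EXT_MSG_LEGACY_LEN (26) data bytes plus the 2-byte extended header,
 * rounded up to whole 32-bit objects.
 */
static inline u8 pd_ext_msg_num_objs_example(size_t chunk_len)
{
	/* e.g. a 24-byte Source_Cap_Extended DB fits in one 7-object chunk */
	return DIV_ROUND_UP(min_t(size_t, chunk_len,
				PD_MAX_EXT_MSG_LEGACY_LEN) + sizeof(u16),
			sizeof(u32));
}
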
static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
{
	int curr;
	int max_current;
	bool mismatch = false;
	u8 type;
	u32 pdo = pd->received_pdos[pdo_pos - 1];

	type = PD_SRC_PDO_TYPE(pdo);
	if (type == PD_SRC_PDO_TYPE_FIXED) {
		curr = max_current = PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10;

		/*
		 * Check if the PDO has enough current, otherwise set the
		 * Capability Mismatch flag
		 */
		if (curr < PD_MIN_SINK_CURRENT) {
			mismatch = true;
			max_current = PD_MIN_SINK_CURRENT;
		}

		pd->requested_voltage =
			PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50 * 1000;
		pd->rdo = PD_RDO_FIXED(pdo_pos, 0, mismatch, 1, 1, curr / 10,
				max_current / 10);
	} else if (type == PD_SRC_PDO_TYPE_AUGMENTED) {
		if ((uv / 100000) > PD_APDO_MAX_VOLT(pdo) ||
			(uv / 100000) < PD_APDO_MIN_VOLT(pdo) ||
			(ua / 50000) > PD_APDO_MAX_CURR(pdo) || (ua < 0)) {
			usbpd_err(&pd->dev, "uv (%d) and ua (%d) out of range of APDO\n",
					uv, ua);
			return -EINVAL;
		}

		curr = ua / 1000;
		pd->requested_voltage = uv;
		pd->rdo = PD_RDO_AUGMENTED(pdo_pos, mismatch, 1, 1,
				uv / 20000, ua / 50000);
	} else {
		usbpd_err(&pd->dev, "Only Fixed or Programmable PDOs supported\n");
		return -EOPNOTSUPP;
	}

	pd->requested_current = curr;
	pd->requested_pdo = pdo_pos;

	return 0;
}
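
/*
 * Illustrative sketch, not part of the original driver: the unit conversions
 * pd_select_pdo() applies for a programmable (PPS) request. A hypothetical
 * 9 V / 2 A request becomes 450 (20 mV units) and 40 (50 mA units) in the
 * augmented RDO.
 */
static inline bool pd_pps_rdo_example(void)
{
	int uv = 9000000, ua = 2000000;
	u32 rdo = PD_RDO_AUGMENTED(2, 0, 1, 1, uv / 20000, ua / 50000);

	/* decodes back to 450 * 20 mV = 9000 mV and 40 * 50 mA = 2000 mA */
	return PD_RDO_PROG_VOLTAGE(rdo) == 450 && PD_RDO_PROG_CURR(rdo) == 40;
}
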
static int pd_eval_src_caps(struct usbpd *pd)
{
	int i;
	union power_supply_propval val;
	bool pps_found = false;
	u32 first_pdo = pd->received_pdos[0];

	if (PD_SRC_PDO_TYPE(first_pdo) != PD_SRC_PDO_TYPE_FIXED) {
		usbpd_err(&pd->dev, "First src_cap invalid %08x\n", first_pdo);
		return -EINVAL;
	}

	pd->peer_usb_comm = PD_SRC_PDO_FIXED_USB_COMM(first_pdo);
	pd->peer_pr_swap = PD_SRC_PDO_FIXED_PR_SWAP(first_pdo);
	pd->peer_dr_swap = PD_SRC_PDO_FIXED_DR_SWAP(first_pdo);

	val.intval = PD_SRC_PDO_FIXED_USB_SUSP(first_pdo);
	usbpd_set_psy_iio_property(pd,
			POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);

	/* Check for PPS APDOs */
	if (pd->spec_rev == USBPD_REV_30) {
		for (i = 1; i < PD_MAX_DATA_OBJ; i++) {
			if ((PD_SRC_PDO_TYPE(pd->received_pdos[i]) ==
					PD_SRC_PDO_TYPE_AUGMENTED) &&
				!PD_APDO_PPS(pd->received_pdos[i])) {
				pps_found = true;
				break;
			}
		}
	}

	val.intval = pps_found ?
			QTI_POWER_SUPPLY_PD_PPS_ACTIVE :
			QTI_POWER_SUPPLY_PD_ACTIVE;
	usbpd_set_psy_iio_property(pd,
			POWER_SUPPLY_PROP_PD_ACTIVE, &val);

	/* First time connecting to a PD source and it supports USB data */
	if (pd->peer_usb_comm && pd->current_dr == DR_UFP && !pd->pd_connected)
		start_usb_peripheral(pd);

	/* Select the first PDO (vSafe5V) immediately. */
	pd_select_pdo(pd, 1, 0, 0);

	return 0;
}

static void pd_send_hard_reset(struct usbpd *pd)
{
	union power_supply_propval val = {0};

	usbpd_dbg(&pd->dev, "send hard reset");

	pd->hard_reset_count++;
	pd->pdphy_ops->signal(HARD_RESET_SIG);
	pd->in_pr_swap = false;
	pd->pd_connected = false;
	usbpd_set_psy_iio_property(pd, POWER_SUPPLY_PROP_PR_SWAP, &val);
}

static void kick_sm(struct usbpd *pd, int ms)
{
	pm_stay_awake(&pd->dev);
	pd->sm_queued = true;

	if (ms) {
		usbpd_dbg(&pd->dev, "delay %d ms", ms);
		hrtimer_start(&pd->timer, ms_to_ktime(ms), HRTIMER_MODE_REL);
	} else {
		queue_work(pd->wq, &pd->sm_work);
	}
}

static void phy_sig_received(struct usbpd *pd, enum pd_sig_type sig)
{
	union power_supply_propval val = {1};

	if (sig != HARD_RESET_SIG) {
		usbpd_err(&pd->dev, "invalid signal (%d) received\n", sig);
		return;
	}

	pd->hard_reset_recvd = true;
	pd->hard_reset_recvd_time = ktime_get();

	usbpd_err(&pd->dev, "hard reset received\n");

	usbpd_set_psy_iio_property(pd,
			POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);

	kick_sm(pd, 0);
}

struct pd_request_chunk {
	struct work_struct w;
	struct usbpd *pd;
	u8 msg_type;
	u8 chunk_num;
	enum pd_sop_type sop;
};

static void pd_request_chunk_work(struct work_struct *w)
{
	struct pd_request_chunk *req =
		container_of(w, struct pd_request_chunk, w);
	struct usbpd *pd = req->pd;
	unsigned long flags;
	int ret;
	u8 payload[4] = {0}; /* ext_hdr + padding */
	u16 hdr = PD_MSG_HDR(req->msg_type, pd->current_dr, pd->current_pr,
			pd->tx_msgid[req->sop], 1, pd->spec_rev)
		| PD_MSG_HDR_EXTENDED;

	*(u16 *)payload = PD_MSG_EXT_HDR(1, req->chunk_num, 1, 0);

	ret = pd->pdphy_ops->write(hdr, payload, sizeof(payload), req->sop);
	if (!ret) {
		pd->tx_msgid[req->sop] =
			(pd->tx_msgid[req->sop] + 1) & PD_MAX_MSG_ID;
	} else {
		usbpd_err(&pd->dev, "could not send chunk request\n");

		/* queue what we have anyway */
		spin_lock_irqsave(&pd->rx_lock, flags);
		list_add_tail(&pd->rx_ext_msg->entry, &pd->rx_q);
		spin_unlock_irqrestore(&pd->rx_lock, flags);

		pd->rx_ext_msg = NULL;
	}

	kfree(req);
}

static struct rx_msg *pd_ext_msg_received(struct usbpd *pd, u16 header, u8 *buf,
		size_t len, enum pd_sop_type sop)
{
	struct rx_msg *rx_msg;
	u16 bytes_to_copy;
	u16 ext_hdr = *(u16 *)buf;
	u8 chunk_num;

	if (!PD_MSG_EXT_HDR_IS_CHUNKED(ext_hdr)) {
		usbpd_err(&pd->dev, "unchunked extended messages unsupported\n");
		return NULL;
	}

	/* request for next Tx chunk */
	if (PD_MSG_EXT_HDR_REQ_CHUNK(ext_hdr)) {
		if (PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr) ||
			PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr) !=
				pd->next_tx_chunk) {
			usbpd_err(&pd->dev, "invalid request chunk ext header 0x%02x\n",
					ext_hdr);
			return NULL;
		}

		if (!completion_done(&pd->tx_chunk_request))
			complete(&pd->tx_chunk_request);

		return NULL;
	}

	chunk_num = PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr);
	if (!chunk_num) {
		/* allocate new message if first chunk */
		rx_msg = kzalloc(sizeof(*rx_msg) +
				PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr),
				GFP_ATOMIC);
		if (!rx_msg)
			return NULL;

		rx_msg->hdr = header;
		rx_msg->data_len = PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr);

		if (rx_msg->data_len > PD_MAX_EXT_MSG_LEN) {
			usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
			rx_msg->data_len = PD_MAX_EXT_MSG_LEN;
		}
	} else {
		if (!pd->rx_ext_msg) {
			usbpd_err(&pd->dev, "missing first rx_ext_msg chunk\n");
			return NULL;
		}

		rx_msg = pd->rx_ext_msg;
	}

	/*
	 * The amount to copy is derived as follows:
	 *
	 * - if extended data_len < 26, then copy data_len bytes
	 * - for chunks 0..N-2, copy 26 bytes
	 * - for the last chunk (N-1), copy the remainder
	 */
	bytes_to_copy =
		min((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN),
			PD_MAX_EXT_MSG_LEGACY_LEN);

	/* check against received length to avoid overrun */
	if (bytes_to_copy > len - sizeof(ext_hdr)) {
		usbpd_warn(&pd->dev, "not enough bytes in chunk, expected:%u received:%lu\n",
				bytes_to_copy, len - sizeof(ext_hdr));
		bytes_to_copy = len - sizeof(ext_hdr);
	}

	memcpy(rx_msg->payload + chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN, buf + 2,
			bytes_to_copy);

	/* request next chunk? */
	if ((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN) >
			PD_MAX_EXT_MSG_LEGACY_LEN) {
		struct pd_request_chunk *req;

		if (pd->rx_ext_msg && pd->rx_ext_msg != rx_msg) {
			usbpd_dbg(&pd->dev, "stale previous rx_ext_msg?\n");
			kfree(pd->rx_ext_msg);
		}

		pd->rx_ext_msg = rx_msg;

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req)
			goto queue_rx; /* return what we have anyway */

		INIT_WORK(&req->w, pd_request_chunk_work);
		req->pd = pd;
		req->msg_type = PD_MSG_HDR_TYPE(header);
		req->chunk_num = chunk_num + 1;
		req->sop = sop;
		queue_work(pd->wq, &req->w);

		return NULL;
	}

queue_rx:
	pd->rx_ext_msg = NULL;
	return rx_msg; /* queue it for usbpd_sm */
}
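
/*
 * Illustrative sketch, not part of the original driver: the per-chunk copy
 * size computed in pd_ext_msg_received(). For a hypothetical 60-byte
 * extended message this yields 26, 26 and 8 bytes for chunks 0, 1 and 2.
 */
static inline u16 pd_ext_msg_chunk_bytes_example(u16 data_len, u8 chunk_num)
{
	return min_t(u16, data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN,
			PD_MAX_EXT_MSG_LEGACY_LEN);
}
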
  979. static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg);
  980. static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
  981. u8 *buf, size_t len)
  982. {
  983. struct rx_msg *rx_msg;
  984. unsigned long flags;
  985. u16 header;
  986. u8 msg_type, num_objs;
  987. if (sop == SOPII_MSG) {
  988. usbpd_err(&pd->dev, "only SOP/SOP' supported\n");
  989. return;
  990. }
  991. if (len < 2) {
  992. usbpd_err(&pd->dev, "invalid message received, len=%zd\n", len);
  993. return;
  994. }
  995. header = *((u16 *)buf);
  996. buf += sizeof(u16);
  997. len -= sizeof(u16);
  998. if (len % 4 != 0) {
  999. usbpd_err(&pd->dev, "len=%zd not multiple of 4\n", len);
  1000. return;
  1001. }
  1002. /* if MSGID already seen, discard */
  1003. if (PD_MSG_HDR_ID(header) == pd->rx_msgid[sop] &&
  1004. PD_MSG_HDR_TYPE(header) != MSG_SOFT_RESET) {
  1005. usbpd_dbg(&pd->dev, "MessageID already seen, discarding\n");
  1006. return;
  1007. }
  1008. pd->rx_msgid[sop] = PD_MSG_HDR_ID(header);
  1009. /* discard Pings */
  1010. if (PD_MSG_HDR_TYPE(header) == MSG_PING && !len)
  1011. return;
  1012. /* check header's count field to see if it matches len */
  1013. if (PD_MSG_HDR_COUNT(header) != (len / 4)) {
  1014. usbpd_err(&pd->dev, "header count (%d) mismatch, len=%zd\n",
  1015. PD_MSG_HDR_COUNT(header), len);
  1016. return;
  1017. }
  1018. /* if spec rev differs (i.e. is older), update PHY */
  1019. if (PD_MSG_HDR_REV(header) < pd->spec_rev)
  1020. pd->spec_rev = PD_MSG_HDR_REV(header);
  1021. msg_type = PD_MSG_HDR_TYPE(header);
  1022. num_objs = PD_MSG_HDR_COUNT(header);
  1023. usbpd_dbg(&pd->dev, "%s type(%d) num_objs(%d)\n",
  1024. msg_to_string(msg_type, num_objs,
  1025. PD_MSG_HDR_IS_EXTENDED(header)),
  1026. msg_type, num_objs);
  1027. if (!PD_MSG_HDR_IS_EXTENDED(header)) {
  1028. rx_msg = kzalloc(sizeof(*rx_msg) + len, GFP_ATOMIC);
  1029. if (!rx_msg)
  1030. return;
  1031. rx_msg->hdr = header;
  1032. rx_msg->data_len = len;
  1033. memcpy(rx_msg->payload, buf, len);
  1034. } else {
  1035. rx_msg = pd_ext_msg_received(pd, header, buf, len, sop);
  1036. if (!rx_msg)
  1037. return;
  1038. }
  1039. if (pd->vdm_in_suspend && msg_type == MSG_VDM) {
  1040. usbpd_dbg(&pd->dev, "Skip wq and handle VDM directly\n");
  1041. handle_vdm_rx(pd, rx_msg);
  1042. kfree(rx_msg);
  1043. return;
  1044. }
  1045. spin_lock_irqsave(&pd->rx_lock, flags);
  1046. list_add_tail(&rx_msg->entry, &pd->rx_q);
  1047. spin_unlock_irqrestore(&pd->rx_lock, flags);
  1048. if (!work_busy(&pd->sm_work))
  1049. kick_sm(pd, 0);
  1050. else
  1051. usbpd_dbg(&pd->dev, "usbpd_sm already running\n");
  1052. }
  1053. static void phy_shutdown(struct usbpd *pd)
  1054. {
  1055. usbpd_dbg(&pd->dev, "shutdown");
  1056. if (pd->vconn_enabled) {
  1057. regulator_disable(pd->vconn);
  1058. pd->vconn_enabled = false;
  1059. }
  1060. if (pd->vbus_enabled) {
  1061. regulator_disable(pd->vbus);
  1062. pd->vbus_enabled = false;
  1063. }
  1064. }
  1065. static enum hrtimer_restart pd_timeout(struct hrtimer *timer)
  1066. {
  1067. struct usbpd *pd = container_of(timer, struct usbpd, timer);
  1068. queue_work(pd->wq, &pd->sm_work);
  1069. return HRTIMER_NORESTART;
  1070. }
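/*
 * Decode and log the RDO against the matching PDO. Unit multipliers
 * follow the PD encoding used here: 50mV/10mA for Fixed/Variable,
 * 250mW for Battery, 20mV/50mA for PPS (Augmented).
 */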
  1071. static void log_decoded_request(struct usbpd *pd, u32 rdo)
  1072. {
  1073. const u32 *pdos;
  1074. int pos = PD_RDO_OBJ_POS(rdo);
  1075. int type;
  1076. usbpd_dbg(&pd->dev, "RDO: 0x%08x\n", pd->rdo);
  1077. if (pd->current_pr == PR_SINK)
  1078. pdos = pd->received_pdos;
  1079. else
  1080. pdos = default_src_caps;
  1081. type = PD_SRC_PDO_TYPE(pdos[pos - 1]);
  1082. switch (type) {
  1083. case PD_SRC_PDO_TYPE_FIXED:
  1084. case PD_SRC_PDO_TYPE_VARIABLE:
  1085. usbpd_dbg(&pd->dev, "Request Fixed/Variable PDO:%d Volt:%dmV OpCurr:%dmA Curr:%dmA\n",
  1086. pos,
  1087. PD_SRC_PDO_FIXED_VOLTAGE(pdos[pos - 1]) * 50,
  1088. PD_RDO_FIXED_CURR(rdo) * 10,
  1089. PD_RDO_FIXED_CURR_MINMAX(rdo) * 10);
  1090. break;
  1091. case PD_SRC_PDO_TYPE_BATTERY:
  1092. usbpd_dbg(&pd->dev, "Request Battery PDO:%d OpPow:%dmW Pow:%dmW\n",
  1093. pos,
  1094. PD_RDO_FIXED_CURR(rdo) * 250,
  1095. PD_RDO_FIXED_CURR_MINMAX(rdo) * 250);
  1096. break;
  1097. case PD_SRC_PDO_TYPE_AUGMENTED:
  1098. usbpd_dbg(&pd->dev, "Request PPS PDO:%d Volt:%dmV Curr:%dmA\n",
  1099. pos,
  1100. PD_RDO_PROG_VOLTAGE(rdo) * 20,
  1101. PD_RDO_PROG_CURR(rdo) * 50);
  1102. break;
  1103. }
  1104. }
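/*
 * PD 3.0 collision avoidance: the source advertises Rp = 1.5A
 * (SinkTxNG) while it is running an AMS and Rp = 3A (SinkTxOK) when
 * the sink may initiate one. In the helpers below the IIO property
 * value 1 maps to SinkTxNG and 2 to SinkTxOK.
 */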
  1105. static bool in_src_ams(struct usbpd *pd)
  1106. {
  1107. union power_supply_propval val = {0};
  1108. if (pd->current_pr != PR_SRC)
  1109. return false;
  1110. if (pd->spec_rev != USBPD_REV_30)
  1111. return true;
  1112. usbpd_get_psy_iio_property(pd,
  1113. POWER_SUPPLY_PROP_TYPEC_SRC_RP, &val);
  1114. return val.intval == 1;
  1115. }
  1116. static void start_src_ams(struct usbpd *pd, bool ams)
  1117. {
  1118. union power_supply_propval val = {0};
  1119. if (pd->current_pr != PR_SRC || pd->spec_rev < USBPD_REV_30)
  1120. return;
  1121. usbpd_dbg(&pd->dev, "Set Rp to %s\n", ams ? "1.5A" : "3A");
  1122. val.intval = ams ? 1 : 2; /* SinkTxNG / SinkTxOK */
  1123. usbpd_set_psy_iio_property(pd,
  1124. POWER_SUPPLY_PROP_TYPEC_SRC_RP, &val);
  1125. if (ams)
  1126. kick_sm(pd, SINK_TX_TIME);
  1127. }
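/*
 * usbpd_register_svid - register a handler for a given SVID.
 * connect/disconnect callbacks are mandatory. If the partner's SVIDs
 * were already discovered, the new handler is connected immediately.
 */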
  1128. int usbpd_register_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr)
  1129. {
  1130. if (find_svid_handler(pd, hdlr->svid)) {
  1131. usbpd_err(&pd->dev, "SVID 0x%04x already registered\n",
  1132. hdlr->svid);
  1133. return -EINVAL;
  1134. }
  1135. /* require connect/disconnect callbacks be implemented */
  1136. if (!hdlr->connect || !hdlr->disconnect) {
  1137. usbpd_err(&pd->dev, "SVID 0x%04x connect/disconnect must be non-NULL\n",
  1138. hdlr->svid);
  1139. return -EINVAL;
  1140. }
  1141. usbpd_dbg(&pd->dev, "registered handler(%pK) for SVID 0x%04x\n",
  1142. hdlr, hdlr->svid);
  1143. mutex_lock(&pd->svid_handler_lock);
  1144. list_add_tail(&hdlr->entry, &pd->svid_handlers);
  1145. mutex_unlock(&pd->svid_handler_lock);
  1146. hdlr->request_usb_ss_lane = usbpd_release_ss_lane;
  1147. /* already connected with this SVID discovered? */
  1148. if (pd->vdm_state >= DISCOVERED_SVIDS) {
  1149. int i;
  1150. for (i = 0; i < pd->num_svids; i++) {
  1151. if (pd->discovered_svids[i] == hdlr->svid) {
1152. usbpd_dbg(&pd->dev, "Notify SVID: 0x%04x connect\n",
  1153. hdlr->svid);
  1154. hdlr->connect(hdlr, pd->peer_usb_comm);
  1155. hdlr->discovered = true;
  1156. break;
  1157. }
  1158. }
  1159. }
  1160. return 0;
  1161. }
  1162. EXPORT_SYMBOL(usbpd_register_svid);
  1163. void usbpd_unregister_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr)
  1164. {
  1165. usbpd_dbg(&pd->dev, "unregistered handler(%pK) for SVID 0x%04x\n",
  1166. hdlr, hdlr->svid);
  1167. mutex_lock(&pd->svid_handler_lock);
  1168. list_del_init(&hdlr->entry);
  1169. mutex_unlock(&pd->svid_handler_lock);
  1170. }
  1171. EXPORT_SYMBOL(usbpd_unregister_svid);
  1172. int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos, int num_vdos)
  1173. {
  1174. struct vdm_tx *vdm_tx;
  1175. if (pd->vdm_tx) {
  1176. usbpd_warn(&pd->dev, "Discarding previously queued VDM tx (SVID:0x%04x)\n",
  1177. VDM_HDR_SVID(pd->vdm_tx->data[0]));
  1178. kfree(pd->vdm_tx);
  1179. pd->vdm_tx = NULL;
  1180. }
  1181. vdm_tx = kzalloc(sizeof(*vdm_tx), GFP_KERNEL);
  1182. if (!vdm_tx)
  1183. return -ENOMEM;
  1184. vdm_tx->data[0] = vdm_hdr;
  1185. if (vdos && num_vdos)
  1186. memcpy(&vdm_tx->data[1], vdos, num_vdos * sizeof(u32));
  1187. vdm_tx->size = num_vdos + 1; /* include the header */
  1188. /* VDM will get sent in PE_SRC/SNK_READY state handling */
  1189. pd->vdm_tx = vdm_tx;
  1190. pd->vdm_in_suspend = false;
  1191. /* slight delay before queuing to prioritize handling of incoming VDM */
  1192. if (pd->in_explicit_contract)
  1193. kick_sm(pd, 2);
  1194. return 0;
  1195. }
  1196. EXPORT_SYMBOL(usbpd_send_vdm);
  1197. int usbpd_send_svdm(struct usbpd *pd, u16 svid, u8 cmd,
  1198. enum usbpd_svdm_cmd_type cmd_type, int obj_pos,
  1199. const u32 *vdos, int num_vdos)
  1200. {
  1201. u32 svdm_hdr = SVDM_HDR(svid, pd->spec_rev == USBPD_REV_30 ? 1 : 0,
  1202. obj_pos, cmd_type, cmd);
  1203. usbpd_dbg(&pd->dev, "VDM tx: svid:%04x ver:%d obj_pos:%d cmd:%x cmd_type:%x svdm_hdr:%x\n",
  1204. svid, pd->spec_rev == USBPD_REV_30 ? 1 : 0, obj_pos,
  1205. cmd, cmd_type, svdm_hdr);
  1206. return usbpd_send_vdm(pd, svdm_hdr, vdos, num_vdos);
  1207. }
  1208. EXPORT_SYMBOL(usbpd_send_svdm);
  1209. void usbpd_vdm_in_suspend(struct usbpd *pd, bool in_suspend)
  1210. {
  1211. usbpd_dbg(&pd->dev, "VDM in_suspend:%d\n", in_suspend);
  1212. pd->vdm_in_suspend = in_suspend;
  1213. }
  1214. EXPORT_SYMBOL(usbpd_vdm_in_suspend);
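/*
 * handle_vdm_resp_ack - process an ACK to an SVDM we initiated.
 * Discover Identity chains into Discover SVIDs; Discover SVIDs packs
 * two 16-bit SVIDs out of each received VDO into discovered_svids and
 * then notifies any registered handlers.
 */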
  1215. static void handle_vdm_resp_ack(struct usbpd *pd, u32 *vdos, u8 num_vdos,
  1216. u16 vdm_hdr)
  1217. {
  1218. int ret, i;
  1219. u16 svid, *psvid;
  1220. u8 cmd = SVDM_HDR_CMD(vdm_hdr);
  1221. struct usbpd_svid_handler *handler;
  1222. switch (cmd) {
  1223. case USBPD_SVDM_DISCOVER_IDENTITY:
  1224. kfree(pd->vdm_tx_retry);
  1225. pd->vdm_tx_retry = NULL;
  1226. if (!num_vdos) {
  1227. usbpd_dbg(&pd->dev, "Discarding Discover ID response with no VDOs\n");
  1228. break;
  1229. }
  1230. if (ID_HDR_PRODUCT_TYPE(vdos[0]) == ID_HDR_PRODUCT_VPD) {
  1231. usbpd_dbg(&pd->dev, "VPD detected turn off vbus\n");
  1232. if (pd->vbus_enabled) {
  1233. ret = regulator_disable(pd->vbus);
  1234. if (ret)
  1235. usbpd_err(&pd->dev, "Err disabling vbus (%d)\n",
  1236. ret);
  1237. else
  1238. pd->vbus_enabled = false;
  1239. }
  1240. }
  1241. if (!pd->in_explicit_contract)
  1242. break;
  1243. if (SVDM_HDR_OBJ_POS(vdm_hdr) != 0) {
  1244. usbpd_dbg(&pd->dev, "Discarding Discover ID response with incorrect object position:%d\n",
  1245. SVDM_HDR_OBJ_POS(vdm_hdr));
  1246. break;
  1247. }
  1248. pd->vdm_state = DISCOVERED_ID;
  1249. usbpd_send_svdm(pd, USBPD_SID,
  1250. USBPD_SVDM_DISCOVER_SVIDS,
  1251. SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
  1252. break;
  1253. case USBPD_SVDM_DISCOVER_SVIDS:
  1254. pd->vdm_state = DISCOVERED_SVIDS;
  1255. kfree(pd->vdm_tx_retry);
  1256. pd->vdm_tx_retry = NULL;
  1257. if (!pd->discovered_svids) {
  1258. pd->num_svids = 2 * num_vdos;
  1259. pd->discovered_svids = kcalloc(pd->num_svids,
  1260. sizeof(u16),
  1261. GFP_KERNEL);
  1262. if (!pd->discovered_svids)
  1263. break;
  1264. psvid = pd->discovered_svids;
  1265. } else { /* handle > 12 SVIDs */
  1266. void *ptr;
  1267. size_t oldsize = pd->num_svids * sizeof(u16);
  1268. size_t newsize = oldsize +
  1269. (2 * num_vdos * sizeof(u16));
  1270. ptr = krealloc(pd->discovered_svids, newsize,
  1271. GFP_KERNEL);
  1272. if (!ptr)
  1273. break;
  1274. pd->discovered_svids = ptr;
  1275. psvid = pd->discovered_svids + pd->num_svids;
1276. memset(psvid, 0, 2 * num_vdos * sizeof(u16));
  1277. pd->num_svids += 2 * num_vdos;
  1278. }
  1279. /* convert 32-bit VDOs to list of 16-bit SVIDs */
  1280. for (i = 0; i < num_vdos * 2; i++) {
  1281. /*
  1282. * Within each 32-bit VDO,
  1283. * SVID[i]: upper 16-bits
  1284. * SVID[i+1]: lower 16-bits
  1285. * where i is even.
  1286. */
  1287. if (!(i & 1))
  1288. svid = vdos[i >> 1] >> 16;
  1289. else
  1290. svid = vdos[i >> 1] & 0xFFFF;
  1291. /*
  1292. * There are some devices that incorrectly
  1293. * swap the order of SVIDs within a VDO. So in
  1294. * case of an odd-number of SVIDs it could end
  1295. * up with SVID[i] as 0 while SVID[i+1] is
  1296. * non-zero. Just skip over the zero ones.
  1297. */
  1298. if (svid) {
  1299. usbpd_dbg(&pd->dev, "Discovered SVID: 0x%04x\n",
  1300. svid);
  1301. *psvid++ = svid;
  1302. }
  1303. }
  1304. /* if more than 12 SVIDs, resend the request */
  1305. if (num_vdos == 6 && vdos[5] != 0) {
  1306. usbpd_send_svdm(pd, USBPD_SID,
  1307. USBPD_SVDM_DISCOVER_SVIDS,
  1308. SVDM_CMD_TYPE_INITIATOR, 0,
  1309. NULL, 0);
  1310. break;
  1311. }
  1312. /* now that all SVIDs are discovered, notify handlers */
  1313. for (i = 0; i < pd->num_svids; i++) {
  1314. svid = pd->discovered_svids[i];
  1315. if (svid) {
  1316. handler = find_svid_handler(pd, svid);
  1317. if (handler) {
  1318. usbpd_dbg(&pd->dev, "Notify SVID: 0x%04x discovered\n",
  1319. handler->svid);
  1320. handler->connect(handler,
  1321. pd->peer_usb_comm);
  1322. handler->discovered = true;
  1323. }
  1324. }
  1325. }
  1326. break;
  1327. default:
  1328. usbpd_dbg(&pd->dev, "unhandled ACK for command:0x%x\n",
  1329. cmd);
  1330. break;
  1331. }
  1332. }
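/*
 * handle_vdm_rx - dispatch a received VDM. Unstructured VDMs go to the
 * matching SVID handler (or get Not_Supported on PD 3.0). Structured
 * VDMs are routed by command type: initiator commands, or ACK/NAK/BUSY
 * responses to a command we sent.
 */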
  1333. static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
  1334. {
  1335. int ret;
  1336. u32 vdm_hdr =
  1337. rx_msg->data_len >= sizeof(u32) ? ((u32 *)rx_msg->payload)[0] : 0;
  1338. u32 *vdos = (u32 *)&rx_msg->payload[sizeof(u32)];
  1339. u16 svid = VDM_HDR_SVID(vdm_hdr);
  1340. u8 num_vdos = PD_MSG_HDR_COUNT(rx_msg->hdr) - 1;
  1341. u8 cmd = SVDM_HDR_CMD(vdm_hdr);
  1342. u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr);
  1343. struct usbpd_svid_handler *handler;
  1344. ktime_t recvd_time = ktime_get();
  1345. usbpd_dbg(&pd->dev,
  1346. "VDM rx: svid:%04x cmd:%x cmd_type:%x vdm_hdr:%x has_dp: %s\n",
  1347. svid, cmd, cmd_type, vdm_hdr,
  1348. pd->has_dp ? "true" : "false");
  1349. if ((svid == 0xFF01) && (!pd->has_dp)) {
  1350. pd->has_dp = true;
1351. /* The policy-engine-based display driver only supports releasing
1352. * all four lanes; this is not ideal since, from the USB side, a
1353. * two-lane display configuration needs extra handling beyond the PHY.
1354. */
  1355. start_usb_dp(pd, true);
  1356. }
  1357. /* if it's a supported SVID, pass the message to the handler */
  1358. handler = find_svid_handler(pd, svid);
  1359. /* Unstructured VDM */
  1360. if (!VDM_IS_SVDM(vdm_hdr)) {
  1361. if (handler && handler->vdm_received) {
  1362. handler->vdm_received(handler, vdm_hdr, vdos, num_vdos);
  1363. } else if (pd->spec_rev == USBPD_REV_30) {
  1364. ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
  1365. SOP_MSG);
  1366. if (ret)
  1367. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  1368. }
  1369. return;
  1370. }
  1371. if (SVDM_HDR_VER(vdm_hdr) > 1)
  1372. usbpd_dbg(&pd->dev, "Received SVDM with unsupported version:%d\n",
  1373. SVDM_HDR_VER(vdm_hdr));
  1374. if (cmd_type != SVDM_CMD_TYPE_INITIATOR &&
  1375. pd->current_state != PE_SRC_STARTUP_WAIT_FOR_VDM_RESP)
  1376. start_src_ams(pd, false);
  1377. if (handler && handler->svdm_received) {
  1378. handler->svdm_received(handler, cmd, cmd_type, vdos, num_vdos);
  1379. /* handle any previously queued TX */
  1380. if (pd->vdm_tx && !pd->sm_queued)
  1381. kick_sm(pd, 0);
  1382. return;
  1383. }
  1384. /* Standard Discovery or unhandled messages go here */
  1385. switch (cmd_type) {
  1386. case SVDM_CMD_TYPE_INITIATOR:
  1387. if (cmd != USBPD_SVDM_ATTENTION) {
  1388. if (pd->spec_rev == USBPD_REV_30) {
  1389. ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL,
  1390. 0, SOP_MSG);
  1391. if (ret)
  1392. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  1393. }
  1394. }
  1395. break;
  1396. case SVDM_CMD_TYPE_RESP_ACK:
  1397. if (svid != USBPD_SID) {
  1398. usbpd_err(&pd->dev, "unhandled ACK for SVID:0x%x\n",
  1399. svid);
  1400. break;
  1401. }
  1402. if (ktime_ms_delta(recvd_time, pd->svdm_start_time) >
  1403. SENDER_RESPONSE_TIME) {
  1404. usbpd_dbg(&pd->dev, "Discarding delayed SVDM response due to timeout\n");
  1405. break;
  1406. }
  1407. handle_vdm_resp_ack(pd, vdos, num_vdos, vdm_hdr);
  1408. break;
  1409. case SVDM_CMD_TYPE_RESP_NAK:
  1410. usbpd_info(&pd->dev, "VDM NAK received for SVID:0x%04x command:0x%x\n",
  1411. svid, cmd);
  1412. break;
  1413. case SVDM_CMD_TYPE_RESP_BUSY:
  1414. switch (cmd) {
  1415. case USBPD_SVDM_DISCOVER_IDENTITY:
  1416. case USBPD_SVDM_DISCOVER_SVIDS:
  1417. if (!pd->vdm_tx_retry) {
  1418. usbpd_err(&pd->dev, "Discover command %d VDM was unexpectedly freed\n",
  1419. cmd);
  1420. break;
  1421. }
  1422. /* wait tVDMBusy, then retry */
  1423. pd->vdm_tx = pd->vdm_tx_retry;
  1424. pd->vdm_tx_retry = NULL;
  1425. kick_sm(pd, VDM_BUSY_TIME);
  1426. break;
  1427. default:
  1428. break;
  1429. }
  1430. break;
  1431. }
  1432. }
  1433. static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type)
  1434. {
  1435. u32 vdm_hdr;
  1436. int ret;
  1437. mutex_lock(&pd->svid_handler_lock);
  1438. if (!pd->vdm_tx) {
  1439. mutex_unlock(&pd->svid_handler_lock);
  1440. return;
  1441. }
  1442. /* only send one VDM at a time */
  1443. vdm_hdr = pd->vdm_tx->data[0];
  1444. /*
  1445. * PD 3.0: For initiated SVDMs, source must first ensure Rp is set
  1446. * to SinkTxNG to indicate the start of an AMS
  1447. */
  1448. if (VDM_IS_SVDM(vdm_hdr) &&
  1449. SVDM_HDR_CMD_TYPE(vdm_hdr) == SVDM_CMD_TYPE_INITIATOR &&
  1450. pd->current_pr == PR_SRC && !in_src_ams(pd)) {
  1451. /* Set SinkTxNG and reschedule sm_work to send again */
  1452. start_src_ams(pd, true);
  1453. mutex_unlock(&pd->svid_handler_lock);
  1454. return;
  1455. }
  1456. ret = pd_send_msg(pd, MSG_VDM, pd->vdm_tx->data,
  1457. pd->vdm_tx->size, sop_type);
  1458. if (ret) {
  1459. usbpd_err(&pd->dev, "Error (%d) sending VDM command %d\n",
  1460. ret, SVDM_HDR_CMD(pd->vdm_tx->data[0]));
  1461. mutex_unlock(&pd->svid_handler_lock);
  1462. /* retry when hitting PE_SRC/SNK_Ready again */
  1463. if (ret != -EBUSY && sop_type == SOP_MSG)
  1464. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  1465. return;
  1466. }
  1467. /* start tVDMSenderResponse timer */
  1468. if (VDM_IS_SVDM(vdm_hdr) &&
  1469. SVDM_HDR_CMD_TYPE(vdm_hdr) == SVDM_CMD_TYPE_INITIATOR) {
  1470. pd->svdm_start_time = ktime_get();
  1471. }
  1472. /*
  1473. * special case: keep initiated Discover ID/SVIDs
  1474. * around in case we need to re-try when receiving BUSY
  1475. */
  1476. if (VDM_IS_SVDM(vdm_hdr) &&
  1477. SVDM_HDR_CMD_TYPE(vdm_hdr) == SVDM_CMD_TYPE_INITIATOR &&
  1478. SVDM_HDR_CMD(vdm_hdr) <= USBPD_SVDM_DISCOVER_SVIDS) {
  1479. if (pd->vdm_tx_retry) {
  1480. usbpd_dbg(&pd->dev, "Previous Discover VDM command %d not ACKed/NAKed\n",
  1481. SVDM_HDR_CMD(
  1482. pd->vdm_tx_retry->data[0]));
  1483. kfree(pd->vdm_tx_retry);
  1484. }
  1485. pd->vdm_tx_retry = pd->vdm_tx;
  1486. } else {
  1487. kfree(pd->vdm_tx);
  1488. }
  1489. pd->vdm_tx = NULL;
  1490. mutex_unlock(&pd->svid_handler_lock);
  1491. }
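/*
 * reset_vdm_state - notify previously discovered handlers of
 * disconnect and drop all cached VDM state (pending TX, BUSY retry
 * copy, discovered SVIDs).
 */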
  1492. static void reset_vdm_state(struct usbpd *pd)
  1493. {
  1494. struct usbpd_svid_handler *handler;
  1495. mutex_lock(&pd->svid_handler_lock);
  1496. list_for_each_entry(handler, &pd->svid_handlers, entry) {
  1497. if (handler->discovered) {
  1498. usbpd_dbg(&pd->dev, "Notify SVID: 0x%04x disconnect\n",
  1499. handler->svid);
  1500. handler->disconnect(handler);
  1501. handler->discovered = false;
  1502. }
  1503. }
  1504. stop_usb_dp(pd);
  1505. pd->vdm_state = VDM_NONE;
  1506. kfree(pd->vdm_tx_retry);
  1507. pd->vdm_tx_retry = NULL;
  1508. kfree(pd->discovered_svids);
  1509. pd->discovered_svids = NULL;
  1510. pd->num_svids = 0;
  1511. kfree(pd->vdm_tx);
  1512. pd->vdm_tx = NULL;
  1513. pd->ss_lane_svid = 0x0;
  1514. pd->vdm_in_suspend = false;
  1515. mutex_unlock(&pd->svid_handler_lock);
  1516. }
  1517. static void handle_get_src_cap_extended(struct usbpd *pd)
  1518. {
  1519. int ret;
  1520. struct {
  1521. u16 vid;
  1522. u16 pid;
  1523. u32 xid;
  1524. u8 fw_version;
  1525. u8 hw_version;
  1526. u8 voltage_reg;
  1527. u8 holdup_time;
  1528. u8 compliance;
  1529. u8 touch_current;
  1530. u16 peak_current1;
  1531. u16 peak_current2;
  1532. u16 peak_current3;
  1533. u8 touch_temp;
  1534. u8 source_inputs;
  1535. u8 num_batt;
  1536. u8 pdp;
  1537. } __packed caps = {0};
  1538. caps.vid = 0x5c6;
  1539. caps.num_batt = 1;
  1540. caps.pdp = 5 * PD_SRC_PDO_FIXED_MAX_CURR(default_src_caps[0]) / 100;
  1541. ret = pd_send_ext_msg(pd, MSG_SOURCE_CAPABILITIES_EXTENDED, (u8 *)&caps,
  1542. sizeof(caps), SOP_MSG);
  1543. if (ret)
  1544. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  1545. }
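/*
 * Capacities in the Battery_Capabilities response are reported in
 * 0.1WH units; assuming the usual power-supply units of uAh and uV,
 * (uAh/1000 * uV/1000) / 100000 below yields exactly that.
 */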
  1546. static void handle_get_battery_cap(struct usbpd *pd, struct rx_msg *rx_msg)
  1547. {
  1548. int ret;
  1549. u8 bat_num;
  1550. struct {
  1551. u16 vid;
  1552. u16 pid;
  1553. u16 capacity;
  1554. u16 last_full;
  1555. u8 type;
  1556. } __packed bcdb = {0, 0, 0xffff, 0xffff, 0};
  1557. if (rx_msg->data_len != 1) {
  1558. usbpd_err(&pd->dev, "Invalid payload size: %d\n",
  1559. rx_msg->data_len);
  1560. return;
  1561. }
  1562. bat_num = rx_msg->payload[0];
  1563. if (bat_num || !pd->bat_psy) {
  1564. usbpd_warn(&pd->dev, "Battery %d unsupported\n", bat_num);
  1565. bcdb.type = BIT(0); /* invalid */
  1566. goto send;
  1567. }
  1568. bcdb.capacity = ((pd->bat_charge_full / 1000) *
  1569. (pd->bat_voltage_max / 1000)) / 100000;
  1570. /* fix me */
  1571. bcdb.last_full = ((pd->bat_charge_full / 1000) *
  1572. (pd->bat_voltage_max / 1000)) / 100000;
  1573. send:
  1574. ret = pd_send_ext_msg(pd, MSG_BATTERY_CAPABILITIES, (u8 *)&bcdb,
  1575. sizeof(bcdb), SOP_MSG);
  1576. if (ret)
  1577. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  1578. }
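/*
 * Battery Status Data Object layout as built below: bit 8 = invalid
 * battery reference, bit 9 = battery present, bits 11:10 = charging
 * status, bits 31:16 = state of charge in 0.1WH (0xFFFF = unknown).
 */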
  1579. static void handle_get_battery_status(struct usbpd *pd, struct rx_msg *rx_msg)
  1580. {
  1581. int ret;
  1582. int cap;
  1583. union power_supply_propval val = {0};
  1584. u8 bat_num;
  1585. u32 bsdo = 0xffff0000;
  1586. if (rx_msg->data_len != 1) {
  1587. usbpd_err(&pd->dev, "Invalid payload size: %d\n",
  1588. rx_msg->data_len);
  1589. return;
  1590. }
  1591. bat_num = rx_msg->payload[0];
  1592. if (bat_num || !pd->bat_psy) {
  1593. usbpd_warn(&pd->dev, "Battery %d unsupported\n", bat_num);
  1594. bsdo |= BIT(8); /* invalid */
  1595. goto send;
  1596. }
  1597. ret = power_supply_get_property(pd->bat_psy, POWER_SUPPLY_PROP_PRESENT,
  1598. &val);
  1599. if (ret || !val.intval)
  1600. goto send;
  1601. bsdo |= BIT(9);
  1602. ret = power_supply_get_property(pd->bat_psy, POWER_SUPPLY_PROP_STATUS,
  1603. &val);
  1604. if (!ret) {
  1605. switch (val.intval) {
  1606. case POWER_SUPPLY_STATUS_CHARGING:
  1607. break;
  1608. case POWER_SUPPLY_STATUS_DISCHARGING:
  1609. bsdo |= (1 << 10);
  1610. break;
  1611. default:
  1612. bsdo |= (2 << 10);
  1613. break;
  1614. }
  1615. }
  1616. /* state of charge */
  1617. ret = power_supply_get_property(pd->bat_psy,
  1618. POWER_SUPPLY_PROP_CAPACITY, &val);
  1619. if (ret)
  1620. goto send;
  1621. cap = val.intval;
  1622. bsdo &= 0xffff;
  1623. bsdo |= ((cap * (pd->bat_charge_full / 1000) *
  1624. (pd->bat_voltage_max / 1000)) / 10000000) << 16;
  1625. send:
  1626. ret = pd_send_msg(pd, MSG_BATTERY_STATUS, &bsdo, 1, SOP_MSG);
  1627. if (ret)
  1628. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  1629. }
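/*
 * dr_swap - complete an accepted DR_Swap: reset VDM state, flip
 * current_dr, restart the USB stack in the new role and, when becoming
 * DFP, kick off Discover Identity.
 */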
  1630. static void dr_swap(struct usbpd *pd)
  1631. {
  1632. reset_vdm_state(pd);
  1633. usbpd_dbg(&pd->dev, "%s: current_dr(%d)\n", __func__, pd->current_dr);
  1634. if (pd->current_dr == DR_DFP) {
  1635. pd->current_dr = DR_UFP;
  1636. pd->pdphy_ops->update_roles(pd->current_dr, pd->current_pr);
  1637. stop_usb_host(pd);
  1638. if (pd->peer_usb_comm)
  1639. start_usb_peripheral(pd);
  1640. typec_set_data_role(pd->typec_port, TYPEC_DEVICE);
  1641. } else if (pd->current_dr == DR_UFP) {
  1642. pd->current_dr = DR_DFP;
  1643. pd->pdphy_ops->update_roles(pd->current_dr, pd->current_pr);
  1644. stop_usb_peripheral(pd);
  1645. if (pd->peer_usb_comm)
  1646. start_usb_host(pd, true);
  1647. typec_set_data_role(pd->typec_port, TYPEC_HOST);
  1648. usbpd_send_svdm(pd, USBPD_SID, USBPD_SVDM_DISCOVER_IDENTITY,
  1649. SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
  1650. }
  1651. }
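/*
 * vconn_swap - two-phase VCONN_Swap: if we currently supply VCONN,
 * park in PE_VCS_WAIT_FOR_VCONN (handled elsewhere) until the partner
 * takes over; otherwise enable VCONN, widen the frame filter to SOP'
 * and send PS_RDY.
 */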
  1652. static void vconn_swap(struct usbpd *pd)
  1653. {
  1654. int ret;
  1655. if (pd->vconn_enabled) {
  1656. pd->pdphy_ops->update_frame_filter(FRAME_FILTER_EN_SOP |
  1657. FRAME_FILTER_EN_HARD_RESET);
  1658. pd->current_state = PE_VCS_WAIT_FOR_VCONN;
  1659. kick_sm(pd, VCONN_ON_TIME);
  1660. } else {
  1661. if (!pd->vconn) {
  1662. pd->vconn = devm_regulator_get(pd->dev.parent, "vconn");
  1663. if (IS_ERR(pd->vconn)) {
  1664. usbpd_err(&pd->dev, "Unable to get vconn\n");
  1665. return;
  1666. }
  1667. }
  1668. ret = regulator_enable(pd->vconn);
  1669. if (ret) {
  1670. usbpd_err(&pd->dev, "Unable to enable vconn\n");
  1671. return;
  1672. }
  1673. pd->vconn_enabled = true;
  1674. pd->pdphy_ops->update_frame_filter(FRAME_FILTER_EN_SOP |
  1675. FRAME_FILTER_EN_SOPI |
  1676. FRAME_FILTER_EN_HARD_RESET);
  1677. /*
  1678. * Small delay to ensure Vconn has ramped up. This is well
  1679. * below tVCONNSourceOn (100ms) so we still send PS_RDY within
  1680. * the allowed time.
  1681. */
  1682. usleep_range(5000, 10000);
  1683. ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
  1684. if (ret) {
  1685. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  1686. return;
  1687. }
  1688. }
  1689. }
  1690. static int enable_vbus(struct usbpd *pd)
  1691. {
  1692. union power_supply_propval val = {0};
  1693. int count = 100;
  1694. int ret;
  1695. /*
  1696. * Check to make sure there's no lingering charge on
  1697. * VBUS before enabling it as a source. If so poll here
  1698. * until it goes below VSafe0V (0.8V) before proceeding.
  1699. */
  1700. while (count--) {
  1701. ret = power_supply_get_property(pd->usb_psy,
  1702. POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
  1703. if (ret || val.intval <= 800000)
  1704. break;
  1705. usleep_range(20000, 30000);
  1706. }
  1707. if (count < 99)
  1708. msleep(100); /* need to wait an additional tCCDebounce */
  1709. if (!pd->vbus) {
  1710. pd->vbus = devm_regulator_get(pd->dev.parent, "vbus");
  1711. if (IS_ERR(pd->vbus)) {
  1712. usbpd_err(&pd->dev, "Unable to get vbus\n");
  1713. return -EAGAIN;
  1714. }
  1715. }
  1716. ret = regulator_enable(pd->vbus);
  1717. if (ret)
  1718. usbpd_err(&pd->dev, "Unable to enable vbus (%d)\n", ret);
  1719. else
  1720. pd->vbus_enabled = true;
  1721. count = 10;
  1722. /*
  1723. * Check to make sure VBUS voltage reaches above Vsafe5Vmin (4.75v)
  1724. * before proceeding.
  1725. */
  1726. while (count--) {
  1727. ret = power_supply_get_property(pd->usb_psy,
  1728. POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
  1729. if (ret || val.intval >= 4750000) /*vsafe5Vmin*/
  1730. break;
  1731. usleep_range(10000, 12000); /* Delay between two reads */
  1732. }
  1733. if (ret)
  1734. msleep(100); /* Delay to wait for VBUS ramp up if read fails */
  1735. return ret;
  1736. }
  1737. static inline void rx_msg_cleanup(struct usbpd *pd)
  1738. {
  1739. struct rx_msg *msg, *tmp;
  1740. unsigned long flags;
  1741. spin_lock_irqsave(&pd->rx_lock, flags);
  1742. list_for_each_entry_safe(msg, tmp, &pd->rx_q, entry) {
  1743. list_del(&msg->entry);
  1744. kfree(msg);
  1745. }
  1746. spin_unlock_irqrestore(&pd->rx_lock, flags);
  1747. }
1748. /* For PD 3.0, check SinkTxOk before initiating an AMS */
  1749. static inline bool is_sink_tx_ok(struct usbpd *pd)
  1750. {
  1751. if (pd->spec_rev == USBPD_REV_30)
  1752. return pd->typec_mode == QTI_POWER_SUPPLY_TYPEC_SOURCE_HIGH;
  1753. return true;
  1754. }
  1755. static void handle_state_unknown(struct usbpd *pd, struct rx_msg *rx_msg)
  1756. {
  1757. union power_supply_propval val = {0};
  1758. int ret;
  1759. val.intval = 0;
  1760. usbpd_set_psy_iio_property(pd,
  1761. POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
  1762. if (pd->current_pr == PR_SINK) {
  1763. usbpd_set_state(pd, PE_SNK_STARTUP);
  1764. } else if (pd->current_pr == PR_SRC) {
  1765. if (!pd->vconn_enabled && pd->typec_mode ==
  1766. QTI_POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE) {
  1767. if (!pd->vconn) {
  1768. pd->vconn = devm_regulator_get(
  1769. pd->dev.parent, "vconn");
  1770. if (IS_ERR(pd->vconn)) {
  1771. usbpd_err(&pd->dev, "Unable to get vconn\n");
  1772. return;
  1773. }
  1774. }
  1775. ret = regulator_enable(pd->vconn);
  1776. if (ret)
  1777. usbpd_err(&pd->dev, "Unable to enable vconn\n");
  1778. else
  1779. pd->vconn_enabled = true;
  1780. }
  1781. enable_vbus(pd);
  1782. usbpd_set_state(pd, PE_SRC_STARTUP);
  1783. }
  1784. }
  1785. static void enter_state_error_recovery(struct usbpd *pd)
  1786. {
  1787. /* perform hard disconnect/reconnect */
  1788. pd->in_pr_swap = false;
  1789. pd->current_pr = PR_NONE;
  1790. set_power_role(pd, PR_NONE);
  1791. pd->typec_mode = QTI_POWER_SUPPLY_TYPEC_NONE;
  1792. kick_sm(pd, 0);
  1793. }
  1794. static void enter_state_src_disabled(struct usbpd *pd)
  1795. {
  1796. /* are we still connected? */
  1797. if (pd->typec_mode == QTI_POWER_SUPPLY_TYPEC_NONE) {
  1798. pd->current_pr = PR_NONE;
  1799. kick_sm(pd, 0);
  1800. }
  1801. }
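/*
 * usbpd_startup_common - shared PE_SRC/SNK_Startup setup: reset the
 * protocol layer and, unless we are in a PR_Swap, reopen the PD PHY
 * with the caller's role and frame-filter configuration.
 */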
  1802. static int usbpd_startup_common(struct usbpd *pd,
  1803. struct pd_phy_params *phy_params)
  1804. {
  1805. int ret = 0;
  1806. pd_reset_protocol(pd);
  1807. if (!pd->in_pr_swap) {
  1808. /*
  1809. * support up to PD 3.0; if peer is 2.0
  1810. * phy_msg_received() will handle the downgrade.
  1811. */
  1812. pd->spec_rev = USBPD_REV_30;
  1813. if (pd->pd_phy_opened) {
  1814. pd->pdphy_ops->close();
  1815. pd->pd_phy_opened = false;
  1816. }
  1817. phy_params->data_role = pd->current_dr;
  1818. phy_params->power_role = pd->current_pr;
  1819. if (pd->vconn_enabled)
  1820. phy_params->frame_filter_val |= FRAME_FILTER_EN_SOPI;
  1821. if (!pd->pdphy_ops->open)
  1822. return -ENODEV;
  1823. ret = pd->pdphy_ops->open(phy_params);
  1824. if (ret) {
  1825. WARN_ON_ONCE(1);
  1826. usbpd_err(&pd->dev, "error opening PD PHY %d\n",
  1827. ret);
  1828. pd->current_state = PE_UNKNOWN;
  1829. return ret;
  1830. }
  1831. pd->pd_phy_opened = true;
  1832. }
  1833. return 0;
  1834. }
  1835. static void enter_state_src_startup(struct usbpd *pd)
  1836. {
  1837. struct pd_phy_params phy_params = {
  1838. .signal_cb = phy_sig_received,
  1839. .msg_rx_cb = phy_msg_received,
  1840. .shutdown_cb = phy_shutdown,
  1841. .frame_filter_val = FRAME_FILTER_EN_SOP |
  1842. FRAME_FILTER_EN_HARD_RESET,
  1843. };
  1844. union power_supply_propval val = {0};
  1845. if (pd->current_dr == DR_NONE) {
  1846. pd->current_dr = DR_DFP;
  1847. start_usb_host(pd, true);
  1848. pd->ss_lane_svid = 0x0;
  1849. typec_set_data_role(pd->typec_port, TYPEC_HOST);
  1850. }
  1851. typec_set_pwr_role(pd->typec_port, TYPEC_SOURCE);
  1852. if (!pd->partner) {
  1853. typec_set_pwr_opmode(pd->typec_port, TYPEC_PWR_MODE_1_5A);
  1854. memset(&pd->partner_identity, 0, sizeof(pd->partner_identity));
  1855. pd->partner_desc.usb_pd = false;
  1856. pd->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
  1857. pd->partner = typec_register_partner(pd->typec_port,
  1858. &pd->partner_desc);
  1859. }
  1860. val.intval = 1; /* Rp-1.5A; SinkTxNG for PD 3.0 */
  1861. usbpd_set_psy_iio_property(pd,
  1862. POWER_SUPPLY_PROP_TYPEC_SRC_RP, &val);
  1863. /* Set CC back to DRP toggle for the next disconnect */
  1864. val.intval = QTI_POWER_SUPPLY_TYPEC_PR_DUAL;
  1865. usbpd_set_psy_iio_property(pd,
  1866. POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
  1867. if (usbpd_startup_common(pd, &phy_params))
  1868. return;
  1869. if (pd->in_pr_swap) {
  1870. pd->in_pr_swap = false;
  1871. val.intval = 0;
  1872. usbpd_set_psy_iio_property(pd,
  1873. POWER_SUPPLY_PROP_PR_SWAP, &val);
  1874. }
  1875. if (pd->vconn_enabled) {
  1876. /*
  1877. * wait for tVCONNStable (50ms), until SOPI becomes
  1878. * ready for communication.
  1879. */
  1880. usleep_range(50000, 51000);
  1881. usbpd_send_svdm(pd, USBPD_SID,
  1882. USBPD_SVDM_DISCOVER_IDENTITY,
  1883. SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
  1884. handle_vdm_tx(pd, SOPI_MSG);
  1885. pd->current_state = PE_SRC_STARTUP_WAIT_FOR_VDM_RESP;
  1886. kick_sm(pd, SENDER_RESPONSE_TIME);
  1887. return;
  1888. }
  1889. /*
  1890. * A sink might remove its terminations (during some Type-C
  1891. * compliance tests or a sink attempting to do Try.SRC)
  1892. * at this point just after we enabled VBUS. Sending PD
  1893. * messages now would delay detecting the detach beyond the
  1894. * required timing. Instead, delay sending out the first
  1895. * source capabilities to allow for the other side to
  1896. * completely settle CC debounce and allow HW to detect detach
  1897. * sooner in the meantime. PD spec allows up to
  1898. * tFirstSourceCap (250ms).
  1899. */
  1900. pd->current_state = PE_SRC_SEND_CAPABILITIES;
  1901. kick_sm(pd, FIRST_SOURCE_CAP_TIME);
  1902. }
  1903. static void handle_state_src_startup(struct usbpd *pd, struct rx_msg *rx_msg)
  1904. {
  1905. usbpd_set_state(pd, PE_SRC_STARTUP);
  1906. }
  1907. static void handle_state_src_startup_wait_for_vdm_resp(struct usbpd *pd,
  1908. struct rx_msg *rx_msg)
  1909. {
  1910. int ms;
  1911. if (IS_DATA(rx_msg, MSG_VDM))
  1912. handle_vdm_rx(pd, rx_msg);
  1913. /* tVCONNStable (50ms) elapsed */
  1914. ms = FIRST_SOURCE_CAP_TIME - 50;
1915. /* if no VDM response was received, SENDER_RESPONSE_TIME has also elapsed */
  1916. if (!rx_msg)
  1917. ms -= SENDER_RESPONSE_TIME;
  1918. /*
  1919. * Emarker may have negotiated down to rev 2.0.
  1920. * Reset to 3.0 to begin SOP communication with sink
  1921. */
  1922. pd->spec_rev = USBPD_REV_30;
  1923. pd->current_state = PE_SRC_SEND_CAPABILITIES;
  1924. kick_sm(pd, ms);
  1925. }
  1926. static void enter_state_src_send_capabilities(struct usbpd *pd)
  1927. {
  1928. kick_sm(pd, 0);
  1929. }
  1930. static void handle_state_src_send_capabilities(struct usbpd *pd,
  1931. struct rx_msg *rx_msg)
  1932. {
  1933. int ret;
  1934. union power_supply_propval val = {0};
  1935. ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps,
  1936. ARRAY_SIZE(default_src_caps), SOP_MSG);
  1937. if (ret) {
  1938. if (pd->pd_connected) {
  1939. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  1940. return;
  1941. }
  1942. /*
  1943. * Technically this is PE_SRC_Discovery, but we can
  1944. * handle it by setting a timer to come back to the
  1945. * same state for the next retry.
  1946. */
  1947. pd->caps_count++;
  1948. if (pd->caps_count >= PD_CAPS_COUNT) {
  1949. usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
  1950. usbpd_set_state(pd, PE_SRC_DISABLED);
  1951. val.intval = QTI_POWER_SUPPLY_PD_INACTIVE;
  1952. usbpd_set_psy_iio_property(pd,
  1953. POWER_SUPPLY_PROP_PD_ACTIVE, &val);
  1954. return;
  1955. }
  1956. kick_sm(pd, SRC_CAP_TIME);
  1957. return;
  1958. }
  1959. /* transmit was successful if GoodCRC was received */
  1960. pd->caps_count = 0;
  1961. pd->hard_reset_count = 0;
  1962. pd->pd_connected = true; /* we know peer is PD capable */
  1963. /* wait for REQUEST */
  1964. pd->current_state = PE_SRC_SEND_CAPABILITIES_WAIT;
  1965. kick_sm(pd, SENDER_RESPONSE_TIME);
  1966. val.intval = QTI_POWER_SUPPLY_PD_ACTIVE;
  1967. usbpd_set_psy_iio_property(pd,
  1968. POWER_SUPPLY_PROP_PD_ACTIVE, &val);
  1969. }
  1970. static void handle_state_src_send_capabilities_wait(struct usbpd *pd,
  1971. struct rx_msg *rx_msg)
  1972. {
  1973. if (IS_DATA(rx_msg, MSG_REQUEST)) {
  1974. pd->rdo = *(u32 *)rx_msg->payload;
  1975. usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
  1976. } else if (rx_msg) {
  1977. usbpd_err(&pd->dev, "Unexpected message received\n");
  1978. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  1979. } else {
  1980. usbpd_set_state(pd, PE_SRC_HARD_RESET);
  1981. }
  1982. }
  1983. static void enter_state_src_negotiate_capability(struct usbpd *pd)
  1984. {
  1985. int ret;
  1986. log_decoded_request(pd, pd->rdo);
  1987. pd->peer_usb_comm = PD_RDO_USB_COMM(pd->rdo);
  1988. if (PD_RDO_OBJ_POS(pd->rdo) != 1 ||
  1989. PD_RDO_FIXED_CURR(pd->rdo) >
  1990. PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps)) {
  1991. /* send Reject */
  1992. ret = pd_send_msg(pd, MSG_REJECT, NULL, 0, SOP_MSG);
  1993. if (ret) {
  1994. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  1995. return;
  1996. }
  1997. usbpd_err(&pd->dev, "Invalid request: %08x\n", pd->rdo);
  1998. if (pd->in_explicit_contract)
  1999. usbpd_set_state(pd, PE_SRC_READY);
  2000. else
  2001. /*
  2002. * bypass PE_SRC_Capability_Response and
  2003. * PE_SRC_Wait_New_Capabilities in this
  2004. * implementation for simplicity.
  2005. */
  2006. usbpd_set_state(pd, PE_SRC_SEND_CAPABILITIES);
  2007. return;
  2008. }
  2009. /* PE_SRC_TRANSITION_SUPPLY pseudo-state */
  2010. ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
  2011. if (ret) {
  2012. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2013. return;
  2014. }
  2015. /* tSrcTransition required after ACCEPT */
  2016. usleep_range(SRC_TRANSITION_TIME * USEC_PER_MSEC,
  2017. (SRC_TRANSITION_TIME + 5) * USEC_PER_MSEC);
  2018. /*
  2019. * Normally a voltage change should occur within tSrcReady
  2020. * but since we only support VSafe5V there is nothing more to
  2021. * prepare from the power supply so send PS_RDY right away.
  2022. */
  2023. ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
  2024. if (ret) {
  2025. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2026. return;
  2027. }
  2028. usbpd_set_state(pd, PE_SRC_READY);
  2029. }
  2030. static void enter_state_src_ready(struct usbpd *pd)
  2031. {
  2032. /*
  2033. * USB Host stack was started at PE_SRC_STARTUP but if peer
  2034. * doesn't support USB communication, we can turn it off
  2035. */
  2036. if (pd->current_dr == DR_DFP && !pd->peer_usb_comm &&
  2037. !pd->in_explicit_contract)
  2038. stop_usb_host(pd);
  2039. pd->in_explicit_contract = true;
  2040. if (pd->vdm_tx && !pd->sm_queued)
  2041. kick_sm(pd, 0);
  2042. else if (pd->current_dr == DR_DFP && pd->vdm_state == VDM_NONE)
  2043. usbpd_send_svdm(pd, USBPD_SID,
  2044. USBPD_SVDM_DISCOVER_IDENTITY,
  2045. SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
  2046. else
  2047. start_src_ams(pd, false);
  2048. kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
  2049. complete(&pd->is_ready);
  2050. typec_set_pwr_opmode(pd->typec_port, TYPEC_PWR_MODE_PD);
  2051. }
  2052. static void handle_state_src_ready(struct usbpd *pd, struct rx_msg *rx_msg)
  2053. {
  2054. int ret;
  2055. if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
  2056. pd->current_state = PE_SRC_SEND_CAPABILITIES;
  2057. kick_sm(pd, 0);
  2058. } else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
  2059. ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
  2060. pd->sink_caps, pd->num_sink_caps,
  2061. SOP_MSG);
  2062. if (ret)
  2063. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2064. } else if (IS_DATA(rx_msg, MSG_REQUEST)) {
  2065. pd->rdo = *(u32 *)rx_msg->payload;
  2066. usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
  2067. } else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) {
  2068. if (pd->vdm_state == MODE_ENTERED) {
  2069. usbpd_set_state(pd, PE_SRC_HARD_RESET);
  2070. return;
  2071. }
  2072. ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
  2073. if (ret) {
  2074. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2075. return;
  2076. }
  2077. dr_swap(pd);
  2078. } else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) {
  2079. /* we'll happily accept Src->Sink requests anytime */
  2080. ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
  2081. if (ret) {
  2082. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2083. return;
  2084. }
  2085. usbpd_set_state(pd, PE_PRS_SRC_SNK_TRANSITION_TO_OFF);
  2086. return;
  2087. } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
  2088. ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
  2089. if (ret) {
  2090. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2091. return;
  2092. }
  2093. vconn_swap(pd);
  2094. } else if (IS_DATA(rx_msg, MSG_VDM)) {
  2095. handle_vdm_rx(pd, rx_msg);
  2096. } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP_EXTENDED)) {
  2097. handle_get_src_cap_extended(pd);
  2098. } else if (IS_EXT(rx_msg, MSG_GET_BATTERY_CAP)) {
  2099. handle_get_battery_cap(pd, rx_msg);
  2100. } else if (IS_EXT(rx_msg, MSG_GET_BATTERY_STATUS)) {
  2101. handle_get_battery_status(pd, rx_msg);
  2102. } else if (IS_CTRL(rx_msg, MSG_ACCEPT) ||
  2103. IS_CTRL(rx_msg, MSG_REJECT) ||
  2104. IS_CTRL(rx_msg, MSG_WAIT)) {
  2105. usbpd_warn(&pd->dev, "Unexpected message\n");
  2106. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2107. return;
  2108. } else if (rx_msg && !IS_CTRL(rx_msg, MSG_NOT_SUPPORTED)) {
  2109. usbpd_dbg(&pd->dev, "Unsupported message\n");
  2110. ret = pd_send_msg(pd, pd->spec_rev == USBPD_REV_30 ?
  2111. MSG_NOT_SUPPORTED : MSG_REJECT,
  2112. NULL, 0, SOP_MSG);
  2113. if (ret)
  2114. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2115. return;
  2116. } else if (pd->send_pr_swap) {
  2117. pd->send_pr_swap = false;
  2118. ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
  2119. if (ret) {
  2120. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2121. return;
  2122. }
  2123. pd->current_state = PE_PRS_SRC_SNK_SEND_SWAP;
  2124. kick_sm(pd, SENDER_RESPONSE_TIME);
  2125. } else if (pd->send_dr_swap) {
  2126. pd->send_dr_swap = false;
  2127. ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG);
  2128. if (ret) {
  2129. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2130. return;
  2131. }
  2132. pd->current_state = PE_DRS_SEND_DR_SWAP;
  2133. kick_sm(pd, SENDER_RESPONSE_TIME);
  2134. } else if (pd->vdm_tx) {
  2135. handle_vdm_tx(pd, SOP_MSG);
  2136. } else {
  2137. start_src_ams(pd, false);
  2138. }
  2139. }
  2140. static void enter_state_hard_reset(struct usbpd *pd)
  2141. {
  2142. union power_supply_propval val = {0};
  2143. /* are we still connected? */
  2144. if (pd->typec_mode == QTI_POWER_SUPPLY_TYPEC_NONE) {
  2145. pd->current_pr = PR_NONE;
  2146. kick_sm(pd, 0);
  2147. return;
  2148. }
  2149. val.intval = 1;
  2150. usbpd_set_psy_iio_property(pd,
  2151. POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
  2152. if (pd->current_pr == PR_SINK) {
  2153. if (pd->requested_current) {
  2154. val.intval = pd->requested_current = 0;
  2155. usbpd_set_psy_iio_property(pd,
  2156. POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
  2157. }
  2158. val.intval = pd->requested_voltage = 5000000;
  2159. usbpd_set_psy_iio_property(pd,
  2160. POWER_SUPPLY_PROP_PD_VOLTAGE_MIN, &val);
  2161. }
  2162. pd_send_hard_reset(pd);
  2163. pd->in_explicit_contract = false;
  2164. pd->rdo = 0;
  2165. rx_msg_cleanup(pd);
  2166. reset_vdm_state(pd);
  2167. kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
  2168. if (pd->current_pr == PR_SRC) {
  2169. pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
  2170. kick_sm(pd, PS_HARD_RESET_TIME);
  2171. } else {
  2172. usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
  2173. }
  2174. }
  2175. static void handle_state_soft_reset(struct usbpd *pd,
  2176. struct rx_msg *rx_msg)
  2177. {
  2178. int ret;
  2179. pd_reset_protocol(pd);
  2180. ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
  2181. if (ret) {
  2182. usbpd_set_state(pd, pd->current_pr == PR_SRC ?
  2183. PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
  2184. return;
  2185. }
  2186. usbpd_set_state(pd, pd->current_pr == PR_SRC ?
  2187. PE_SRC_SEND_CAPABILITIES :
  2188. PE_SNK_WAIT_FOR_CAPABILITIES);
  2189. }
  2190. static void handle_state_src_transition_to_default(struct usbpd *pd,
  2191. struct rx_msg *rx_msg)
  2192. {
  2193. if (pd->vconn_enabled)
  2194. regulator_disable(pd->vconn);
  2195. pd->vconn_enabled = false;
  2196. if (pd->vbus_enabled)
  2197. regulator_disable(pd->vbus);
  2198. pd->vbus_enabled = false;
  2199. if (pd->current_dr != DR_DFP) {
  2200. extcon_set_state_sync(pd->extcon, EXTCON_USB, 0);
  2201. pd->current_dr = DR_DFP;
  2202. pd->pdphy_ops->update_roles(pd->current_dr, pd->current_pr);
  2203. }
  2204. /* PE_UNKNOWN will turn on VBUS and go back to PE_SRC_STARTUP */
  2205. pd->current_state = PE_UNKNOWN;
  2206. kick_sm(pd, SRC_RECOVER_TIME);
  2207. }
  2208. static void enter_state_snk_startup(struct usbpd *pd)
  2209. {
  2210. int ret;
  2211. struct pd_phy_params phy_params = {
  2212. .signal_cb = phy_sig_received,
  2213. .msg_rx_cb = phy_msg_received,
  2214. .shutdown_cb = phy_shutdown,
  2215. .frame_filter_val = FRAME_FILTER_EN_SOP |
  2216. FRAME_FILTER_EN_HARD_RESET,
  2217. };
  2218. union power_supply_propval val = {0};
  2219. if (pd->current_dr == DR_NONE || pd->current_dr == DR_UFP) {
  2220. pd->current_dr = DR_UFP;
  2221. ret = usbpd_get_psy_iio_property(pd,
  2222. POWER_SUPPLY_PROP_REAL_TYPE, &val);
  2223. if (!ret) {
  2224. usbpd_dbg(&pd->dev, "type:%d\n", val.intval);
  2225. if (val.intval == POWER_SUPPLY_TYPE_USB ||
  2226. val.intval == POWER_SUPPLY_TYPE_USB_CDP)
  2227. start_usb_peripheral(pd);
  2228. }
  2229. typec_set_data_role(pd->typec_port, TYPEC_DEVICE);
  2230. }
  2231. typec_set_pwr_role(pd->typec_port, TYPEC_SINK);
  2232. if (!pd->partner) {
  2233. typec_set_pwr_opmode(pd->typec_port,
  2234. pd->typec_mode - QTI_POWER_SUPPLY_TYPEC_SOURCE_DEFAULT);
  2235. memset(&pd->partner_identity, 0, sizeof(pd->partner_identity));
  2236. pd->partner_desc.usb_pd = false;
  2237. pd->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
  2238. pd->partner = typec_register_partner(pd->typec_port,
  2239. &pd->partner_desc);
  2240. }
  2241. if (usbpd_startup_common(pd, &phy_params))
  2242. return;
  2243. pd->current_voltage = pd->requested_voltage = 5000000;
  2244. val.intval = pd->requested_voltage; /* set max range to 5V */
  2245. usbpd_set_psy_iio_property(pd,
  2246. POWER_SUPPLY_PROP_PD_VOLTAGE_MAX, &val);
  2247. if (!pd->vbus_present) {
  2248. pd->current_state = PE_SNK_DISCOVERY;
  2249. /* max time for hard reset to turn vbus back on */
  2250. kick_sm(pd, SNK_HARD_RESET_VBUS_ON_TIME);
  2251. return;
  2252. }
  2253. usbpd_set_state(pd, PE_SNK_WAIT_FOR_CAPABILITIES);
  2254. }
  2255. static void handle_state_snk_startup(struct usbpd *pd, struct rx_msg *rx_msg)
  2256. {
  2257. usbpd_set_state(pd, PE_SNK_STARTUP);
  2258. }
  2259. static void handle_state_snk_discovery(struct usbpd *pd, struct rx_msg *rx_msg)
  2260. {
  2261. if (!rx_msg) {
  2262. if (pd->vbus_present)
  2263. usbpd_set_state(pd,
  2264. PE_SNK_WAIT_FOR_CAPABILITIES);
2265. /*
2266. * Handle disconnection in the middle of PR_Swap.
2267. * psy_changed() ignores the typec_mode==NONE change while
2268. * pd->in_pr_swap is set, since that is expected during the
2269. * swap. However, if the cable really did get disconnected we
2270. * need to catch it here, once waiting for VBUS presence has
2271. * timed out.
2272. */
  2273. if (!pd->typec_mode) {
  2274. pd->current_pr = PR_NONE;
  2275. kick_sm(pd, 0);
  2276. }
  2277. return;
  2278. }
  2279. pd->current_state = PE_SNK_WAIT_FOR_CAPABILITIES;
  2280. handle_state_snk_wait_for_capabilities(pd, rx_msg);
  2281. }
  2282. static void enter_state_snk_wait_for_capabilities(struct usbpd *pd)
  2283. {
  2284. unsigned long flags;
  2285. spin_lock_irqsave(&pd->rx_lock, flags);
  2286. if (list_empty(&pd->rx_q))
  2287. kick_sm(pd, SINK_WAIT_CAP_TIME);
  2288. spin_unlock_irqrestore(&pd->rx_lock, flags);
  2289. }
  2290. static void handle_state_snk_wait_for_capabilities(struct usbpd *pd,
  2291. struct rx_msg *rx_msg)
  2292. {
  2293. union power_supply_propval val = {0};
  2294. pd->in_pr_swap = false;
  2295. val.intval = 0;
  2296. usbpd_set_psy_iio_property(pd,
  2297. POWER_SUPPLY_PROP_PR_SWAP, &val);
  2298. if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
  2299. val.intval = 0;
  2300. usbpd_set_psy_iio_property(pd,
  2301. POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
  2302. /* save the PDOs so userspace can further evaluate */
  2303. memset(&pd->received_pdos, 0,
  2304. sizeof(pd->received_pdos));
  2305. memcpy(&pd->received_pdos, rx_msg->payload,
  2306. min_t(size_t, rx_msg->data_len,
  2307. sizeof(pd->received_pdos)));
  2308. pd->src_cap_id++;
  2309. usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
  2310. } else if (pd->hard_reset_count < 3) {
  2311. usbpd_set_state(pd, PE_SNK_HARD_RESET);
  2312. } else {
  2313. usbpd_dbg(&pd->dev, "Sink hard reset count exceeded, disabling PD\n");
  2314. val.intval = 0;
  2315. usbpd_set_psy_iio_property(pd,
  2316. POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
  2317. val.intval = QTI_POWER_SUPPLY_PD_INACTIVE;
  2318. usbpd_set_psy_iio_property(pd,
  2319. POWER_SUPPLY_PROP_PD_ACTIVE, &val);
  2320. }
  2321. }
  2322. static void enter_state_snk_evaluate_capability(struct usbpd *pd)
  2323. {
  2324. int ret;
  2325. pd->hard_reset_count = 0;
  2326. /* evaluate PDOs and select one */
  2327. ret = pd_eval_src_caps(pd);
  2328. if (ret < 0) {
  2329. usbpd_err(&pd->dev, "Invalid src_caps received. Skipping request\n");
  2330. return;
  2331. }
  2332. pd->pd_connected = true; /* we know peer is PD capable */
  2333. usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
  2334. }
  2335. static void enter_state_snk_select_capability(struct usbpd *pd)
  2336. {
  2337. int ret;
  2338. log_decoded_request(pd, pd->rdo);
  2339. ret = pd_send_msg(pd, MSG_REQUEST, &pd->rdo, 1, SOP_MSG);
  2340. if (ret) {
  2341. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2342. return;
  2343. }
  2344. /* wait for ACCEPT */
  2345. kick_sm(pd, SENDER_RESPONSE_TIME);
  2346. }
  2347. static void handle_state_snk_select_capability(struct usbpd *pd,
  2348. struct rx_msg *rx_msg)
  2349. {
  2350. int ret;
  2351. union power_supply_propval val = {0};
  2352. if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
  2353. u32 pdo = pd->received_pdos[pd->requested_pdo - 1];
  2354. bool same_pps = (pd->selected_pdo == pd->requested_pdo)
  2355. && (PD_SRC_PDO_TYPE(pdo) ==
  2356. PD_SRC_PDO_TYPE_AUGMENTED);
  2357. usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
  2358. /* prepare for voltage increase/decrease */
  2359. val.intval = pd->requested_voltage;
  2360. usbpd_set_psy_iio_property(pd,
  2361. pd->requested_voltage >= pd->current_voltage ?
  2362. POWER_SUPPLY_PROP_PD_VOLTAGE_MAX :
  2363. POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
  2364. &val);
  2365. /*
  2366. * if changing voltages (not within the same PPS PDO),
  2367. * we must lower input current to pSnkStdby (2.5W).
  2368. * Calculate it and set PD_CURRENT_MAX accordingly.
  2369. */
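/*
 * Illustrative example: stepping to 9V gives 2500000 / 9000 = 277,
 * i.e. 277mA, programmed below as 277000uA.
 */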
  2370. if (!same_pps &&
  2371. pd->requested_voltage != pd->current_voltage) {
  2372. int mv = max(pd->requested_voltage,
  2373. pd->current_voltage) / 1000;
  2374. val.intval = (2500000 / mv) * 1000;
  2375. usbpd_set_psy_iio_property(pd,
  2376. POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
  2377. } else {
  2378. /* decreasing current? */
  2379. ret = usbpd_get_psy_iio_property(pd,
  2380. POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
  2381. if (!ret &&
  2382. pd->requested_current < val.intval) {
  2383. val.intval =
  2384. pd->requested_current * 1000;
  2385. usbpd_set_psy_iio_property(pd,
  2386. POWER_SUPPLY_PROP_PD_CURRENT_MAX,
  2387. &val);
  2388. }
  2389. }
  2390. pd->selected_pdo = pd->requested_pdo;
  2391. } else if (IS_CTRL(rx_msg, MSG_REJECT) ||
  2392. IS_CTRL(rx_msg, MSG_WAIT)) {
  2393. if (pd->in_explicit_contract)
  2394. usbpd_set_state(pd, PE_SNK_READY);
  2395. else
  2396. usbpd_set_state(pd,
  2397. PE_SNK_WAIT_FOR_CAPABILITIES);
  2398. } else if (rx_msg) {
  2399. usbpd_err(&pd->dev, "Invalid response to sink request\n");
  2400. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2401. } else {
  2402. /* timed out; go to hard reset */
  2403. usbpd_set_state(pd, PE_SNK_HARD_RESET);
  2404. }
  2405. }
  2406. static void enter_state_snk_transition_sink(struct usbpd *pd)
  2407. {
  2408. /* wait for PS_RDY */
  2409. kick_sm(pd, PS_TRANSITION_TIME);
  2410. }
  2411. static void handle_state_snk_transition_sink(struct usbpd *pd,
  2412. struct rx_msg *rx_msg)
  2413. {
  2414. union power_supply_propval val = {0};
  2415. if (IS_CTRL(rx_msg, MSG_PS_RDY)) {
  2416. val.intval = pd->requested_voltage;
  2417. usbpd_set_psy_iio_property(pd,
  2418. pd->requested_voltage >= pd->current_voltage ?
  2419. POWER_SUPPLY_PROP_PD_VOLTAGE_MIN :
  2420. POWER_SUPPLY_PROP_PD_VOLTAGE_MAX, &val);
  2421. pd->current_voltage = pd->requested_voltage;
  2422. /* resume charging */
  2423. val.intval = pd->requested_current * 1000; /* mA->uA */
  2424. usbpd_set_psy_iio_property(pd,
  2425. POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
  2426. usbpd_set_state(pd, PE_SNK_READY);
  2427. } else {
  2428. /* timed out; go to hard reset */
  2429. usbpd_set_state(pd, PE_SNK_HARD_RESET);
  2430. }
  2431. }
  2432. static void enter_state_snk_ready(struct usbpd *pd)
  2433. {
  2434. pd->in_explicit_contract = true;
  2435. if (pd->vdm_tx)
  2436. kick_sm(pd, 0);
  2437. else if (pd->current_dr == DR_DFP && pd->vdm_state == VDM_NONE)
  2438. usbpd_send_svdm(pd, USBPD_SID,
  2439. USBPD_SVDM_DISCOVER_IDENTITY,
  2440. SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
  2441. kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
  2442. complete(&pd->is_ready);
  2443. typec_set_pwr_opmode(pd->typec_port, TYPEC_PWR_MODE_PD);
  2444. }
  2445. static bool handle_ctrl_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
  2446. {
  2447. int ret;
  2448. switch (PD_MSG_HDR_TYPE(rx_msg->hdr)) {
  2449. case MSG_GET_SINK_CAP:
  2450. ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
  2451. pd->sink_caps, pd->num_sink_caps,
  2452. SOP_MSG);
  2453. if (ret)
  2454. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2455. break;
  2456. case MSG_GET_SOURCE_CAP:
  2457. ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
  2458. default_src_caps,
  2459. ARRAY_SIZE(default_src_caps), SOP_MSG);
  2460. if (ret)
  2461. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2462. break;
  2463. case MSG_DR_SWAP:
  2464. if (pd->vdm_state == MODE_ENTERED) {
  2465. usbpd_set_state(pd, PE_SNK_HARD_RESET);
  2466. break;
  2467. }
  2468. ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
  2469. if (ret) {
  2470. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2471. break;
  2472. }
  2473. dr_swap(pd);
  2474. break;
  2475. case MSG_PR_SWAP:
  2476. /* TODO: should we Reject in certain circumstances? */
  2477. ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
  2478. if (ret) {
  2479. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2480. break;
  2481. }
  2482. usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
  2483. break;
  2484. case MSG_VCONN_SWAP:
  2485. ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
  2486. if (ret) {
  2487. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2488. break;
  2489. }
  2490. vconn_swap(pd);
  2491. break;
  2492. case MSG_GET_SOURCE_CAP_EXTENDED:
  2493. handle_get_src_cap_extended(pd);
  2494. break;
  2495. case MSG_ACCEPT:
  2496. case MSG_REJECT:
  2497. case MSG_WAIT:
  2498. usbpd_warn(&pd->dev, "Unexpected message\n");
  2499. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2500. break;
  2501. default:
  2502. return false;
  2503. }
  2504. return true;
  2505. }
  2506. static bool handle_data_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
  2507. {
  2508. u32 ado;
  2509. switch (PD_MSG_HDR_TYPE(rx_msg->hdr)) {
  2510. case MSG_SOURCE_CAPABILITIES:
  2511. /* save the PDOs so userspace can further evaluate */
  2512. memset(&pd->received_pdos, 0,
  2513. sizeof(pd->received_pdos));
  2514. memcpy(&pd->received_pdos, rx_msg->payload,
  2515. min_t(size_t, rx_msg->data_len,
  2516. sizeof(pd->received_pdos)));
  2517. pd->src_cap_id++;
  2518. usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
  2519. break;
  2520. case MSG_VDM:
  2521. handle_vdm_rx(pd, rx_msg);
  2522. break;
  2523. case MSG_ALERT:
  2524. if (rx_msg->data_len != sizeof(ado)) {
  2525. usbpd_err(&pd->dev, "Invalid ado\n");
  2526. break;
  2527. }
  2528. memcpy(&ado, rx_msg->payload, sizeof(ado));
  2529. usbpd_dbg(&pd->dev, "Received Alert 0x%08x\n", ado);
  2530. /*
  2531. * Don't send Get_Status right away so we can coalesce
  2532. * multiple Alerts. 150ms should be enough to not get
  2533. * in the way of any other AMS that might happen.
  2534. */
  2535. pd->send_get_status = true;
  2536. kick_sm(pd, 150);
  2537. break;
  2538. case MSG_BATTERY_STATUS:
  2539. if (rx_msg->data_len != sizeof(pd->battery_sts_dobj)) {
  2540. usbpd_err(&pd->dev, "Invalid bat sts dobj\n");
  2541. break;
  2542. }
  2543. memcpy(&pd->battery_sts_dobj, rx_msg->payload,
  2544. sizeof(pd->battery_sts_dobj));
  2545. complete(&pd->is_ready);
  2546. break;
  2547. default:
  2548. return false;
  2549. }
  2550. return true;
  2551. }
  2552. static bool handle_ext_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
  2553. {
  2554. switch (PD_MSG_HDR_TYPE(rx_msg->hdr)) {
  2555. case MSG_SOURCE_CAPABILITIES_EXTENDED:
  2556. if (rx_msg->data_len != PD_SRC_CAP_EXT_DB_LEN) {
  2557. usbpd_err(&pd->dev, "Invalid src cap ext db\n");
  2558. break;
  2559. }
  2560. memcpy(&pd->src_cap_ext_db, rx_msg->payload,
  2561. sizeof(pd->src_cap_ext_db));
  2562. complete(&pd->is_ready);
  2563. break;
  2564. case MSG_PPS_STATUS:
  2565. if (rx_msg->data_len != sizeof(pd->pps_status_db)) {
  2566. usbpd_err(&pd->dev, "Invalid pps status db\n");
  2567. break;
  2568. }
  2569. memcpy(&pd->pps_status_db, rx_msg->payload,
  2570. sizeof(pd->pps_status_db));
  2571. complete(&pd->is_ready);
  2572. break;
  2573. case MSG_STATUS:
  2574. if (rx_msg->data_len > PD_STATUS_DB_LEN)
  2575. usbpd_err(&pd->dev, "Invalid status db length:%d\n",
  2576. rx_msg->data_len);
  2577. memset(&pd->status_db, 0, sizeof(pd->status_db));
  2578. memcpy(&pd->status_db, rx_msg->payload,
  2579. min((size_t)rx_msg->data_len, sizeof(pd->status_db)));
  2580. kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
  2581. complete(&pd->is_ready);
  2582. break;
  2583. case MSG_BATTERY_CAPABILITIES:
  2584. if (rx_msg->data_len != PD_BATTERY_CAP_DB_LEN) {
  2585. usbpd_err(&pd->dev, "Invalid battery cap db\n");
  2586. break;
  2587. }
  2588. memcpy(&pd->battery_cap_db, rx_msg->payload,
  2589. sizeof(pd->battery_cap_db));
  2590. complete(&pd->is_ready);
  2591. break;
  2592. case MSG_GET_BATTERY_CAP:
  2593. handle_get_battery_cap(pd, rx_msg);
  2594. break;
  2595. case MSG_GET_BATTERY_STATUS:
  2596. handle_get_battery_status(pd, rx_msg);
  2597. break;
  2598. default:
  2599. return false;
  2600. }
  2601. return true;
  2602. }
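/*
 * handle_snk_ready_tx - kick off at most one pending sink-initiated
 * AMS per pass through PE_SNK_Ready, in fixed priority order, arming
 * SENDER_RESPONSE_TIME whenever a response is expected.
 */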
  2603. static void handle_snk_ready_tx(struct usbpd *pd, struct rx_msg *rx_msg)
  2604. {
  2605. int ret;
  2606. if (pd->send_get_src_cap_ext) {
  2607. pd->send_get_src_cap_ext = false;
  2608. ret = pd_send_msg(pd, MSG_GET_SOURCE_CAP_EXTENDED, NULL,
  2609. 0, SOP_MSG);
  2610. if (ret) {
  2611. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2612. return;
  2613. }
  2614. kick_sm(pd, SENDER_RESPONSE_TIME);
  2615. } else if (pd->send_get_pps_status) {
  2616. pd->send_get_pps_status = false;
  2617. ret = pd_send_msg(pd, MSG_GET_PPS_STATUS, NULL,
  2618. 0, SOP_MSG);
  2619. if (ret) {
  2620. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2621. return;
  2622. }
  2623. kick_sm(pd, SENDER_RESPONSE_TIME);
  2624. } else if (pd->send_get_status) {
  2625. pd->send_get_status = false;
  2626. ret = pd_send_msg(pd, MSG_GET_STATUS, NULL, 0, SOP_MSG);
  2627. if (ret) {
  2628. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2629. return;
  2630. }
  2631. kick_sm(pd, SENDER_RESPONSE_TIME);
  2632. } else if (pd->send_get_battery_cap) {
  2633. pd->send_get_battery_cap = false;
  2634. ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_CAP,
  2635. &pd->get_battery_cap_db, 1, SOP_MSG);
  2636. if (ret) {
  2637. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2638. return;
  2639. }
  2640. kick_sm(pd, SENDER_RESPONSE_TIME);
  2641. } else if (pd->send_get_battery_status) {
  2642. pd->send_get_battery_status = false;
  2643. ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_STATUS,
  2644. &pd->get_battery_status_db, 1, SOP_MSG);
  2645. if (ret) {
  2646. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2647. return;
  2648. }
  2649. kick_sm(pd, SENDER_RESPONSE_TIME);
  2650. } else if (pd->send_request) {
  2651. pd->send_request = false;
  2652. usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
  2653. } else if (pd->send_pr_swap) {
  2654. pd->send_pr_swap = false;
  2655. ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
  2656. if (ret) {
  2657. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2658. return;
  2659. }
  2660. pd->current_state = PE_PRS_SNK_SRC_SEND_SWAP;
  2661. kick_sm(pd, SENDER_RESPONSE_TIME);
  2662. } else if (pd->send_dr_swap) {
  2663. pd->send_dr_swap = false;
  2664. ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG);
  2665. if (ret) {
  2666. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2667. return;
  2668. }
  2669. pd->current_state = PE_DRS_SEND_DR_SWAP;
  2670. kick_sm(pd, SENDER_RESPONSE_TIME);
  2671. } else {
  2672. handle_vdm_tx(pd, SOP_MSG);
  2673. }
  2674. }
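/*
 * PE_SNK_Ready dispatch: try the control, data and extended handlers in
 * turn; anything still unhandled is answered with Not_Supported (PD 3.0) or
 * Reject (PD 2.0). Transmissions only proceed once is_sink_tx_ok() allows.
 */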
  2675. static void handle_state_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
  2676. {
  2677. int ret;
  2678. if (rx_msg && !PD_MSG_HDR_COUNT(rx_msg->hdr) &&
  2679. handle_ctrl_snk_ready(pd, rx_msg)) {
  2680. return;
  2681. } else if (rx_msg && !PD_MSG_HDR_IS_EXTENDED(rx_msg->hdr) &&
  2682. handle_data_snk_ready(pd, rx_msg)) {
  2683. return;
  2684. } else if (rx_msg && PD_MSG_HDR_IS_EXTENDED(rx_msg->hdr) &&
  2685. handle_ext_snk_ready(pd, rx_msg)) {
  2686. /* continue to handle tx */
  2687. } else if (rx_msg && !IS_CTRL(rx_msg, MSG_NOT_SUPPORTED)) {
  2688. usbpd_dbg(&pd->dev, "Unsupported message\n");
  2689. ret = pd_send_msg(pd, pd->spec_rev == USBPD_REV_30 ?
  2690. MSG_NOT_SUPPORTED : MSG_REJECT,
  2691. NULL, 0, SOP_MSG);
  2692. if (ret)
  2693. usbpd_set_state(pd, PE_SEND_SOFT_RESET);
  2694. return;
  2695. }
  2696. if (is_sink_tx_ok(pd))
  2697. handle_snk_ready_tx(pd, rx_msg);
  2698. }
  2699. static void enter_state_snk_transition_to_default(struct usbpd *pd)
  2700. {
  2701. if (pd->current_dr != DR_UFP) {
  2702. stop_usb_host(pd);
  2703. start_usb_peripheral(pd);
  2704. pd->current_dr = DR_UFP;
  2705. pd->pdphy_ops->update_roles(pd->current_dr, pd->current_pr);
  2706. }
  2707. if (pd->vconn_enabled) {
  2708. regulator_disable(pd->vconn);
  2709. pd->vconn_enabled = false;
  2710. }
  2711. /* max time for hard reset to turn vbus off */
  2712. kick_sm(pd, SNK_HARD_RESET_VBUS_OFF_TIME);
  2713. }
  2714. static void handle_state_snk_transition_to_default(struct usbpd *pd,
  2715. struct rx_msg *rx_msg)
  2716. {
  2717. usbpd_set_state(pd, PE_SNK_STARTUP);
  2718. }
  2719. static void handle_state_drs_send_dr_swap(struct usbpd *pd,
  2720. struct rx_msg *rx_msg)
  2721. {
  2722. if (IS_CTRL(rx_msg, MSG_ACCEPT))
  2723. dr_swap(pd);
  2724. usbpd_set_state(pd, pd->current_pr == PR_SRC ?
  2725. PE_SRC_READY : PE_SNK_READY);
  2726. }
  2727. static void handle_state_prs_snk_src_send_swap(struct usbpd *pd,
  2728. struct rx_msg *rx_msg)
  2729. {
  2730. if (!IS_CTRL(rx_msg, MSG_ACCEPT)) {
  2731. pd->current_state = PE_SNK_READY;
  2732. return;
  2733. }
  2734. usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
  2735. }
  2736. static void enter_state_prs_snk_src_transition_to_off(struct usbpd *pd)
  2737. {
  2738. union power_supply_propval val = {0};
  2739. val.intval = pd->in_pr_swap = true;
  2740. usbpd_set_psy_iio_property(pd,
  2741. POWER_SUPPLY_PROP_PR_SWAP, &val);
  2742. /* lock in current mode */
  2743. set_power_role(pd, pd->current_pr);
  2744. val.intval = pd->requested_current = 0; /* suspend charging */
  2745. usbpd_set_psy_iio_property(pd,
  2746. POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
  2747. pd->in_explicit_contract = false;
  2748. /*
  2749. * need to update PR bit in message header so that
  2750. * proper GoodCRC is sent when receiving next PS_RDY
  2751. */
  2752. pd->pdphy_ops->update_roles(pd->current_dr, PR_SRC);
  2753. /* wait for PS_RDY */
  2754. kick_sm(pd, PS_SOURCE_OFF);
  2755. }
  2756. static void handle_state_prs_snk_src_transition_to_off(struct usbpd *pd,
  2757. struct rx_msg *rx_msg)
  2758. {
  2759. if (!IS_CTRL(rx_msg, MSG_PS_RDY)) {
  2760. usbpd_set_state(pd, PE_ERROR_RECOVERY);
  2761. return;
  2762. }
  2763. /* PE_PRS_SNK_SRC_Assert_Rp */
  2764. pd->current_pr = PR_SRC;
  2765. set_power_role(pd, pd->current_pr);
  2766. pd->current_state = PE_PRS_SNK_SRC_SOURCE_ON;
  2767. handle_state_prs_snk_src_source_on(pd, rx_msg);
  2768. }
  2769. static void handle_state_prs_snk_src_source_on(struct usbpd *pd,
  2770. struct rx_msg *rx_msg)
  2771. {
  2772. int ret;
  2773. enable_vbus(pd);
  2774. ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
  2775. if (ret) {
  2776. usbpd_set_state(pd, PE_ERROR_RECOVERY);
  2777. return;
  2778. }
  2779. usbpd_set_state(pd, PE_SRC_STARTUP);
  2780. }
  2781. static void handle_state_prs_src_snk_send_swap(struct usbpd *pd,
  2782. struct rx_msg *rx_msg)
  2783. {
  2784. if (!IS_CTRL(rx_msg, MSG_ACCEPT)) {
  2785. pd->current_state = PE_SRC_READY;
  2786. return;
  2787. }
  2788. usbpd_set_state(pd, PE_PRS_SRC_SNK_TRANSITION_TO_OFF);
  2789. }
  2790. static void enter_state_prs_src_snk_transition_to_off(struct usbpd *pd)
  2791. {
  2792. union power_supply_propval val = {0};
  2793. val.intval = pd->in_pr_swap = true;
  2794. usbpd_set_psy_iio_property(pd,
  2795. POWER_SUPPLY_PROP_PR_SWAP, &val);
  2796. pd->in_explicit_contract = false;
  2797. /* lock in current mode */
  2798. set_power_role(pd, pd->current_pr);
  2799. kick_sm(pd, SRC_TRANSITION_TIME);
  2800. }
  2801. static void handle_state_prs_src_snk_transition_to_off(struct usbpd *pd,
  2802. struct rx_msg *rx_msg)
  2803. {
  2804. int ret;
  2805. if (pd->vbus_enabled) {
  2806. regulator_disable(pd->vbus);
  2807. pd->vbus_enabled = false;
  2808. }
  2809. /* PE_PRS_SRC_SNK_Assert_Rd */
  2810. pd->current_pr = PR_SINK;
  2811. set_power_role(pd, pd->current_pr);
  2812. pd->pdphy_ops->update_roles(pd->current_dr, pd->current_pr);
  2813. /* allow time for Vbus discharge, must be < tSrcSwapStdby */
  2814. msleep(500);
  2815. ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
  2816. if (ret) {
  2817. usbpd_set_state(pd, PE_ERROR_RECOVERY);
  2818. return;
  2819. }
  2820. pd->current_state = PE_PRS_SRC_SNK_WAIT_SOURCE_ON;
  2821. kick_sm(pd, PS_SOURCE_ON);
  2822. }
  2823. static void handle_state_prs_src_snk_wait_source_on(struct usbpd *pd,
  2824. struct rx_msg *rx_msg)
  2825. {
  2826. if (IS_CTRL(rx_msg, MSG_PS_RDY))
  2827. usbpd_set_state(pd, PE_SNK_STARTUP);
  2828. else
  2829. usbpd_set_state(pd, PE_ERROR_RECOVERY);
  2830. }
  2831. static void enter_state_send_soft_reset(struct usbpd *pd)
  2832. {
  2833. int ret;
  2834. pd_reset_protocol(pd);
  2835. ret = pd_send_msg(pd, MSG_SOFT_RESET, NULL, 0, SOP_MSG);
  2836. if (ret) {
  2837. usbpd_set_state(pd, pd->current_pr == PR_SRC ?
  2838. PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
  2839. return;
  2840. }
  2841. /* wait for ACCEPT */
  2842. kick_sm(pd, SENDER_RESPONSE_TIME);
  2843. }
  2844. static void handle_state_send_soft_reset(struct usbpd *pd,
  2845. struct rx_msg *rx_msg)
  2846. {
  2847. if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
  2848. usbpd_set_state(pd, pd->current_pr == PR_SRC ?
  2849. PE_SRC_SEND_CAPABILITIES :
  2850. PE_SNK_WAIT_FOR_CAPABILITIES);
  2851. } else {
  2852. usbpd_err(&pd->dev, "%s: Did not see Accept, do Hard Reset\n",
  2853. usbpd_state_strings[pd->current_state]);
  2854. usbpd_set_state(pd, pd->current_pr == PR_SRC ?
  2855. PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
  2856. }
  2857. }
  2858. static void handle_state_vcs_wait_for_vconn(struct usbpd *pd,
  2859. struct rx_msg *rx_msg)
  2860. {
  2861. if (IS_CTRL(rx_msg, MSG_PS_RDY)) {
		/*
		 * This check should be redundant, but it guards against an
		 * unbalanced regulator_disable() count if Vconn was never
		 * enabled.
		 */
  2866. if (pd->vconn_enabled)
  2867. regulator_disable(pd->vconn);
  2868. pd->vconn_enabled = false;
  2869. pd->current_state = pd->current_pr == PR_SRC ?
  2870. PE_SRC_READY : PE_SNK_READY;
  2871. } else {
  2872. /* timed out; go to hard reset */
  2873. usbpd_set_state(pd, pd->current_pr == PR_SRC ?
  2874. PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
  2875. }
  2876. }
  2877. /* Enters new state and executes actions on entry */
  2878. static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
  2879. {
  2880. if (pd->hard_reset_recvd) /* let usbpd_sm handle it */
  2881. return;
  2882. usbpd_dbg(&pd->dev, "%s -> %s\n",
  2883. usbpd_state_strings[pd->current_state],
  2884. usbpd_state_strings[next_state]);
  2885. pd->current_state = next_state;
  2886. if (pd->current_state < PE_MAX_STATES &&
  2887. state_handlers[pd->current_state].enter_state)
  2888. state_handlers[pd->current_state].enter_state(pd);
  2889. else
  2890. usbpd_dbg(&pd->dev, "No action for state %s\n",
  2891. usbpd_state_strings[pd->current_state]);
  2892. }
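/*
 * Per-state callback table: .enter_state runs once when usbpd_set_state()
 * transitions into the state, .handle_state runs from usbpd_sm() for each
 * received message or timeout while in that state.
 */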
  2893. static const struct usbpd_state_handler state_handlers[] = {
  2894. [PE_UNKNOWN] = {NULL, handle_state_unknown},
  2895. [PE_ERROR_RECOVERY] = {enter_state_error_recovery, NULL},
  2896. [PE_SRC_DISABLED] = {enter_state_src_disabled, NULL},
  2897. [PE_SRC_STARTUP] = {enter_state_src_startup, handle_state_src_startup},
  2898. [PE_SRC_STARTUP_WAIT_FOR_VDM_RESP] = {NULL,
  2899. handle_state_src_startup_wait_for_vdm_resp},
  2900. [PE_SRC_SEND_CAPABILITIES] = {enter_state_src_send_capabilities,
  2901. handle_state_src_send_capabilities},
  2902. [PE_SRC_SEND_CAPABILITIES_WAIT] = {NULL,
  2903. handle_state_src_send_capabilities_wait},
  2904. [PE_SRC_NEGOTIATE_CAPABILITY] = {enter_state_src_negotiate_capability,
  2905. NULL},
  2906. [PE_SRC_READY] = {enter_state_src_ready, handle_state_src_ready},
  2907. [PE_SRC_HARD_RESET] = {enter_state_hard_reset, NULL},
  2908. [PE_SRC_SOFT_RESET] = {NULL, handle_state_soft_reset},
  2909. [PE_SRC_TRANSITION_TO_DEFAULT] = {NULL,
  2910. handle_state_src_transition_to_default},
  2911. [PE_SNK_STARTUP] = {enter_state_snk_startup,
  2912. handle_state_snk_startup},
  2913. [PE_SNK_DISCOVERY] = {NULL, handle_state_snk_discovery},
  2914. [PE_SNK_WAIT_FOR_CAPABILITIES] = {
  2915. enter_state_snk_wait_for_capabilities,
  2916. handle_state_snk_wait_for_capabilities},
  2917. [PE_SNK_EVALUATE_CAPABILITY] = {enter_state_snk_evaluate_capability,
  2918. NULL},
  2919. [PE_SNK_SELECT_CAPABILITY] = {enter_state_snk_select_capability,
  2920. handle_state_snk_select_capability},
  2921. [PE_SNK_TRANSITION_SINK] = {enter_state_snk_transition_sink,
  2922. handle_state_snk_transition_sink},
  2923. [PE_SNK_READY] = {enter_state_snk_ready, handle_state_snk_ready},
  2924. [PE_SNK_HARD_RESET] = {enter_state_hard_reset, NULL},
  2925. [PE_SNK_SOFT_RESET] = {NULL, handle_state_soft_reset},
  2926. [PE_SNK_TRANSITION_TO_DEFAULT] = {
  2927. enter_state_snk_transition_to_default,
  2928. handle_state_snk_transition_to_default},
  2929. [PE_DRS_SEND_DR_SWAP] = {NULL, handle_state_drs_send_dr_swap},
  2930. [PE_PRS_SNK_SRC_SEND_SWAP] = {NULL, handle_state_prs_snk_src_send_swap},
  2931. [PE_PRS_SNK_SRC_TRANSITION_TO_OFF] = {
  2932. enter_state_prs_snk_src_transition_to_off,
  2933. handle_state_prs_snk_src_transition_to_off},
  2934. [PE_PRS_SNK_SRC_SOURCE_ON] = {NULL, handle_state_prs_snk_src_source_on},
  2935. [PE_PRS_SRC_SNK_SEND_SWAP] = {NULL, handle_state_prs_src_snk_send_swap},
  2936. [PE_PRS_SRC_SNK_TRANSITION_TO_OFF] = {
  2937. enter_state_prs_src_snk_transition_to_off,
  2938. handle_state_prs_src_snk_transition_to_off},
  2939. [PE_PRS_SRC_SNK_WAIT_SOURCE_ON] = {NULL,
  2940. handle_state_prs_src_snk_wait_source_on},
  2941. [PE_SEND_SOFT_RESET] = {enter_state_send_soft_reset,
  2942. handle_state_send_soft_reset},
  2943. [PE_VCS_WAIT_FOR_VCONN] = {NULL, handle_state_vcs_wait_for_vconn},
  2944. };
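/*
 * Complete teardown on cable disconnect (current_pr == PR_NONE): disable
 * Vconn/VBUS, close the PD PHY, clear all negotiated contract state, stop
 * the USB host/peripheral stack and restore the CC state to DRP (or to the
 * role forced via port_type_set).
 */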
  2945. static void handle_disconnect(struct usbpd *pd)
  2946. {
  2947. union power_supply_propval val = {0};
  2948. if (pd->vconn_enabled) {
  2949. regulator_disable(pd->vconn);
  2950. pd->vconn_enabled = false;
  2951. }
  2952. usbpd_info(&pd->dev, "USB Type-C disconnect\n");
  2953. if (pd->pd_phy_opened) {
  2954. pd->pdphy_ops->close();
  2955. pd->pd_phy_opened = false;
  2956. }
  2957. pd->in_pr_swap = false;
  2958. pd->pd_connected = false;
  2959. pd->in_explicit_contract = false;
  2960. pd->hard_reset_recvd = false;
  2961. pd->caps_count = 0;
  2962. pd->hard_reset_count = 0;
  2963. pd->requested_voltage = 0;
  2964. pd->requested_current = 0;
  2965. pd->selected_pdo = pd->requested_pdo = 0;
  2966. pd->peer_usb_comm = pd->peer_pr_swap = pd->peer_dr_swap = false;
  2967. memset(&pd->received_pdos, 0, sizeof(pd->received_pdos));
  2968. rx_msg_cleanup(pd);
  2969. usbpd_set_psy_iio_property(pd,
  2970. POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
  2971. usbpd_set_psy_iio_property(pd,
  2972. POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);
  2973. usbpd_set_psy_iio_property(pd,
  2974. POWER_SUPPLY_PROP_PD_ACTIVE, &val);
  2975. if (pd->vbus_enabled) {
  2976. regulator_disable(pd->vbus);
  2977. pd->vbus_enabled = false;
  2978. }
  2979. reset_vdm_state(pd);
  2980. if (pd->current_dr == DR_UFP)
  2981. stop_usb_peripheral(pd);
  2982. else if (pd->current_dr == DR_DFP)
  2983. stop_usb_host(pd);
  2984. pd->current_dr = DR_NONE;
  2985. if (pd->current_state == PE_ERROR_RECOVERY)
  2986. /* forced disconnect, wait before resetting to DRP */
  2987. usleep_range(ERROR_RECOVERY_TIME * USEC_PER_MSEC,
  2988. (ERROR_RECOVERY_TIME + 5) * USEC_PER_MSEC);
  2989. val.intval = 0;
  2990. usbpd_set_psy_iio_property(pd,
  2991. POWER_SUPPLY_PROP_PR_SWAP, &val);
  2992. /* set due to dual_role class "mode" change */
  2993. if (pd->forced_pr != QTI_POWER_SUPPLY_TYPEC_PR_NONE)
  2994. val.intval = pd->forced_pr;
  2995. else
  2996. /* Set CC back to DRP toggle */
  2997. val.intval = QTI_POWER_SUPPLY_TYPEC_PR_DUAL;
  2998. usbpd_set_psy_iio_property(pd,
  2999. POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
  3000. pd->forced_pr = QTI_POWER_SUPPLY_TYPEC_PR_NONE;
  3001. pd->current_state = PE_UNKNOWN;
  3002. pd_reset_protocol(pd);
  3003. kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
  3004. typec_unregister_partner(pd->partner);
  3005. pd->partner = NULL;
  3006. if (pd->has_dp)
  3007. pd->has_dp = false;
  3008. }
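/*
 * Hard Reset received or issued: fall back to default vSafe5V sink settings,
 * clear the explicit contract and any pending messages, then transition to
 * the appropriate Transition_to_default state (the source side waits out
 * tPSHardReset first).
 */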
  3009. static void handle_hard_reset(struct usbpd *pd)
  3010. {
  3011. union power_supply_propval val = {0};
  3012. pd->hard_reset_recvd = false;
  3013. if (pd->requested_current) {
  3014. val.intval = pd->requested_current = 0;
  3015. usbpd_set_psy_iio_property(pd,
  3016. POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
  3017. }
  3018. pd->requested_voltage = 5000000;
  3019. val.intval = pd->requested_voltage;
  3020. usbpd_set_psy_iio_property(pd,
  3021. POWER_SUPPLY_PROP_PD_VOLTAGE_MIN, &val);
  3022. pd->in_pr_swap = false;
  3023. val.intval = 0;
  3024. usbpd_set_psy_iio_property(pd,
  3025. POWER_SUPPLY_PROP_PR_SWAP, &val);
  3026. pd->in_explicit_contract = false;
  3027. pd->selected_pdo = pd->requested_pdo = 0;
  3028. pd->rdo = 0;
  3029. rx_msg_cleanup(pd);
  3030. reset_vdm_state(pd);
  3031. kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
  3032. if (pd->current_pr == PR_SINK) {
  3033. usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
  3034. } else {
  3035. s64 delta = ktime_ms_delta(ktime_get(),
  3036. pd->hard_reset_recvd_time);
  3037. pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
  3038. if (delta >= PS_HARD_RESET_TIME)
  3039. kick_sm(pd, 0);
  3040. else
  3041. kick_sm(pd, PS_HARD_RESET_TIME - (int)delta);
  3042. }
  3043. }
  3044. /* Handles current state and determines transitions */
  3045. static void usbpd_sm(struct work_struct *w)
  3046. {
  3047. struct usbpd *pd = container_of(w, struct usbpd, sm_work);
  3048. int ret;
  3049. struct rx_msg *rx_msg = NULL;
  3050. unsigned long flags;
  3051. usbpd_dbg(&pd->dev, "handle state %s\n",
  3052. usbpd_state_strings[pd->current_state]);
  3053. hrtimer_cancel(&pd->timer);
  3054. pd->sm_queued = false;
  3055. spin_lock_irqsave(&pd->rx_lock, flags);
  3056. if (!list_empty(&pd->rx_q)) {
  3057. rx_msg = list_first_entry(&pd->rx_q, struct rx_msg, entry);
  3058. list_del(&rx_msg->entry);
  3059. }
  3060. spin_unlock_irqrestore(&pd->rx_lock, flags);
  3061. /* Disconnect? */
  3062. if (pd->current_pr == PR_NONE) {
  3063. if (pd->current_state == PE_UNKNOWN &&
  3064. pd->current_dr == DR_NONE)
  3065. goto sm_done;
  3066. handle_disconnect(pd);
  3067. goto sm_done;
  3068. }
  3069. /* Hard reset? */
  3070. if (pd->hard_reset_recvd) {
  3071. handle_hard_reset(pd);
  3072. goto sm_done;
  3073. }
  3074. /* Soft reset? */
  3075. if (IS_CTRL(rx_msg, MSG_SOFT_RESET)) {
  3076. usbpd_dbg(&pd->dev, "Handle soft reset\n");
  3077. if (pd->current_pr == PR_SRC)
  3078. pd->current_state = PE_SRC_SOFT_RESET;
  3079. else if (pd->current_pr == PR_SINK)
  3080. pd->current_state = PE_SNK_SOFT_RESET;
  3081. }
  3082. if (pd->current_state < PE_MAX_STATES &&
  3083. state_handlers[pd->current_state].handle_state)
  3084. state_handlers[pd->current_state].handle_state(pd, rx_msg);
  3085. else
  3086. usbpd_err(&pd->dev, "Unhandled state %s\n",
  3087. usbpd_state_strings[pd->current_state]);
  3088. sm_done:
  3089. kfree(rx_msg);
  3090. spin_lock_irqsave(&pd->rx_lock, flags);
  3091. ret = list_empty(&pd->rx_q);
  3092. spin_unlock_irqrestore(&pd->rx_lock, flags);
  3093. /* requeue if there are any new/pending RX messages */
  3094. if (!ret && !pd->sm_queued)
  3095. kick_sm(pd, 0);
  3096. if (!pd->sm_queued)
  3097. pm_relax(&pd->dev);
  3098. }
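/* Human-readable label for the Type-C Rp current advertisement (logging) */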
  3099. static inline const char *src_current(enum power_supply_typec_mode typec_mode)
  3100. {
  3101. switch (typec_mode) {
  3102. case QTI_POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
  3103. return "default";
  3104. case QTI_POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
  3105. return "medium - 1.5A";
  3106. case QTI_POWER_SUPPLY_TYPEC_SOURCE_HIGH:
  3107. return "high - 3.0A";
  3108. default:
  3109. return "";
  3110. }
  3111. }
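/*
 * Map a Type-C mode (CC state) change onto a power-role update. Returns 1
 * if the state machine should be kicked, 0 if the event can be ignored
 * (e.g. no role change, or a disconnect that is part of a PR swap).
 */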
  3112. static int usbpd_process_typec_mode(struct usbpd *pd,
  3113. enum power_supply_typec_mode typec_mode)
  3114. {
  3115. switch (typec_mode) {
  3116. /* Disconnect */
  3117. case QTI_POWER_SUPPLY_TYPEC_NONE:
  3118. if (pd->in_pr_swap) {
  3119. usbpd_dbg(&pd->dev, "Ignoring disconnect due to PR swap\n");
  3120. return 0;
  3121. }
  3122. pd->current_pr = PR_NONE;
  3123. break;
  3124. /* Sink states */
  3125. case QTI_POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
  3126. case QTI_POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
  3127. case QTI_POWER_SUPPLY_TYPEC_SOURCE_HIGH:
  3128. usbpd_info(&pd->dev, "Type-C Source (%s) connected\n",
  3129. src_current(typec_mode));
  3130. /* if waiting for SinkTxOk to start an AMS */
  3131. if (pd->spec_rev == USBPD_REV_30 &&
  3132. typec_mode == QTI_POWER_SUPPLY_TYPEC_SOURCE_HIGH &&
  3133. (pd->send_pr_swap || pd->send_dr_swap || pd->vdm_tx))
  3134. break;
  3135. if (pd->current_pr == PR_SINK)
  3136. return 0;
  3137. /*
  3138. * Unexpected if not in PR swap; need to force disconnect from
  3139. * source so we can turn off VBUS, Vconn, PD PHY etc.
  3140. */
  3141. if (pd->current_pr == PR_SRC) {
  3142. usbpd_info(&pd->dev, "Forcing disconnect from source mode\n");
  3143. pd->current_pr = PR_NONE;
  3144. break;
  3145. }
  3146. pd->current_pr = PR_SINK;
  3147. break;
  3148. /* Source states */
  3149. case QTI_POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE:
  3150. case QTI_POWER_SUPPLY_TYPEC_SINK:
  3151. usbpd_info(&pd->dev, "Type-C Sink%s connected\n",
  3152. typec_mode == QTI_POWER_SUPPLY_TYPEC_SINK ?
  3153. "" : " (powered)");
  3154. if (pd->current_pr == PR_SRC)
  3155. return 0;
  3156. pd->current_pr = PR_SRC;
  3157. break;
  3158. case QTI_POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY:
  3159. usbpd_info(&pd->dev, "Type-C Debug Accessory connected\n");
  3160. break;
  3161. case QTI_POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER:
  3162. usbpd_info(&pd->dev, "Type-C Analog Audio Adapter connected\n");
  3163. break;
  3164. default:
  3165. usbpd_warn(&pd->dev, "Unsupported typec mode:%d\n",
  3166. typec_mode);
  3167. break;
  3168. }
  3169. return 1; /* kick state machine */
  3170. }
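/*
 * Deferred power_supply notifier: reads TYPEC_MODE, PE_START and VBUS
 * presence, handles early peripheral start and the hard-reset VBUS
 * transitions, and kicks the state machine when the CC state changes.
 */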
  3171. static void psy_changed_notifier_work(struct work_struct *w)
  3172. {
  3173. struct usbpd *pd = container_of(w, struct usbpd, psy_chg_work);
  3174. union power_supply_propval val;
  3175. enum power_supply_typec_mode typec_mode;
  3176. int ret;
  3177. int usb_extcon_state;
  3178. ret = usbpd_get_psy_iio_property(pd,
  3179. POWER_SUPPLY_PROP_TYPEC_MODE, &val);
  3180. if (ret) {
  3181. usbpd_err(&pd->dev, "Unable to read USB TYPEC_MODE: %d\n", ret);
  3182. return;
  3183. }
  3184. typec_mode = val.intval;
  3185. ret = usbpd_get_psy_iio_property(pd,
  3186. POWER_SUPPLY_PROP_PE_START, &val);
  3187. if (ret) {
  3188. usbpd_err(&pd->dev, "Unable to read USB PROP_PE_START: %d\n",
  3189. ret);
  3190. return;
  3191. }
  3192. /* Don't proceed if PE_START=0; start USB directly if needed */
  3193. if (!val.intval && !pd->pd_connected &&
  3194. typec_mode >= QTI_POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
  3195. ret = usbpd_get_psy_iio_property(pd,
  3196. POWER_SUPPLY_PROP_REAL_TYPE, &val);
  3197. if (ret) {
  3198. usbpd_err(&pd->dev, "Unable to read USB TYPE: %d\n",
  3199. ret);
  3200. return;
  3201. }
  3202. if (val.intval == POWER_SUPPLY_TYPE_USB ||
  3203. val.intval == POWER_SUPPLY_TYPE_USB_CDP ||
  3204. val.intval == QTI_POWER_SUPPLY_TYPE_USB_FLOAT) {
  3205. usbpd_dbg(&pd->dev, "typec mode:%d type:%d\n",
  3206. typec_mode, val.intval);
  3207. pd->typec_mode = typec_mode;
  3208. queue_work(pd->wq, &pd->start_periph_work);
  3209. }
  3210. return;
  3211. }
  3212. ret = power_supply_get_property(pd->usb_psy,
  3213. POWER_SUPPLY_PROP_PRESENT, &val);
  3214. if (ret) {
  3215. usbpd_err(&pd->dev, "Unable to read USB PRESENT: %d\n", ret);
  3216. return;
  3217. }
  3218. pd->vbus_present = val.intval;
  3219. /*
  3220. * For sink hard reset, state machine needs to know when VBUS changes
  3221. * - when in PE_SNK_TRANSITION_TO_DEFAULT, notify when VBUS falls
  3222. * - when in PE_SNK_DISCOVERY, notify when VBUS rises
  3223. */
  3224. if (typec_mode && ((!pd->vbus_present &&
  3225. pd->current_state == PE_SNK_TRANSITION_TO_DEFAULT) ||
  3226. (pd->vbus_present && pd->current_state == PE_SNK_DISCOVERY))) {
  3227. usbpd_dbg(&pd->dev, "hard reset: typec mode:%d present:%d\n",
  3228. typec_mode, pd->vbus_present);
  3229. pd->typec_mode = typec_mode;
  3230. if (!work_busy(&pd->sm_work))
  3231. kick_sm(pd, 0);
  3232. else
  3233. usbpd_dbg(&pd->dev, "usbpd_sm already running\n");
  3234. return;
  3235. }
	if (pd->typec_mode == typec_mode) {
		/* no CC change; only recheck enumeration for UFP/no data role */
		if (pd->current_dr != DR_NONE && pd->current_dr != DR_UFP)
			return;
  3239. usb_extcon_state = extcon_get_state(pd->extcon, EXTCON_USB);
  3240. if (usb_extcon_state == 0) {
  3241. ret = usbpd_get_psy_iio_property(pd, POWER_SUPPLY_PROP_REAL_TYPE,
  3242. &val);
  3243. if (ret) {
  3244. usbpd_err(&pd->dev, "Unable to read USB PROP_REAL_TYPE: %d\n",
  3245. ret);
  3246. return;
  3247. }
  3248. if (val.intval == POWER_SUPPLY_TYPE_USB ||
  3249. val.intval == POWER_SUPPLY_TYPE_USB_CDP ||
  3250. val.intval == QTI_POWER_SUPPLY_TYPE_USB_FLOAT)
  3251. queue_work(pd->wq, &pd->start_periph_work);
  3252. }
  3253. return;
  3254. }
  3255. pd->typec_mode = typec_mode;
  3256. usbpd_dbg(&pd->dev, "typec mode:%d present:%d orientation:%d\n",
  3257. typec_mode, pd->vbus_present,
  3258. usbpd_get_plug_orientation(pd));
  3259. ret = usbpd_process_typec_mode(pd, typec_mode);
  3260. /* queue state machine due to CC state change */
  3261. if (ret)
  3262. kick_sm(pd, 0);
  3263. }
  3264. static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
  3265. {
  3266. struct usbpd *pd = container_of(nb, struct usbpd, psy_nb);
  3267. if (ptr != pd->usb_psy || evt != PSY_EVENT_PROP_CHANGED)
  3268. return 0;
  3269. queue_work(pd->wq, &pd->psy_chg_work);
  3270. return 0;
  3271. }
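/*
 * Synchronous DR/PR swap helper for the typec class ops: flags the requested
 * swap, kicks the state machine (starting a source-initiated AMS if needed)
 * and blocks on pd->is_ready for up to timeout_ms.
 */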
  3272. static int usbpd_do_swap(struct usbpd *pd, bool dr_swap,
  3273. unsigned int timeout_ms)
  3274. {
  3275. if (pd->current_state != PE_SRC_READY &&
  3276. pd->current_state != PE_SNK_READY) {
  3277. usbpd_err(&pd->dev, "%s_role swap not allowed: PD not in Ready state\n",
  3278. dr_swap ? "data" : "power");
  3279. return -EAGAIN;
  3280. }
  3281. mutex_lock(&pd->swap_lock);
  3282. reinit_completion(&pd->is_ready);
  3283. if (dr_swap)
  3284. pd->send_dr_swap = true;
  3285. else
  3286. pd->send_pr_swap = true;
  3287. if (pd->current_state == PE_SRC_READY && !in_src_ams(pd))
  3288. start_src_ams(pd, true);
  3289. else
  3290. kick_sm(pd, 0);
  3291. /* wait for operation to complete */
  3292. if (!wait_for_completion_timeout(&pd->is_ready,
  3293. msecs_to_jiffies(timeout_ms))) {
  3294. usbpd_err(&pd->dev, "%s_role swap timed out\n",
  3295. dr_swap ? "data" : "power");
  3296. mutex_unlock(&pd->swap_lock);
  3297. return -ETIMEDOUT;
  3298. }
  3299. mutex_unlock(&pd->swap_lock);
  3300. return 0;
  3301. }
  3302. static int usbpd_typec_dr_set(struct typec_port *port,
  3303. enum typec_data_role role)
  3304. {
  3305. struct usbpd *pd = (struct usbpd *)typec_get_drvdata(port);
  3306. bool do_swap = false;
  3307. int ret;
  3308. usbpd_dbg(&pd->dev, "Setting data role to %d\n", role);
  3309. if (role == TYPEC_HOST) {
  3310. if (pd->current_dr == DR_UFP)
  3311. do_swap = true;
  3312. } else if (role == TYPEC_DEVICE) {
  3313. if (pd->current_dr == DR_DFP)
  3314. do_swap = true;
  3315. } else {
  3316. usbpd_warn(&pd->dev, "invalid role\n");
  3317. return -EINVAL;
  3318. }
  3319. if (do_swap) {
  3320. ret = usbpd_do_swap(pd, true, 100);
  3321. if (ret)
  3322. return ret;
  3323. if ((role == TYPEC_HOST && pd->current_dr != DR_DFP) ||
  3324. (role == TYPEC_DEVICE && pd->current_dr != DR_UFP)) {
  3325. usbpd_err(&pd->dev, "incorrect state (%s) after data_role swap\n",
  3326. pd->current_dr == DR_DFP ?
  3327. "dfp" : "ufp");
  3328. return -EPROTO;
  3329. }
  3330. }
  3331. return 0;
  3332. }
  3333. static int usbpd_typec_pr_set(struct typec_port *port,
  3334. enum typec_role role)
  3335. {
  3336. struct usbpd *pd = (struct usbpd *)typec_get_drvdata(port);
  3337. bool do_swap = false;
  3338. int ret;
  3339. usbpd_dbg(&pd->dev, "Setting power role to %d\n", role);
  3340. if (role == TYPEC_SOURCE) {
  3341. if (pd->current_pr == PR_SINK)
  3342. do_swap = true;
  3343. } else if (role == TYPEC_SINK) {
  3344. if (pd->current_pr == PR_SRC)
  3345. do_swap = true;
  3346. } else {
  3347. usbpd_warn(&pd->dev, "invalid role\n");
  3348. return -EINVAL;
  3349. }
  3350. if (do_swap) {
  3351. ret = usbpd_do_swap(pd, false, 2000);
  3352. if (ret)
  3353. return ret;
  3354. if ((role == TYPEC_SOURCE && pd->current_pr != PR_SRC) ||
  3355. (role == TYPEC_SINK && pd->current_pr != PR_SINK)) {
  3356. usbpd_err(&pd->dev, "incorrect state (%s) after power_role swap\n",
  3357. pd->current_pr == PR_SRC ?
  3358. "source" : "sink");
  3359. return -EPROTO;
  3360. }
  3361. }
  3362. return 0;
  3363. }
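/*
 * typec class port_type_set: force a CC role change (not a PD-based swap) by
 * latching forced_pr and dropping the connection, then poll until the new
 * attachment appears; fall back to DRP on timeout.
 */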
  3364. static int usbpd_typec_port_type_set(struct typec_port *port,
  3365. enum typec_port_type type)
  3366. {
  3367. struct usbpd *pd = (struct usbpd *)typec_get_drvdata(port);
  3368. union power_supply_propval value;
  3369. int wait_count = 5;
  3370. usbpd_dbg(&pd->dev, "Setting mode to %d\n", type);
  3371. if (type == TYPEC_PORT_DRP)
  3372. return 0;
  3373. /*
  3374. * Forces disconnect on CC and re-establishes connection.
  3375. * This does not use PD-based PR/DR swap
  3376. */
  3377. if (type == TYPEC_PORT_SNK)
  3378. pd->forced_pr = QTI_POWER_SUPPLY_TYPEC_PR_SINK;
  3379. else if (type == TYPEC_PORT_SRC)
  3380. pd->forced_pr = QTI_POWER_SUPPLY_TYPEC_PR_SOURCE;
  3381. /* new mode will be applied in disconnect handler */
  3382. set_power_role(pd, PR_NONE);
  3383. /* wait until it takes effect */
  3384. while (pd->forced_pr != QTI_POWER_SUPPLY_TYPEC_PR_NONE && --wait_count)
  3385. msleep(20);
  3386. if (!wait_count)
  3387. goto reset_drp;
  3388. /* if we cannot have a valid connection, fallback to old role */
  3389. wait_count = 5;
  3390. while (pd->current_pr == PR_NONE && --wait_count)
  3391. msleep(300);
  3392. if (!wait_count)
  3393. goto reset_drp;
  3394. return 0;
  3395. reset_drp:
  3396. usbpd_err(&pd->dev, "setting mode timed out\n");
	/* Fall back to DRP and let the hardware resolve the new mode */
  3398. value.intval = QTI_POWER_SUPPLY_TYPEC_PR_DUAL;
  3399. usbpd_set_psy_iio_property(pd,
  3400. POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &value);
  3401. return -ETIMEDOUT;
  3402. }
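/*
 * Populate the uevent environment (roles, contract, PDOs, RDO, Status data
 * block) for the KOBJ_CHANGE notifications sent on PD state changes.
 */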
  3403. static int usbpd_uevent(struct device *dev, struct kobj_uevent_env *env)
  3404. {
  3405. struct usbpd *pd = dev_get_drvdata(dev);
  3406. int i;
  3407. add_uevent_var(env, "DATA_ROLE=%s", pd->current_dr == DR_DFP ?
  3408. "dfp" : "ufp");
  3409. if (pd->current_pr == PR_SINK) {
  3410. add_uevent_var(env, "POWER_ROLE=sink");
  3411. add_uevent_var(env, "SRC_CAP_ID=%d", pd->src_cap_id);
  3412. for (i = 0; i < ARRAY_SIZE(pd->received_pdos); i++)
  3413. add_uevent_var(env, "PDO%d=%08x", i,
  3414. pd->received_pdos[i]);
  3415. add_uevent_var(env, "REQUESTED_PDO=%d", pd->requested_pdo);
  3416. add_uevent_var(env, "SELECTED_PDO=%d", pd->selected_pdo);
  3417. } else {
  3418. add_uevent_var(env, "POWER_ROLE=source");
  3419. for (i = 0; i < ARRAY_SIZE(default_src_caps); i++)
  3420. add_uevent_var(env, "PDO%d=%08x", i,
  3421. default_src_caps[i]);
  3422. }
  3423. add_uevent_var(env, "RDO=%08x", pd->rdo);
  3424. add_uevent_var(env, "CONTRACT=%s", pd->in_explicit_contract ?
  3425. "explicit" : "implicit");
  3426. add_uevent_var(env, "ALT_MODE=%d", pd->vdm_state == MODE_ENTERED);
  3427. add_uevent_var(env, "SDB=%02x %02x %02x %02x %02x %02x",
  3428. pd->status_db[0], pd->status_db[1], pd->status_db[2],
  3429. pd->status_db[3], pd->status_db[4], pd->status_db[5]);
  3430. return 0;
  3431. }
  3432. static ssize_t contract_show(struct device *dev, struct device_attribute *attr,
  3433. char *buf)
  3434. {
  3435. struct usbpd *pd = dev_get_drvdata(dev);
  3436. return scnprintf(buf, PAGE_SIZE, "%s\n",
  3437. pd->in_explicit_contract ? "explicit" : "implicit");
  3438. }
  3439. static DEVICE_ATTR_RO(contract);
  3440. static ssize_t current_pr_show(struct device *dev,
  3441. struct device_attribute *attr, char *buf)
  3442. {
  3443. struct usbpd *pd = dev_get_drvdata(dev);
  3444. const char *pr = "none";
  3445. if (pd->current_pr == PR_SINK)
  3446. pr = "sink";
  3447. else if (pd->current_pr == PR_SRC)
  3448. pr = "source";
  3449. return scnprintf(buf, PAGE_SIZE, "%s\n", pr);
  3450. }
  3451. static DEVICE_ATTR_RO(current_pr);
  3452. static ssize_t initial_pr_show(struct device *dev,
  3453. struct device_attribute *attr, char *buf)
  3454. {
  3455. struct usbpd *pd = dev_get_drvdata(dev);
  3456. const char *pr = "none";
  3457. if (pd->typec_mode >= QTI_POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
  3458. pr = "sink";
  3459. else if (pd->typec_mode >= QTI_POWER_SUPPLY_TYPEC_SINK)
  3460. pr = "source";
  3461. return scnprintf(buf, PAGE_SIZE, "%s\n", pr);
  3462. }
  3463. static DEVICE_ATTR_RO(initial_pr);
  3464. static ssize_t current_dr_show(struct device *dev,
  3465. struct device_attribute *attr, char *buf)
  3466. {
  3467. struct usbpd *pd = dev_get_drvdata(dev);
  3468. const char *dr = "none";
  3469. if (pd->current_dr == DR_UFP)
  3470. dr = "ufp";
  3471. else if (pd->current_dr == DR_DFP)
  3472. dr = "dfp";
  3473. return scnprintf(buf, PAGE_SIZE, "%s\n", dr);
  3474. }
  3475. static DEVICE_ATTR_RO(current_dr);
  3476. static ssize_t initial_dr_show(struct device *dev,
  3477. struct device_attribute *attr, char *buf)
  3478. {
  3479. struct usbpd *pd = dev_get_drvdata(dev);
  3480. const char *dr = "none";
  3481. if (pd->typec_mode >= QTI_POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
  3482. dr = "ufp";
  3483. else if (pd->typec_mode >= QTI_POWER_SUPPLY_TYPEC_SINK)
  3484. dr = "dfp";
  3485. return scnprintf(buf, PAGE_SIZE, "%s\n", dr);
  3486. }
  3487. static DEVICE_ATTR_RO(initial_dr);
  3488. static ssize_t src_cap_id_show(struct device *dev,
  3489. struct device_attribute *attr, char *buf)
  3490. {
  3491. struct usbpd *pd = dev_get_drvdata(dev);
  3492. return scnprintf(buf, PAGE_SIZE, "%d\n", pd->src_cap_id);
  3493. }
  3494. static DEVICE_ATTR_RO(src_cap_id);
  3495. /* Dump received source PDOs in human-readable format */
  3496. static ssize_t pdo_h_show(struct device *dev, struct device_attribute *attr,
  3497. char *buf)
  3498. {
  3499. struct usbpd *pd = dev_get_drvdata(dev);
  3500. int i;
  3501. ssize_t cnt = 0;
  3502. for (i = 0; i < ARRAY_SIZE(pd->received_pdos); i++) {
  3503. u32 pdo = pd->received_pdos[i];
  3504. if (pdo == 0)
  3505. break;
  3506. cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt, "PDO %d\n", i + 1);
  3507. if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_FIXED) {
  3508. cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
  3509. "\tFixed supply\n"
  3510. "\tDual-Role Power:%d\n"
  3511. "\tUSB Suspend Supported:%d\n"
  3512. "\tExternally Powered:%d\n"
  3513. "\tUSB Communications Capable:%d\n"
  3514. "\tData Role Swap:%d\n"
  3515. "\tPeak Current:%d\n"
  3516. "\tVoltage:%d (mV)\n"
  3517. "\tMax Current:%d (mA)\n",
  3518. PD_SRC_PDO_FIXED_PR_SWAP(pdo),
  3519. PD_SRC_PDO_FIXED_USB_SUSP(pdo),
  3520. PD_SRC_PDO_FIXED_EXT_POWERED(pdo),
  3521. PD_SRC_PDO_FIXED_USB_COMM(pdo),
  3522. PD_SRC_PDO_FIXED_DR_SWAP(pdo),
  3523. PD_SRC_PDO_FIXED_PEAK_CURR(pdo),
  3524. PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50,
  3525. PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10);
  3526. } else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_BATTERY) {
  3527. cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
  3528. "\tBattery supply\n"
  3529. "\tMax Voltage:%d (mV)\n"
  3530. "\tMin Voltage:%d (mV)\n"
  3531. "\tMax Power:%d (mW)\n",
  3532. PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) * 50,
  3533. PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) * 50,
  3534. PD_SRC_PDO_VAR_BATT_MAX(pdo) * 250);
  3535. } else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_VARIABLE) {
  3536. cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
  3537. "\tVariable supply\n"
  3538. "\tMax Voltage:%d (mV)\n"
  3539. "\tMin Voltage:%d (mV)\n"
  3540. "\tMax Current:%d (mA)\n",
  3541. PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) * 50,
  3542. PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) * 50,
  3543. PD_SRC_PDO_VAR_BATT_MAX(pdo) * 10);
  3544. } else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_AUGMENTED) {
  3545. cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
  3546. "\tProgrammable Power supply\n"
  3547. "\tMax Voltage:%d (mV)\n"
  3548. "\tMin Voltage:%d (mV)\n"
  3549. "\tMax Current:%d (mA)\n",
  3550. PD_APDO_MAX_VOLT(pdo) * 100,
  3551. PD_APDO_MIN_VOLT(pdo) * 100,
  3552. PD_APDO_MAX_CURR(pdo) * 50);
  3553. } else {
  3554. cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
  3555. "Invalid PDO\n");
  3556. }
  3557. cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt, "\n");
  3558. }
  3559. return cnt;
  3560. }
  3561. static DEVICE_ATTR_RO(pdo_h);
  3562. static ssize_t pdo_n_show(struct device *dev, struct device_attribute *attr,
  3563. char *buf);
  3564. #define PDO_ATTR(n) { \
  3565. .attr = { .name = __stringify(pdo##n), .mode = 0444 }, \
  3566. .show = pdo_n_show, \
  3567. }
  3568. static struct device_attribute dev_attr_pdos[] = {
  3569. PDO_ATTR(1),
  3570. PDO_ATTR(2),
  3571. PDO_ATTR(3),
  3572. PDO_ATTR(4),
  3573. PDO_ATTR(5),
  3574. PDO_ATTR(6),
  3575. PDO_ATTR(7),
  3576. };
  3577. static ssize_t pdo_n_show(struct device *dev, struct device_attribute *attr,
  3578. char *buf)
  3579. {
  3580. struct usbpd *pd = dev_get_drvdata(dev);
  3581. int i;
  3582. for (i = 0; i < ARRAY_SIZE(dev_attr_pdos); i++)
  3583. if (attr == &dev_attr_pdos[i])
  3584. /* dump the PDO as a hex string */
  3585. return scnprintf(buf, PAGE_SIZE, "%08x\n",
  3586. pd->received_pdos[i]);
  3587. usbpd_err(&pd->dev, "Invalid PDO index\n");
  3588. return -EINVAL;
  3589. }
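/*
 * select_pdo: userspace requests a new contract while in PE_SNK_Ready by
 * writing "<src cap id> <PDO> [<uV> <uA>]". Illustrative examples only
 * (values depend on the advertised capabilities):
 *
 *	echo "3 2" > select_pdo                    # fixed PDO #2
 *	echo "3 4 9000000 2000000" > select_pdo    # PPS APDO #4 at 9 V / 2 A
 *
 * The write blocks until the Request completes, is rejected, or times out
 * after 1 s.
 */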
  3590. static ssize_t select_pdo_store(struct device *dev,
  3591. struct device_attribute *attr, const char *buf, size_t size)
  3592. {
  3593. struct usbpd *pd = dev_get_drvdata(dev);
  3594. int src_cap_id;
  3595. int pdo, uv = 0, ua = 0;
  3596. int ret;
  3597. mutex_lock(&pd->swap_lock);
  3598. /* Only allowed if we are already in explicit sink contract */
  3599. if (pd->current_state != PE_SNK_READY) {
  3600. usbpd_err(&pd->dev, "Cannot select new PDO yet\n");
  3601. ret = -EBUSY;
  3602. goto out;
  3603. }
  3604. ret = sscanf(buf, "%d %d %d %d", &src_cap_id, &pdo, &uv, &ua);
  3605. if (ret != 2 && ret != 4) {
  3606. usbpd_err(&pd->dev, "Must specify <src cap id> <PDO> [<uV> <uA>]\n");
  3607. ret = -EINVAL;
  3608. goto out;
  3609. }
  3610. if (src_cap_id != pd->src_cap_id) {
  3611. usbpd_err(&pd->dev, "src_cap_id mismatch. Requested:%d, current:%d\n",
  3612. src_cap_id, pd->src_cap_id);
  3613. ret = -EINVAL;
  3614. goto out;
  3615. }
  3616. if (pdo < 1 || pdo > 7) {
  3617. usbpd_err(&pd->dev, "invalid PDO:%d\n", pdo);
  3618. ret = -EINVAL;
  3619. goto out;
  3620. }
  3621. ret = pd_select_pdo(pd, pdo, uv, ua);
  3622. if (ret)
  3623. goto out;
  3624. reinit_completion(&pd->is_ready);
  3625. pd->send_request = true;
  3626. kick_sm(pd, 0);
  3627. /* wait for operation to complete */
  3628. if (!wait_for_completion_timeout(&pd->is_ready,
  3629. msecs_to_jiffies(1000))) {
  3630. usbpd_err(&pd->dev, "request timed out\n");
  3631. ret = -ETIMEDOUT;
  3632. goto out;
  3633. }
  3634. /* determine if request was accepted/rejected */
  3635. if (pd->selected_pdo != pd->requested_pdo ||
  3636. pd->current_voltage != pd->requested_voltage) {
  3637. usbpd_err(&pd->dev, "request rejected\n");
  3638. ret = -ECONNREFUSED;
  3639. }
  3640. out:
  3641. pd->send_request = false;
  3642. mutex_unlock(&pd->swap_lock);
  3643. return ret ? ret : size;
  3644. }
  3645. static ssize_t select_pdo_show(struct device *dev,
  3646. struct device_attribute *attr, char *buf)
  3647. {
  3648. struct usbpd *pd = dev_get_drvdata(dev);
  3649. return scnprintf(buf, PAGE_SIZE, "%d\n", pd->selected_pdo);
  3650. }
  3651. static DEVICE_ATTR_RW(select_pdo);
  3652. static ssize_t rdo_show(struct device *dev, struct device_attribute *attr,
  3653. char *buf)
  3654. {
  3655. struct usbpd *pd = dev_get_drvdata(dev);
  3656. /* dump the RDO as a hex string */
  3657. return scnprintf(buf, PAGE_SIZE, "%08x\n", pd->rdo);
  3658. }
  3659. static DEVICE_ATTR_RO(rdo);
  3660. static ssize_t rdo_h_show(struct device *dev, struct device_attribute *attr,
  3661. char *buf)
  3662. {
  3663. struct usbpd *pd = dev_get_drvdata(dev);
  3664. int pos = PD_RDO_OBJ_POS(pd->rdo);
  3665. int type = PD_SRC_PDO_TYPE(pd->received_pdos[pos - 1]);
  3666. int len;
  3667. len = scnprintf(buf, PAGE_SIZE, "Request Data Object\n"
  3668. "\tObj Pos:%d\n"
  3669. "\tGiveback:%d\n"
  3670. "\tCapability Mismatch:%d\n"
  3671. "\tUSB Communications Capable:%d\n"
  3672. "\tNo USB Suspend:%d\n",
  3673. PD_RDO_OBJ_POS(pd->rdo),
  3674. PD_RDO_GIVEBACK(pd->rdo),
  3675. PD_RDO_MISMATCH(pd->rdo),
  3676. PD_RDO_USB_COMM(pd->rdo),
  3677. PD_RDO_NO_USB_SUSP(pd->rdo));
  3678. switch (type) {
  3679. case PD_SRC_PDO_TYPE_FIXED:
  3680. case PD_SRC_PDO_TYPE_VARIABLE:
  3681. len += scnprintf(buf + len, PAGE_SIZE - len,
  3682. "(Fixed/Variable)\n"
  3683. "\tOperating Current:%d (mA)\n"
  3684. "\t%s Current:%d (mA)\n",
  3685. PD_RDO_FIXED_CURR(pd->rdo) * 10,
  3686. PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
  3687. PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 10);
  3688. break;
  3689. case PD_SRC_PDO_TYPE_BATTERY:
  3690. len += scnprintf(buf + len, PAGE_SIZE - len,
  3691. "(Battery)\n"
  3692. "\tOperating Power:%d (mW)\n"
  3693. "\t%s Power:%d (mW)\n",
  3694. PD_RDO_FIXED_CURR(pd->rdo) * 250,
  3695. PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
  3696. PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 250);
  3697. break;
  3698. case PD_SRC_PDO_TYPE_AUGMENTED:
  3699. len += scnprintf(buf + len, PAGE_SIZE - len,
  3700. "(Programmable)\n"
  3701. "\tOutput Voltage:%d (mV)\n"
  3702. "\tOperating Current:%d (mA)\n",
  3703. PD_RDO_PROG_VOLTAGE(pd->rdo) * 20,
  3704. PD_RDO_PROG_CURR(pd->rdo) * 50);
  3705. break;
  3706. }
  3707. return len;
  3708. }
  3709. static DEVICE_ATTR_RO(rdo_h);
  3710. static ssize_t hard_reset_store(struct device *dev,
  3711. struct device_attribute *attr, const char *buf, size_t size)
  3712. {
  3713. struct usbpd *pd = dev_get_drvdata(dev);
  3714. u32 val = 0;
	if (kstrtou32(buf, 0, &val) || val != 1)
		return -EINVAL;

	/* val can only be 1 here, so unconditionally issue a Hard Reset */
	usbpd_set_state(pd, pd->current_pr == PR_SRC ?
			PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
  3720. return size;
  3721. }
  3722. static DEVICE_ATTR_WO(hard_reset);
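/*
 * Common helper for sysfs attributes that trigger a single PD 3.0 AMS from
 * PE_SNK_Ready: set the requested tx flag, kick the state machine and wait
 * up to 1 s for the response to complete pd->is_ready.
 */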
  3723. static int trigger_tx_msg(struct usbpd *pd, bool *msg_tx_flag)
  3724. {
  3725. int ret = 0;
  3726. /* Only allowed if we are already in explicit sink contract */
  3727. if (pd->current_state != PE_SNK_READY) {
  3728. usbpd_err(&pd->dev, "Cannot send msg\n");
  3729. ret = -EBUSY;
  3730. goto out;
  3731. }
  3732. reinit_completion(&pd->is_ready);
  3733. *msg_tx_flag = true;
  3734. kick_sm(pd, 0);
  3735. /* wait for operation to complete */
  3736. if (!wait_for_completion_timeout(&pd->is_ready,
  3737. msecs_to_jiffies(1000))) {
  3738. usbpd_err(&pd->dev, "request timed out\n");
  3739. ret = -ETIMEDOUT;
  3740. }
  3741. out:
  3742. *msg_tx_flag = false;
  3743. return ret;
  3744. }
  3745. static ssize_t get_src_cap_ext_show(struct device *dev,
  3746. struct device_attribute *attr, char *buf)
  3747. {
  3748. int i, ret, len = 0;
  3749. struct usbpd *pd = dev_get_drvdata(dev);
  3750. if (pd->spec_rev == USBPD_REV_20)
  3751. return -EINVAL;
  3752. ret = trigger_tx_msg(pd, &pd->send_get_src_cap_ext);
  3753. if (ret)
  3754. return ret;
  3755. for (i = 0; i < PD_SRC_CAP_EXT_DB_LEN; i++)
  3756. len += scnprintf(buf + len, PAGE_SIZE - len, "%s0x%02x",
  3757. i ? " " : "", pd->src_cap_ext_db[i]);
  3758. len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
  3759. return len;
  3760. }
  3761. static DEVICE_ATTR_RO(get_src_cap_ext);
  3762. static ssize_t get_status_show(struct device *dev,
  3763. struct device_attribute *attr, char *buf)
  3764. {
  3765. int i, ret, len = 0;
  3766. struct usbpd *pd = dev_get_drvdata(dev);
  3767. if (pd->spec_rev == USBPD_REV_20)
  3768. return -EINVAL;
  3769. ret = trigger_tx_msg(pd, &pd->send_get_status);
  3770. if (ret)
  3771. return ret;
  3772. for (i = 0; i < PD_STATUS_DB_LEN; i++)
  3773. len += scnprintf(buf + len, PAGE_SIZE - len, "%s0x%02x",
  3774. i ? " " : "", pd->status_db[i]);
  3775. len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
  3776. return len;
  3777. }
  3778. static DEVICE_ATTR_RO(get_status);
  3779. static ssize_t get_pps_status_show(struct device *dev,
  3780. struct device_attribute *attr, char *buf)
  3781. {
  3782. int ret;
  3783. struct usbpd *pd = dev_get_drvdata(dev);
  3784. if (pd->spec_rev == USBPD_REV_20)
  3785. return -EINVAL;
  3786. ret = trigger_tx_msg(pd, &pd->send_get_pps_status);
  3787. if (ret)
  3788. return ret;
  3789. return scnprintf(buf, PAGE_SIZE, "0x%08x\n", pd->pps_status_db);
  3790. }
  3791. static DEVICE_ATTR_RO(get_pps_status);
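/*
 * get_battery_cap: write "1" to send Get_Battery_Cap (PD 3.0 only), then
 * read the attribute to dump the returned Battery Capabilities data block,
 * e.g. (illustrative):
 *
 *	echo 1 > get_battery_cap && cat get_battery_cap
 */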
  3792. static ssize_t get_battery_cap_store(struct device *dev,
  3793. struct device_attribute *attr, const char *buf, size_t size)
  3794. {
  3795. struct usbpd *pd = dev_get_drvdata(dev);
  3796. u32 val;
  3797. int ret;
  3798. if (pd->spec_rev == USBPD_REV_20 || kstrtou32(buf, 0, &val)
  3799. || val != 1) {
  3800. pd->get_battery_cap_db = -EINVAL;
  3801. return -EINVAL;
  3802. }
  3803. pd->get_battery_cap_db = val;
  3804. ret = trigger_tx_msg(pd, &pd->send_get_battery_cap);
  3805. return ret ? ret : size;
  3806. }
  3807. static ssize_t get_battery_cap_show(struct device *dev,
  3808. struct device_attribute *attr, char *buf)
  3809. {
  3810. int i, len = 0;
  3811. struct usbpd *pd = dev_get_drvdata(dev);
  3812. if (pd->get_battery_cap_db == -EINVAL)
  3813. return -EINVAL;
  3814. for (i = 0; i < PD_BATTERY_CAP_DB_LEN; i++)
  3815. len += scnprintf(buf + len, PAGE_SIZE - len, "%s0x%02x",
  3816. i ? " " : "", pd->battery_cap_db[i]);
  3817. len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
  3818. return len;
  3819. }
  3820. static DEVICE_ATTR_RW(get_battery_cap);
  3821. static ssize_t get_battery_status_store(struct device *dev,
  3822. struct device_attribute *attr, const char *buf, size_t size)
  3823. {
  3824. struct usbpd *pd = dev_get_drvdata(dev);
  3825. u32 val;
  3826. int ret;
  3827. if (pd->spec_rev == USBPD_REV_20 || kstrtou32(buf, 0, &val)
  3828. || val != 1) {
  3829. pd->get_battery_status_db = -EINVAL;
  3830. return -EINVAL;
  3831. }
  3832. pd->get_battery_status_db = val;
  3833. ret = trigger_tx_msg(pd, &pd->send_get_battery_status);
  3834. return ret ? ret : size;
  3835. }
  3836. static ssize_t get_battery_status_show(struct device *dev,
  3837. struct device_attribute *attr, char *buf)
  3838. {
  3839. struct usbpd *pd = dev_get_drvdata(dev);
  3840. if (pd->get_battery_status_db == -EINVAL)
  3841. return -EINVAL;
  3842. return scnprintf(buf, PAGE_SIZE, "0x%08x\n", pd->battery_sts_dobj);
  3843. }
  3844. static DEVICE_ATTR_RW(get_battery_status);
  3845. static struct attribute *usbpd_attrs[] = {
  3846. &dev_attr_contract.attr,
  3847. &dev_attr_initial_pr.attr,
  3848. &dev_attr_current_pr.attr,
  3849. &dev_attr_initial_dr.attr,
  3850. &dev_attr_current_dr.attr,
  3851. &dev_attr_src_cap_id.attr,
  3852. &dev_attr_pdo_h.attr,
  3853. &dev_attr_pdos[0].attr,
  3854. &dev_attr_pdos[1].attr,
  3855. &dev_attr_pdos[2].attr,
  3856. &dev_attr_pdos[3].attr,
  3857. &dev_attr_pdos[4].attr,
  3858. &dev_attr_pdos[5].attr,
  3859. &dev_attr_pdos[6].attr,
  3860. &dev_attr_select_pdo.attr,
  3861. &dev_attr_rdo.attr,
  3862. &dev_attr_rdo_h.attr,
  3863. &dev_attr_hard_reset.attr,
  3864. &dev_attr_get_src_cap_ext.attr,
  3865. &dev_attr_get_status.attr,
  3866. &dev_attr_get_pps_status.attr,
  3867. &dev_attr_get_battery_cap.attr,
  3868. &dev_attr_get_battery_status.attr,
  3869. NULL,
  3870. };
  3871. ATTRIBUTE_GROUPS(usbpd);
  3872. static struct class usbpd_class = {
  3873. .name = "usbpd",
  3874. .owner = THIS_MODULE,
  3875. .dev_uevent = usbpd_uevent,
  3876. .dev_groups = usbpd_groups,
  3877. };
  3878. static int match_usbpd_device(struct device *dev, const void *data)
  3879. {
  3880. return dev->parent == data;
  3881. }
  3882. static void devm_usbpd_put(struct device *dev, void *res)
  3883. {
  3884. struct usbpd **ppd = res;
  3885. put_device(&(*ppd)->dev);
  3886. }
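/**
 * devm_usbpd_get_by_phandle - look up a usbpd instance from a DT phandle
 * @dev: consumer device whose of_node contains @phandle
 * @phandle: name of the property referencing the usbpd parent node
 *
 * A consumer driver would typically call this from probe; a minimal sketch,
 * with "usbpd" as a placeholder property name:
 *
 *	pd = devm_usbpd_get_by_phandle(&pdev->dev, "usbpd");
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 * Return: usbpd instance on success, or ERR_PTR(-EAGAIN, -EINVAL, -ENXIO,
 * -ENODEV, -EPROBE_DEFER or -ENOMEM) on failure. The reference is dropped
 * automatically when @dev is unbound.
 */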
  3887. struct usbpd *devm_usbpd_get_by_phandle(struct device *dev, const char *phandle)
  3888. {
  3889. struct usbpd **ptr, *pd = NULL;
  3890. struct device_node *pd_np;
  3891. struct platform_device *pdev;
  3892. struct device *pd_dev;
  3893. if (!usbpd_class.p) /* usbpd_init() not yet called */
  3894. return ERR_PTR(-EAGAIN);
  3895. if (!dev->of_node)
  3896. return ERR_PTR(-EINVAL);
  3897. pd_np = of_parse_phandle(dev->of_node, phandle, 0);
  3898. if (!pd_np)
  3899. return ERR_PTR(-ENXIO);
  3900. pdev = of_find_device_by_node(pd_np);
  3901. if (!pdev)
  3902. return ERR_PTR(-ENODEV);
  3903. pd_dev = class_find_device(&usbpd_class, NULL, &pdev->dev,
  3904. match_usbpd_device);
  3905. if (!pd_dev) {
  3906. platform_device_put(pdev);
  3907. /* device was found but maybe hadn't probed yet, so defer */
  3908. return ERR_PTR(-EPROBE_DEFER);
  3909. }
	ptr = devres_alloc(devm_usbpd_put, sizeof(*ptr), GFP_KERNEL);
	if (!ptr) {
		put_device(pd_dev);
		platform_device_put(pdev);
		return ERR_PTR(-ENOMEM);
	}

	pd = dev_get_drvdata(pd_dev);
	if (!pd) {
		/* found but not fully probed yet; drop references and defer */
		devres_free(ptr);
		put_device(pd_dev);
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	*ptr = pd;
	devres_add(dev, ptr);

	return pd;
  3922. }
  3923. EXPORT_SYMBOL(devm_usbpd_get_by_phandle);
  3924. static void usbpd_release(struct device *dev)
  3925. {
  3926. struct usbpd *pd = container_of(dev, struct usbpd, dev);
  3927. kfree(pd);
  3928. }
  3929. static int num_pd_instances;
static const struct typec_operations typec_ops = {
  3931. .dr_set = usbpd_typec_dr_set,
  3932. .pr_set = usbpd_typec_pr_set,
  3933. .port_type_set = usbpd_typec_port_type_set,
  3934. };
  3935. /**
  3936. * usbpd_create - Create a new instance of USB PD protocol/policy engine
  3937. * @parent - parent device to associate with
  3938. *
  3939. * This creates a new usbpd class device which manages the state of a
  3940. * USB PD-capable port. The parent device that is passed in should be
  3941. * associated with the physical device port, e.g. a PD PHY.
  3942. *
  3943. * Return: struct usbpd pointer, or an ERR_PTR value
  3944. */
  3945. struct usbpd *usbpd_create(struct device *parent,
  3946. struct pd_phy_ops *pdphy_ops)
  3947. {
  3948. int ret;
  3949. struct usbpd *pd;
  3950. union power_supply_propval val = {0};
  3951. int i;
  3952. pd = kzalloc(sizeof(*pd), GFP_KERNEL);
  3953. if (!pd)
  3954. return ERR_PTR(-ENOMEM);
  3955. device_initialize(&pd->dev);
  3956. pd->dev.class = &usbpd_class;
  3957. pd->dev.parent = parent;
  3958. pd->dev.release = usbpd_release;
  3959. dev_set_drvdata(&pd->dev, pd);
  3960. ret = dev_set_name(&pd->dev, "usbpd%d", num_pd_instances++);
  3961. if (ret)
  3962. goto free_pd;
  3963. ret = device_init_wakeup(&pd->dev, true);
  3964. if (ret)
  3965. goto free_pd;
  3966. ret = device_add(&pd->dev);
  3967. if (ret)
  3968. goto free_pd;
  3969. pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE | WQ_HIGHPRI);
  3970. if (!pd->wq) {
  3971. ret = -ENOMEM;
  3972. goto del_pd;
  3973. }
  3974. INIT_WORK(&pd->sm_work, usbpd_sm);
  3975. INIT_WORK(&pd->psy_chg_work, psy_changed_notifier_work);
  3976. INIT_WORK(&pd->start_periph_work, start_usb_peripheral_work);
  3977. hrtimer_init(&pd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  3978. pd->timer.function = pd_timeout;
  3979. mutex_init(&pd->swap_lock);
  3980. mutex_init(&pd->svid_handler_lock);
  3981. pd->usb_psy = power_supply_get_by_name("usb");
  3982. if (!pd->usb_psy) {
  3983. usbpd_dbg(&pd->dev, "Could not get USB power_supply, deferring probe\n");
  3984. ret = -EPROBE_DEFER;
  3985. goto destroy_wq;
  3986. }
  3987. if (!pd->bat_psy)
  3988. pd->bat_psy = power_supply_get_by_name("battery");
  3989. if (pd->bat_psy) {
  3990. if (!power_supply_get_property(pd->bat_psy,
  3991. POWER_SUPPLY_PROP_VOLTAGE_MAX, &val))
  3992. pd->bat_voltage_max = val.intval;
  3993. if (!power_supply_get_property(pd->bat_psy,
  3994. POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, &val))
  3995. pd->bat_charge_full = val.intval;
  3996. }
  3997. for (i = 0; i < POWER_SUPPLY_IIO_PROP_MAX; i++) {
  3998. pd->iio_channels[i] = devm_iio_channel_get(parent,
  3999. usbpd_iio_channel_map[i]);
  4000. if (IS_ERR(pd->iio_channels[i])) {
  4001. usbpd_err(&pd->dev, "failed to get %s iio channel\n",
  4002. usbpd_iio_channel_map[i]);
  4003. ret = PTR_ERR(pd->iio_channels[i]);
  4004. goto put_psy;
  4005. }
  4006. }
  4007. /*
  4008. * associate extcon with the parent dev as it could have a DT
  4009. * node which will be useful for extcon_get_edev_by_phandle()
  4010. */
  4011. pd->extcon = devm_extcon_dev_allocate(parent, usbpd_extcon_cable);
  4012. if (IS_ERR(pd->extcon)) {
  4013. usbpd_err(&pd->dev, "failed to allocate extcon device\n");
  4014. ret = PTR_ERR(pd->extcon);
  4015. goto put_psy;
  4016. }
  4017. ret = devm_extcon_dev_register(parent, pd->extcon);
  4018. if (ret) {
  4019. usbpd_err(&pd->dev, "failed to register extcon device\n");
  4020. goto put_psy;
  4021. }
  4022. /* Support reporting polarity and speed via properties */
  4023. extcon_set_property_capability(pd->extcon, EXTCON_USB,
  4024. EXTCON_PROP_USB_TYPEC_POLARITY);
  4025. extcon_set_property_capability(pd->extcon, EXTCON_USB,
  4026. EXTCON_PROP_USB_SS);
  4027. extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
  4028. EXTCON_PROP_USB_TYPEC_POLARITY);
  4029. extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
  4030. EXTCON_PROP_USB_SS);
  4031. pd->num_sink_caps = device_property_read_u32_array(parent,
  4032. "qcom,default-sink-caps", NULL, 0);
  4033. if (pd->num_sink_caps > 0) {
  4034. int i;
  4035. u32 sink_caps[14];
  4036. if (pd->num_sink_caps % 2 || pd->num_sink_caps > 14) {
  4037. ret = -EINVAL;
  4038. usbpd_err(&pd->dev, "default-sink-caps must be specified as voltage/current, max 7 pairs\n");
  4039. goto put_psy;
  4040. }
  4041. ret = device_property_read_u32_array(parent,
  4042. "qcom,default-sink-caps", sink_caps,
  4043. pd->num_sink_caps);
  4044. if (ret) {
  4045. usbpd_err(&pd->dev, "Error reading default-sink-caps\n");
  4046. goto put_psy;
  4047. }
  4048. pd->num_sink_caps /= 2;
  4049. for (i = 0; i < pd->num_sink_caps; i++) {
  4050. int v = sink_caps[i * 2] / 50;
  4051. int c = sink_caps[i * 2 + 1] / 10;
  4052. pd->sink_caps[i] =
  4053. PD_SNK_PDO_FIXED(0, 0, 0, 0, 0, v, c);
  4054. }
  4055. /* First PDO includes additional capabilities */
  4056. pd->sink_caps[0] |= PD_SNK_PDO_FIXED(1, 0, 0, 1, 1, 0, 0);
  4057. } else {
  4058. memcpy(pd->sink_caps, default_snk_caps,
  4059. sizeof(default_snk_caps));
  4060. pd->num_sink_caps = ARRAY_SIZE(default_snk_caps);
  4061. }
  4062. /*
  4063. * Register a Type-C class instance (/sys/class/typec/portX).
  4064. * Note this is different than the /sys/class/usbpd/ created above.
  4065. */
  4066. pd->typec_caps.type = TYPEC_PORT_DRP;
  4067. pd->typec_caps.data = TYPEC_PORT_DRD;
  4068. pd->typec_caps.revision = 0x0130;
  4069. pd->typec_caps.pd_revision = 0x0300;
  4070. pd->typec_caps.ops = &typec_ops;
  4071. pd->typec_caps.driver_data = pd;
  4072. pd->partner_desc.identity = &pd->partner_identity;
  4073. ret = get_connector_type(pd);
  4074. if (ret < 0)
  4075. goto put_psy;
	/* For a non-Type-C (micro-USB) connector the port is handled elsewhere */
	if (ret != QTI_POWER_SUPPLY_CONNECTOR_MICRO_USB) {
		pd->typec_port = typec_register_port(parent, &pd->typec_caps);
		if (IS_ERR(pd->typec_port)) {
			usbpd_err(&pd->dev, "could not register typec port\n");
			ret = PTR_ERR(pd->typec_port);
			goto put_psy;
		}
	}
  4084. pd->pdphy_ops = pdphy_ops;
  4085. pd->current_pr = PR_NONE;
  4086. pd->current_dr = DR_NONE;
  4087. list_add_tail(&pd->instance, &_usbpd);
  4088. spin_lock_init(&pd->rx_lock);
  4089. INIT_LIST_HEAD(&pd->rx_q);
  4090. INIT_LIST_HEAD(&pd->svid_handlers);
  4091. init_completion(&pd->is_ready);
  4092. init_completion(&pd->tx_chunk_request);
  4093. pd->psy_nb.notifier_call = psy_changed;
  4094. ret = power_supply_reg_notifier(&pd->psy_nb);
  4095. if (ret)
  4096. goto del_inst;
  4097. /* force read initial power_supply values */
  4098. psy_changed(&pd->psy_nb, PSY_EVENT_PROP_CHANGED, pd->usb_psy);
  4099. return pd;
  4100. del_inst:
  4101. list_del(&pd->instance);
  4102. put_psy:
  4103. power_supply_put(pd->usb_psy);
  4104. destroy_wq:
  4105. destroy_workqueue(pd->wq);
  4106. del_pd:
  4107. device_del(&pd->dev);
  4108. free_pd:
  4109. num_pd_instances--;
  4110. put_device(&pd->dev);
  4111. return ERR_PTR(ret);
  4112. }
  4113. EXPORT_SYMBOL(usbpd_create);
  4114. /**
  4115. * usbpd_destroy - Removes and frees a usbpd instance
  4116. * @pd: the instance to destroy
  4117. */
  4118. void usbpd_destroy(struct usbpd *pd)
  4119. {
  4120. if (!pd)
  4121. return;
  4122. list_del(&pd->instance);
  4123. power_supply_unreg_notifier(&pd->psy_nb);
  4124. power_supply_put(pd->usb_psy);
  4125. if (pd->bat_psy)
  4126. power_supply_put(pd->bat_psy);
  4127. destroy_workqueue(pd->wq);
  4128. device_unregister(&pd->dev);
  4129. }
  4130. EXPORT_SYMBOL(usbpd_destroy);
  4131. static int __init usbpd_init(void)
  4132. {
  4133. usbpd_ipc_log = ipc_log_context_create(NUM_LOG_PAGES, "usb_pd", 0);
  4134. return class_register(&usbpd_class);
  4135. }
  4136. module_init(usbpd_init);
  4137. static void __exit usbpd_exit(void)
  4138. {
  4139. class_unregister(&usbpd_class);
  4140. }
  4141. module_exit(usbpd_exit);
  4142. MODULE_DESCRIPTION("USB Power Delivery Policy Engine");
  4143. MODULE_LICENSE("GPL");