ipw2200.c
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************

  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.

  802.11 status code portion of this file from ethereal-0.10.6:
    Copyright 2000, Axis Communications AB
    Ethereal - Network traffic analyzer
    By Gerald Combs <[email protected]>
    Copyright 1998 Gerald Combs

  Contact Information:
  Intel Linux Wireless <[email protected]>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************/

#include <linux/sched.h>
#include <linux/slab.h>
#include <net/cfg80211-wext.h>
#include "ipw2200.h"
#include "ipw.h"

#ifndef KBUILD_EXTMOD
#define VK "k"
#else
#define VK
#endif

#ifdef CONFIG_IPW2200_DEBUG
#define VD "d"
#else
#define VD
#endif

#ifdef CONFIG_IPW2200_MONITOR
#define VM "m"
#else
#define VM
#endif

#ifdef CONFIG_IPW2200_PROMISCUOUS
#define VP "p"
#else
#define VP
#endif

#ifdef CONFIG_IPW2200_RADIOTAP
#define VR "r"
#else
#define VR
#endif

#ifdef CONFIG_IPW2200_QOS
#define VQ "q"
#else
#define VQ
#endif

#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
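/*
 * Illustrative expansion (added; not in the original source): for an in-tree
 * build (KBUILD_EXTMOD unset) with CONFIG_IPW2200_DEBUG and CONFIG_IPW2200_QOS
 * enabled, the feature letters above concatenate to the version string
 * "1.2.2kdq"; with every optional feature disabled it is simply "1.2.2k".
 */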
#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION	IPW2200_VERSION

#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("ipw2200-ibss.fw");
#ifdef CONFIG_IPW2200_MONITOR
MODULE_FIRMWARE("ipw2200-sniffer.fw");
#endif
MODULE_FIRMWARE("ipw2200-bss.fw");

static int cmdlog = 0;
static int debug = 0;
static int default_channel = 0;
static int network_mode = 0;

static u32 ipw_debug_level;
static int associate;
static int auto_create = 1;
static int led_support = 1;
static int disable = 0;
static int bt_coexist = 0;
static int hwcrypto = 0;
static int roaming = 1;
static const char ipw_modes[] = {
	'a', 'b', 'g', '?'
};
static int antenna = CFG_SYS_ANTENNA_BOTH;

#ifdef CONFIG_IPW2200_PROMISCUOUS
static int rtap_iface = 0;	/* def: 0 -- do not create rtap interface */
#endif

static struct ieee80211_rate ipw2200_rates[] = {
	{ .bitrate = 10 },
	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60 },
	{ .bitrate = 90 },
	{ .bitrate = 120 },
	{ .bitrate = 180 },
	{ .bitrate = 240 },
	{ .bitrate = 360 },
	{ .bitrate = 480 },
	{ .bitrate = 540 }
};

#define ipw2200_a_rates		(ipw2200_rates + 4)
#define ipw2200_num_a_rates	8
#define ipw2200_bg_rates	(ipw2200_rates + 0)
#define ipw2200_num_bg_rates	12
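/*
 * Descriptive note (added; not in the original source): the .bitrate values
 * use the mac80211 convention of 100 kb/s units, so 10 means 1 Mb/s, 55 means
 * 5.5 Mb/s and 540 means 54 Mb/s.  The first four entries are the 802.11b CCK
 * rates and the remaining eight are the OFDM rates shared by 802.11a/g, which
 * is why ipw2200_a_rates starts at offset 4 while ipw2200_bg_rates covers the
 * whole table.
 */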
/* Ugly macro to convert literal channel numbers into their MHz equivalents.
 * There are certainly some conditions that will break this (like feeding it
 * '30'), but they shouldn't arise since nothing talks on channel 30. */
#define ieee80211chan2mhz(x) \
	(((x) <= 14) ? \
	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
	((x) + 1000) * 5)
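/*
 * Worked examples (added for clarity; not in the original source):
 *   channel  1 -> (1 * 5) + 2407  = 2412 MHz
 *   channel 14 -> special-cased   = 2484 MHz
 *   channel 36 -> (36 + 1000) * 5 = 5180 MHz
 */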
#ifdef CONFIG_IPW2200_QOS
static int qos_enable = 0;
static int qos_burst_enable = 0;
static int qos_no_ack_mask = 0;
static int burst_duration_CCK = 0;
static int burst_duration_OFDM = 0;

static struct libipw_qos_parameters def_qos_parameters_OFDM = {
	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
	 QOS_TX3_CW_MIN_OFDM},
	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
	 QOS_TX3_CW_MAX_OFDM},
	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
};

static struct libipw_qos_parameters def_qos_parameters_CCK = {
	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
	 QOS_TX3_CW_MIN_CCK},
	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
	 QOS_TX3_CW_MAX_CCK},
	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
	 QOS_TX3_TXOP_LIMIT_CCK}
};

static struct libipw_qos_parameters def_parameters_OFDM = {
	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
	 DEF_TX3_CW_MIN_OFDM},
	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
	 DEF_TX3_CW_MAX_OFDM},
	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
};

static struct libipw_qos_parameters def_parameters_CCK = {
	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
	 DEF_TX3_CW_MIN_CCK},
	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
	 DEF_TX3_CW_MAX_CCK},
	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
	 DEF_TX3_TXOP_LIMIT_CCK}
};

static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };

static int from_priority_to_tx_queue[] = {
	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
};
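/*
 * Descriptive note (added; not in the original source): the array above is
 * indexed by 802.1d user priority (0-7) and appears to follow the usual WMM
 * access-category mapping: priorities 1 and 2 (background) go to
 * IPW_TX_QUEUE_2, 0 and 3 (best effort) to IPW_TX_QUEUE_1, 4 and 5 (video)
 * to IPW_TX_QUEUE_3, and 6 and 7 (voice) to IPW_TX_QUEUE_4.
 */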
static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);

static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
				       *qos_param);
static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
				     *qos_param);
#endif				/* CONFIG_IPW2200_QOS */

static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
static void ipw_remove_current_network(struct ipw_priv *priv);
static void ipw_rx(struct ipw_priv *priv);
static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
				struct clx2_tx_queue *txq, int qindex);
static int ipw_queue_reset(struct ipw_priv *priv);

static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, const void *buf,
			     int len, int sync);

static void ipw_tx_queue_free(struct ipw_priv *);

static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
static void ipw_rx_queue_replenish(void *);
static int ipw_up(struct ipw_priv *);
static void ipw_bg_up(struct work_struct *work);
static void ipw_down(struct ipw_priv *);
static void ipw_bg_down(struct work_struct *work);
static int ipw_config(struct ipw_priv *);
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *prates);
static void ipw_set_hwcrypto_keys(struct ipw_priv *);
static void ipw_send_wep_keys(struct ipw_priv *, int);
static int snprint_line(char *buf, size_t count,
			const u8 * data, u32 len, u32 ofs)
{
	int out, i, j, l;
	char c;

	out = scnprintf(buf, count, "%08X", ofs);

	/* hex dump: two groups of up to eight bytes each */
	for (l = 0, i = 0; i < 2; i++) {
		out += scnprintf(buf + out, count - out, " ");
		for (j = 0; j < 8 && l < len; j++, l++)
			out += scnprintf(buf + out, count - out, "%02X ",
					 data[(i * 8 + j)]);
		/* pad short lines so the ASCII column stays aligned */
		for (; j < 8; j++)
			out += scnprintf(buf + out, count - out, "   ");
	}

	/* ASCII dump of the same bytes, '.' for non-printable characters */
	out += scnprintf(buf + out, count - out, " ");
	for (l = 0, i = 0; i < 2; i++) {
		out += scnprintf(buf + out, count - out, " ");
		for (j = 0; j < 8 && l < len; j++, l++) {
			c = data[(i * 8 + j)];
			if (!isascii(c) || !isprint(c))
				c = '.';

			out += scnprintf(buf + out, count - out, "%c", c);
		}

		for (; j < 8; j++)
			out += scnprintf(buf + out, count - out, " ");
	}

	return out;
}
  212. static void printk_buf(int level, const u8 * data, u32 len)
  213. {
  214. char line[81];
  215. u32 ofs = 0;
  216. if (!(ipw_debug_level & level))
  217. return;
  218. while (len) {
  219. snprint_line(line, sizeof(line), &data[ofs],
  220. min(len, 16U), ofs);
  221. printk(KERN_DEBUG "%s\n", line);
  222. ofs += 16;
  223. len -= min(len, 16U);
  224. }
  225. }
  226. static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
  227. {
  228. size_t out = size;
  229. u32 ofs = 0;
  230. int total = 0;
  231. while (size && len) {
  232. out = snprint_line(output, size, &data[ofs],
  233. min_t(size_t, len, 16U), ofs);
  234. ofs += 16;
  235. output += out;
  236. size -= out;
  237. len -= min_t(size_t, len, 16U);
  238. total += out;
  239. }
  240. return total;
  241. }
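/*
 * The two helpers above produce the usual hex+ASCII dump layout, one
 * 16-byte line at a time.  As an illustration (sample bytes made up),
 * a full line looks roughly like:
 *
 *   000000A0 68 65 6C 6C 6F 20 77 6F  72 6C 64 21 00 00 00 00  hello wo rld!....
 *
 * i.e. an 8-digit offset, two groups of eight hex bytes, and the same
 * sixteen bytes as ASCII with non-printable characters shown as '.'.
 */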
  242. /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
  243. static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
  244. #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
  245. /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
  246. static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
  247. #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
  248. /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
  249. static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
  250. static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
  251. {
  252. IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
  253. __LINE__, (u32) (b), (u32) (c));
  254. _ipw_write_reg8(a, b, c);
  255. }
  256. /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
  257. static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
  258. static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
  259. {
  260. IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
  261. __LINE__, (u32) (b), (u32) (c));
  262. _ipw_write_reg16(a, b, c);
  263. }
  264. /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
  265. static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
  266. static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
  267. {
  268. IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
  269. __LINE__, (u32) (b), (u32) (c));
  270. _ipw_write_reg32(a, b, c);
  271. }
  272. /* 8-bit direct write (low 4K) */
  273. static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
  274. u8 val)
  275. {
  276. writeb(val, ipw->hw_base + ofs);
  277. }
  278. /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
  279. #define ipw_write8(ipw, ofs, val) do { \
  280. IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
  281. __LINE__, (u32)(ofs), (u32)(val)); \
  282. _ipw_write8(ipw, ofs, val); \
  283. } while (0)
  284. /* 16-bit direct write (low 4K) */
  285. static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
  286. u16 val)
  287. {
  288. writew(val, ipw->hw_base + ofs);
  289. }
  290. /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
  291. #define ipw_write16(ipw, ofs, val) do { \
  292. IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
  293. __LINE__, (u32)(ofs), (u32)(val)); \
  294. _ipw_write16(ipw, ofs, val); \
  295. } while (0)
  296. /* 32-bit direct write (low 4K) */
  297. static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
  298. u32 val)
  299. {
  300. writel(val, ipw->hw_base + ofs);
  301. }
  302. /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
  303. #define ipw_write32(ipw, ofs, val) do { \
  304. IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
  305. __LINE__, (u32)(ofs), (u32)(val)); \
  306. _ipw_write32(ipw, ofs, val); \
  307. } while (0)
  308. /* 8-bit direct read (low 4K) */
  309. static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
  310. {
  311. return readb(ipw->hw_base + ofs);
  312. }
  313. /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
  314. #define ipw_read8(ipw, ofs) ({ \
  315. IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
  316. (u32)(ofs)); \
  317. _ipw_read8(ipw, ofs); \
  318. })
  319. /* 16-bit direct read (low 4K) */
  320. static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
  321. {
  322. return readw(ipw->hw_base + ofs);
  323. }
  324. /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
  325. #define ipw_read16(ipw, ofs) ({ \
  326. IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
  327. (u32)(ofs)); \
  328. _ipw_read16(ipw, ofs); \
  329. })
  330. /* 32-bit direct read (low 4K) */
  331. static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
  332. {
  333. return readl(ipw->hw_base + ofs);
  334. }
  335. /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
  336. #define ipw_read32(ipw, ofs) ({ \
  337. IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
  338. (u32)(ofs)); \
  339. _ipw_read32(ipw, ofs); \
  340. })
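/*
 * Summary of the access helpers: the low 4K of device address space is
 * memory-mapped through priv->hw_base and reached with the
 * ipw_read/write{8,16,32}() wrappers, while everything above 4K is
 * reached indirectly through the IPW_INDIRECT_ADDR / IPW_INDIRECT_DATA
 * (and IPW_AUTOINC_*) window registers via the ipw_*_reg{8,16,32}()
 * and ipw_*_indirect() wrappers.
 */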
  341. static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
  342. /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
  343. #define ipw_read_indirect(a, b, c, d) ({ \
  344. IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
  345. __LINE__, (u32)(b), (u32)(d)); \
  346. _ipw_read_indirect(a, b, c, d); \
  347. })
/* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
  349. static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
  350. int num);
  351. #define ipw_write_indirect(a, b, c, d) do { \
  352. IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
  353. __LINE__, (u32)(b), (u32)(d)); \
  354. _ipw_write_indirect(a, b, c, d); \
  355. } while (0)
  356. /* 32-bit indirect write (above 4K) */
  357. static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
  358. {
  359. IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
  360. _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
  361. _ipw_write32(priv, IPW_INDIRECT_DATA, value);
  362. }
  363. /* 8-bit indirect write (above 4K) */
  364. static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
  365. {
  366. u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
  367. u32 dif_len = reg - aligned_addr;
  368. IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
  369. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  370. _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
  371. }
  372. /* 16-bit indirect write (above 4K) */
  373. static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
  374. {
  375. u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
  376. u32 dif_len = (reg - aligned_addr) & (~0x1ul);
  377. IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
  378. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  379. _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
  380. }
  381. /* 8-bit indirect read (above 4K) */
  382. static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
  383. {
  384. u32 word;
  385. _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
  386. IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
  387. word = _ipw_read32(priv, IPW_INDIRECT_DATA);
  388. return (word >> ((reg & 0x3) * 8)) & 0xff;
  389. }
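/*
 * Worked example for the byte extraction above (address made up):
 * reading reg 0x00030006 latches the dword-aligned 0x00030004 into
 * IPW_INDIRECT_ADDR, reads a whole dword from IPW_INDIRECT_DATA and
 * returns (word >> ((0x6 & 0x3) * 8)) & 0xff, i.e. byte lane 2 of that
 * dword.
 */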
  390. /* 32-bit indirect read (above 4K) */
  391. static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
  392. {
  393. u32 value;
  394. IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
  395. _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
  396. value = _ipw_read32(priv, IPW_INDIRECT_DATA);
  397. IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
  398. return value;
  399. }
  400. /* General purpose, no alignment requirement, iterative (multi-byte) read, */
  401. /* for area above 1st 4K of SRAM/reg space */
  402. static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
  403. int num)
  404. {
  405. u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
  406. u32 dif_len = addr - aligned_addr;
  407. u32 i;
  408. IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
  409. if (num <= 0) {
  410. return;
  411. }
  412. /* Read the first dword (or portion) byte by byte */
  413. if (unlikely(dif_len)) {
  414. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  415. /* Start reading at aligned_addr + dif_len */
  416. for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
  417. *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
  418. aligned_addr += 4;
  419. }
  420. /* Read all of the middle dwords as dwords, with auto-increment */
  421. _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
  422. for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
  423. *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
  424. /* Read the last dword (or portion) byte by byte */
  425. if (unlikely(num)) {
  426. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  427. for (i = 0; num > 0; i++, num--)
  428. *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
  429. }
  430. }
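/*
 * Illustrative trace of the routine above (sizes made up): reading
 * 11 bytes starting at an address ending in ...2 fetches 2 leading
 * bytes through IPW_INDIRECT_DATA, then 2 whole dwords through the
 * auto-incrementing IPW_AUTOINC_DATA window, then 1 trailing byte
 * through IPW_INDIRECT_DATA again.
 */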
  431. /* General purpose, no alignment requirement, iterative (multi-byte) write, */
  432. /* for area above 1st 4K of SRAM/reg space */
  433. static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
  434. int num)
  435. {
  436. u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
  437. u32 dif_len = addr - aligned_addr;
  438. u32 i;
  439. IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
  440. if (num <= 0) {
  441. return;
  442. }
  443. /* Write the first dword (or portion) byte by byte */
  444. if (unlikely(dif_len)) {
  445. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  446. /* Start writing at aligned_addr + dif_len */
  447. for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
  448. _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
  449. aligned_addr += 4;
  450. }
  451. /* Write all of the middle dwords as dwords, with auto-increment */
  452. _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
  453. for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
  454. _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
  455. /* Write the last dword (or portion) byte by byte */
  456. if (unlikely(num)) {
  457. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  458. for (i = 0; num > 0; i++, num--, buf++)
  459. _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
  460. }
  461. }
  462. /* General purpose, no alignment requirement, iterative (multi-byte) write, */
  463. /* for 1st 4K of SRAM/regs space */
  464. static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
  465. int num)
  466. {
  467. memcpy_toio((priv->hw_base + addr), buf, num);
  468. }
  469. /* Set bit(s) in low 4K of SRAM/regs */
  470. static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
  471. {
  472. ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
  473. }
  474. /* Clear bit(s) in low 4K of SRAM/regs */
  475. static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
  476. {
  477. ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
  478. }
  479. static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
  480. {
  481. if (priv->status & STATUS_INT_ENABLED)
  482. return;
  483. priv->status |= STATUS_INT_ENABLED;
  484. ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
  485. }
  486. static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
  487. {
  488. if (!(priv->status & STATUS_INT_ENABLED))
  489. return;
  490. priv->status &= ~STATUS_INT_ENABLED;
  491. ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
  492. }
  493. static inline void ipw_enable_interrupts(struct ipw_priv *priv)
  494. {
  495. unsigned long flags;
  496. spin_lock_irqsave(&priv->irq_lock, flags);
  497. __ipw_enable_interrupts(priv);
  498. spin_unlock_irqrestore(&priv->irq_lock, flags);
  499. }
  500. static inline void ipw_disable_interrupts(struct ipw_priv *priv)
  501. {
  502. unsigned long flags;
  503. spin_lock_irqsave(&priv->irq_lock, flags);
  504. __ipw_disable_interrupts(priv);
  505. spin_unlock_irqrestore(&priv->irq_lock, flags);
  506. }
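/*
 * Convention for the four interrupt helpers above: the
 * double-underscore variants assume the caller already holds
 * priv->irq_lock; ipw_enable_interrupts() and ipw_disable_interrupts()
 * take and release it themselves.
 */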
  507. static char *ipw_error_desc(u32 val)
  508. {
  509. switch (val) {
  510. case IPW_FW_ERROR_OK:
  511. return "ERROR_OK";
  512. case IPW_FW_ERROR_FAIL:
  513. return "ERROR_FAIL";
  514. case IPW_FW_ERROR_MEMORY_UNDERFLOW:
  515. return "MEMORY_UNDERFLOW";
  516. case IPW_FW_ERROR_MEMORY_OVERFLOW:
  517. return "MEMORY_OVERFLOW";
  518. case IPW_FW_ERROR_BAD_PARAM:
  519. return "BAD_PARAM";
  520. case IPW_FW_ERROR_BAD_CHECKSUM:
  521. return "BAD_CHECKSUM";
  522. case IPW_FW_ERROR_NMI_INTERRUPT:
  523. return "NMI_INTERRUPT";
  524. case IPW_FW_ERROR_BAD_DATABASE:
  525. return "BAD_DATABASE";
  526. case IPW_FW_ERROR_ALLOC_FAIL:
  527. return "ALLOC_FAIL";
  528. case IPW_FW_ERROR_DMA_UNDERRUN:
  529. return "DMA_UNDERRUN";
  530. case IPW_FW_ERROR_DMA_STATUS:
  531. return "DMA_STATUS";
  532. case IPW_FW_ERROR_DINO_ERROR:
  533. return "DINO_ERROR";
  534. case IPW_FW_ERROR_EEPROM_ERROR:
  535. return "EEPROM_ERROR";
  536. case IPW_FW_ERROR_SYSASSERT:
  537. return "SYSASSERT";
  538. case IPW_FW_ERROR_FATAL_ERROR:
  539. return "FATAL_ERROR";
  540. default:
  541. return "UNKNOWN_ERROR";
  542. }
  543. }
  544. static void ipw_dump_error_log(struct ipw_priv *priv,
  545. struct ipw_fw_error *error)
  546. {
  547. u32 i;
  548. if (!error) {
  549. IPW_ERROR("Error allocating and capturing error log. "
  550. "Nothing to dump.\n");
  551. return;
  552. }
  553. IPW_ERROR("Start IPW Error Log Dump:\n");
  554. IPW_ERROR("Status: 0x%08X, Config: %08X\n",
  555. error->status, error->config);
  556. for (i = 0; i < error->elem_len; i++)
  557. IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
  558. ipw_error_desc(error->elem[i].desc),
  559. error->elem[i].time,
  560. error->elem[i].blink1,
  561. error->elem[i].blink2,
  562. error->elem[i].link1,
  563. error->elem[i].link2, error->elem[i].data);
  564. for (i = 0; i < error->log_len; i++)
  565. IPW_ERROR("%i\t0x%08x\t%i\n",
  566. error->log[i].time,
  567. error->log[i].data, error->log[i].event);
  568. }
  569. static inline int ipw_is_init(struct ipw_priv *priv)
  570. {
  571. return (priv->status & STATUS_INIT) ? 1 : 0;
  572. }
  573. static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
  574. {
  575. u32 addr, field_info, field_len, field_count, total_len;
  576. IPW_DEBUG_ORD("ordinal = %i\n", ord);
  577. if (!priv || !val || !len) {
  578. IPW_DEBUG_ORD("Invalid argument\n");
  579. return -EINVAL;
  580. }
  581. /* verify device ordinal tables have been initialized */
  582. if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
  583. IPW_DEBUG_ORD("Access ordinals before initialization\n");
  584. return -EINVAL;
  585. }
  586. switch (IPW_ORD_TABLE_ID_MASK & ord) {
  587. case IPW_ORD_TABLE_0_MASK:
  588. /*
  589. * TABLE 0: Direct access to a table of 32 bit values
  590. *
  591. * This is a very simple table with the data directly
  592. * read from the table
  593. */
  594. /* remove the table id from the ordinal */
  595. ord &= IPW_ORD_TABLE_VALUE_MASK;
  596. /* boundary check */
  597. if (ord > priv->table0_len) {
IPW_DEBUG_ORD("ordinal value (%i) larger than "
"max (%i)\n", ord, priv->table0_len);
  600. return -EINVAL;
  601. }
  602. /* verify we have enough room to store the value */
  603. if (*len < sizeof(u32)) {
  604. IPW_DEBUG_ORD("ordinal buffer length too small, "
  605. "need %zd\n", sizeof(u32));
  606. return -EINVAL;
  607. }
  608. IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
  609. ord, priv->table0_addr + (ord << 2));
  610. *len = sizeof(u32);
  611. ord <<= 2;
  612. *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
  613. break;
  614. case IPW_ORD_TABLE_1_MASK:
  615. /*
  616. * TABLE 1: Indirect access to a table of 32 bit values
  617. *
  618. * This is a fairly large table of u32 values each
  619. * representing starting addr for the data (which is
  620. * also a u32)
  621. */
  622. /* remove the table id from the ordinal */
  623. ord &= IPW_ORD_TABLE_VALUE_MASK;
  624. /* boundary check */
  625. if (ord > priv->table1_len) {
  626. IPW_DEBUG_ORD("ordinal value too long\n");
  627. return -EINVAL;
  628. }
  629. /* verify we have enough room to store the value */
  630. if (*len < sizeof(u32)) {
  631. IPW_DEBUG_ORD("ordinal buffer length too small, "
  632. "need %zd\n", sizeof(u32));
  633. return -EINVAL;
  634. }
  635. *((u32 *) val) =
  636. ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
  637. *len = sizeof(u32);
  638. break;
  639. case IPW_ORD_TABLE_2_MASK:
  640. /*
  641. * TABLE 2: Indirect access to a table of variable sized values
  642. *
* This table consists of six values, each containing
* - a dword containing the starting offset of the data
* - a dword containing the length in the first 16 bits
*   and the count in the second 16 bits
  647. */
  648. /* remove the table id from the ordinal */
  649. ord &= IPW_ORD_TABLE_VALUE_MASK;
  650. /* boundary check */
  651. if (ord > priv->table2_len) {
  652. IPW_DEBUG_ORD("ordinal value too long\n");
  653. return -EINVAL;
  654. }
  655. /* get the address of statistic */
  656. addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
/* get the second DW of statistics;
* two 16-bit words - first is length, second is count */
  659. field_info =
  660. ipw_read_reg32(priv,
  661. priv->table2_addr + (ord << 3) +
  662. sizeof(u32));
  663. /* get each entry length */
  664. field_len = *((u16 *) & field_info);
  665. /* get number of entries */
  666. field_count = *(((u16 *) & field_info) + 1);
  667. /* abort if not enough memory */
  668. total_len = field_len * field_count;
  669. if (total_len > *len) {
  670. *len = total_len;
  671. return -EINVAL;
  672. }
  673. *len = total_len;
  674. if (!total_len)
  675. return 0;
  676. IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
  677. "field_info = 0x%08x\n",
  678. addr, total_len, field_info);
  679. ipw_read_indirect(priv, addr, val, total_len);
  680. break;
  681. default:
  682. IPW_DEBUG_ORD("Invalid ordinal!\n");
  683. return -EINVAL;
  684. }
  685. return 0;
  686. }
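/*
 * Typical use of ipw_get_ordinal() (this mirrors ucode_version_show()
 * further below):
 *
 *	u32 tmp = 0, len = sizeof(u32);
 *
 *	if (!ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
 *		... tmp now holds the value, len the bytes written ...
 */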
  687. static void ipw_init_ordinals(struct ipw_priv *priv)
  688. {
  689. priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
  690. priv->table0_len = ipw_read32(priv, priv->table0_addr);
  691. IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
  692. priv->table0_addr, priv->table0_len);
  693. priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
  694. priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
  695. IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
  696. priv->table1_addr, priv->table1_len);
  697. priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
  698. priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
  699. priv->table2_len &= 0x0000ffff; /* use first two bytes */
  700. IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
  701. priv->table2_addr, priv->table2_len);
  702. }
  703. static u32 ipw_register_toggle(u32 reg)
  704. {
  705. reg &= ~IPW_START_STANDBY;
  706. if (reg & IPW_GATE_ODMA)
  707. reg &= ~IPW_GATE_ODMA;
  708. if (reg & IPW_GATE_IDMA)
  709. reg &= ~IPW_GATE_IDMA;
  710. if (reg & IPW_GATE_ADMA)
  711. reg &= ~IPW_GATE_ADMA;
  712. return reg;
  713. }
  714. /*
  715. * LED behavior:
* - On radio ON, turn on any LEDs that need to be on during start
  717. * - On initialization, start unassociated blink
  718. * - On association, disable unassociated blink
  719. * - On disassociation, start unassociated blink
  720. * - On radio OFF, turn off any LEDs started during radio on
  721. *
  722. */
  723. #define LD_TIME_LINK_ON msecs_to_jiffies(300)
  724. #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
  725. #define LD_TIME_ACT_ON msecs_to_jiffies(250)
  726. static void ipw_led_link_on(struct ipw_priv *priv)
  727. {
  728. unsigned long flags;
  729. u32 led;
/* If configured not to use LEDs, or nic_type is 1,
* then we don't toggle a LINK LED */
  732. if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
  733. return;
  734. spin_lock_irqsave(&priv->lock, flags);
  735. if (!(priv->status & STATUS_RF_KILL_MASK) &&
  736. !(priv->status & STATUS_LED_LINK_ON)) {
  737. IPW_DEBUG_LED("Link LED On\n");
  738. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  739. led |= priv->led_association_on;
  740. led = ipw_register_toggle(led);
  741. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  742. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  743. priv->status |= STATUS_LED_LINK_ON;
  744. /* If we aren't associated, schedule turning the LED off */
  745. if (!(priv->status & STATUS_ASSOCIATED))
  746. schedule_delayed_work(&priv->led_link_off,
  747. LD_TIME_LINK_ON);
  748. }
  749. spin_unlock_irqrestore(&priv->lock, flags);
  750. }
  751. static void ipw_bg_led_link_on(struct work_struct *work)
  752. {
  753. struct ipw_priv *priv =
  754. container_of(work, struct ipw_priv, led_link_on.work);
  755. mutex_lock(&priv->mutex);
  756. ipw_led_link_on(priv);
  757. mutex_unlock(&priv->mutex);
  758. }
  759. static void ipw_led_link_off(struct ipw_priv *priv)
  760. {
  761. unsigned long flags;
  762. u32 led;
/* If configured not to use LEDs, or nic type is 1,
* then we don't toggle the LINK LED. */
  765. if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
  766. return;
  767. spin_lock_irqsave(&priv->lock, flags);
  768. if (priv->status & STATUS_LED_LINK_ON) {
  769. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  770. led &= priv->led_association_off;
  771. led = ipw_register_toggle(led);
  772. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  773. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  774. IPW_DEBUG_LED("Link LED Off\n");
  775. priv->status &= ~STATUS_LED_LINK_ON;
  776. /* If we aren't associated and the radio is on, schedule
  777. * turning the LED on (blink while unassociated) */
  778. if (!(priv->status & STATUS_RF_KILL_MASK) &&
  779. !(priv->status & STATUS_ASSOCIATED))
  780. schedule_delayed_work(&priv->led_link_on,
  781. LD_TIME_LINK_OFF);
  782. }
  783. spin_unlock_irqrestore(&priv->lock, flags);
  784. }
  785. static void ipw_bg_led_link_off(struct work_struct *work)
  786. {
  787. struct ipw_priv *priv =
  788. container_of(work, struct ipw_priv, led_link_off.work);
  789. mutex_lock(&priv->mutex);
  790. ipw_led_link_off(priv);
  791. mutex_unlock(&priv->mutex);
  792. }
  793. static void __ipw_led_activity_on(struct ipw_priv *priv)
  794. {
  795. u32 led;
  796. if (priv->config & CFG_NO_LED)
  797. return;
  798. if (priv->status & STATUS_RF_KILL_MASK)
  799. return;
  800. if (!(priv->status & STATUS_LED_ACT_ON)) {
  801. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  802. led |= priv->led_activity_on;
  803. led = ipw_register_toggle(led);
  804. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  805. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  806. IPW_DEBUG_LED("Activity LED On\n");
  807. priv->status |= STATUS_LED_ACT_ON;
  808. cancel_delayed_work(&priv->led_act_off);
  809. schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
  810. } else {
  811. /* Reschedule LED off for full time period */
  812. cancel_delayed_work(&priv->led_act_off);
  813. schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
  814. }
  815. }
  816. #if 0
  817. void ipw_led_activity_on(struct ipw_priv *priv)
  818. {
  819. unsigned long flags;
  820. spin_lock_irqsave(&priv->lock, flags);
  821. __ipw_led_activity_on(priv);
  822. spin_unlock_irqrestore(&priv->lock, flags);
  823. }
  824. #endif /* 0 */
  825. static void ipw_led_activity_off(struct ipw_priv *priv)
  826. {
  827. unsigned long flags;
  828. u32 led;
  829. if (priv->config & CFG_NO_LED)
  830. return;
  831. spin_lock_irqsave(&priv->lock, flags);
  832. if (priv->status & STATUS_LED_ACT_ON) {
  833. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  834. led &= priv->led_activity_off;
  835. led = ipw_register_toggle(led);
  836. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  837. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  838. IPW_DEBUG_LED("Activity LED Off\n");
  839. priv->status &= ~STATUS_LED_ACT_ON;
  840. }
  841. spin_unlock_irqrestore(&priv->lock, flags);
  842. }
  843. static void ipw_bg_led_activity_off(struct work_struct *work)
  844. {
  845. struct ipw_priv *priv =
  846. container_of(work, struct ipw_priv, led_act_off.work);
  847. mutex_lock(&priv->mutex);
  848. ipw_led_activity_off(priv);
  849. mutex_unlock(&priv->mutex);
  850. }
  851. static void ipw_led_band_on(struct ipw_priv *priv)
  852. {
  853. unsigned long flags;
  854. u32 led;
  855. /* Only nic type 1 supports mode LEDs */
  856. if (priv->config & CFG_NO_LED ||
  857. priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
  858. return;
  859. spin_lock_irqsave(&priv->lock, flags);
  860. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  861. if (priv->assoc_network->mode == IEEE_A) {
  862. led |= priv->led_ofdm_on;
  863. led &= priv->led_association_off;
  864. IPW_DEBUG_LED("Mode LED On: 802.11a\n");
  865. } else if (priv->assoc_network->mode == IEEE_G) {
  866. led |= priv->led_ofdm_on;
  867. led |= priv->led_association_on;
  868. IPW_DEBUG_LED("Mode LED On: 802.11g\n");
  869. } else {
  870. led &= priv->led_ofdm_off;
  871. led |= priv->led_association_on;
  872. IPW_DEBUG_LED("Mode LED On: 802.11b\n");
  873. }
  874. led = ipw_register_toggle(led);
  875. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  876. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  877. spin_unlock_irqrestore(&priv->lock, flags);
  878. }
  879. static void ipw_led_band_off(struct ipw_priv *priv)
  880. {
  881. unsigned long flags;
  882. u32 led;
  883. /* Only nic type 1 supports mode LEDs */
  884. if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
  885. return;
  886. spin_lock_irqsave(&priv->lock, flags);
  887. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  888. led &= priv->led_ofdm_off;
  889. led &= priv->led_association_off;
  890. led = ipw_register_toggle(led);
  891. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  892. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  893. spin_unlock_irqrestore(&priv->lock, flags);
  894. }
  895. static void ipw_led_radio_on(struct ipw_priv *priv)
  896. {
  897. ipw_led_link_on(priv);
  898. }
  899. static void ipw_led_radio_off(struct ipw_priv *priv)
  900. {
  901. ipw_led_activity_off(priv);
  902. ipw_led_link_off(priv);
  903. }
  904. static void ipw_led_link_up(struct ipw_priv *priv)
  905. {
/* Set the Link LED on for all nic types */
  907. ipw_led_link_on(priv);
  908. }
  909. static void ipw_led_link_down(struct ipw_priv *priv)
  910. {
  911. ipw_led_activity_off(priv);
  912. ipw_led_link_off(priv);
  913. if (priv->status & STATUS_RF_KILL_MASK)
  914. ipw_led_radio_off(priv);
  915. }
  916. static void ipw_led_init(struct ipw_priv *priv)
  917. {
  918. priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
  919. /* Set the default PINs for the link and activity leds */
  920. priv->led_activity_on = IPW_ACTIVITY_LED;
  921. priv->led_activity_off = ~(IPW_ACTIVITY_LED);
  922. priv->led_association_on = IPW_ASSOCIATED_LED;
  923. priv->led_association_off = ~(IPW_ASSOCIATED_LED);
  924. /* Set the default PINs for the OFDM leds */
  925. priv->led_ofdm_on = IPW_OFDM_LED;
  926. priv->led_ofdm_off = ~(IPW_OFDM_LED);
  927. switch (priv->nic_type) {
  928. case EEPROM_NIC_TYPE_1:
  929. /* In this NIC type, the LEDs are reversed.... */
  930. priv->led_activity_on = IPW_ASSOCIATED_LED;
  931. priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
  932. priv->led_association_on = IPW_ACTIVITY_LED;
  933. priv->led_association_off = ~(IPW_ACTIVITY_LED);
  934. if (!(priv->config & CFG_NO_LED))
  935. ipw_led_band_on(priv);
  936. /* And we don't blink link LEDs for this nic, so
  937. * just return here */
  938. return;
  939. case EEPROM_NIC_TYPE_3:
  940. case EEPROM_NIC_TYPE_2:
  941. case EEPROM_NIC_TYPE_4:
  942. case EEPROM_NIC_TYPE_0:
  943. break;
  944. default:
  945. IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
  946. priv->nic_type);
  947. priv->nic_type = EEPROM_NIC_TYPE_0;
  948. break;
  949. }
  950. if (!(priv->config & CFG_NO_LED)) {
  951. if (priv->status & STATUS_ASSOCIATED)
  952. ipw_led_link_on(priv);
  953. else
  954. ipw_led_link_off(priv);
  955. }
  956. }
  957. static void ipw_led_shutdown(struct ipw_priv *priv)
  958. {
  959. ipw_led_activity_off(priv);
  960. ipw_led_link_off(priv);
  961. ipw_led_band_off(priv);
  962. cancel_delayed_work(&priv->led_link_on);
  963. cancel_delayed_work(&priv->led_link_off);
  964. cancel_delayed_work(&priv->led_act_off);
  965. }
  966. /*
  967. * The following adds a new attribute to the sysfs representation
  968. * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
  969. * used for controlling the debug level.
  970. *
  971. * See the level definitions in ipw for details.
  972. */
  973. static ssize_t debug_level_show(struct device_driver *d, char *buf)
  974. {
  975. return sprintf(buf, "0x%08X\n", ipw_debug_level);
  976. }
  977. static ssize_t debug_level_store(struct device_driver *d, const char *buf,
  978. size_t count)
  979. {
  980. char *p = (char *)buf;
  981. u32 val;
  982. if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
  983. p++;
  984. if (p[0] == 'x' || p[0] == 'X')
  985. p++;
  986. val = simple_strtoul(p, &p, 16);
  987. } else
  988. val = simple_strtoul(p, &p, 10);
  989. if (p == buf)
  990. printk(KERN_INFO DRV_NAME
  991. ": %s is not in hex or decimal form.\n", buf);
  992. else
  993. ipw_debug_level = val;
  994. return strnlen(buf, count);
  995. }
  996. static DRIVER_ATTR_RW(debug_level);
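/*
 * Example use of the attribute above (illustrative shell session, run
 * from the driver's sysfs directory, i.e. the one named after DRV_NAME
 * under /sys/bus/pci/drivers/):
 *
 *	# cat debug_level
 *	# echo 0x43fff > debug_level
 *
 * The store handler accepts either hex (with a leading 0x/x) or plain
 * decimal.
 */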
  997. static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
  998. {
  999. /* length = 1st dword in log */
  1000. return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
  1001. }
  1002. static void ipw_capture_event_log(struct ipw_priv *priv,
  1003. u32 log_len, struct ipw_event *log)
  1004. {
  1005. u32 base;
  1006. if (log_len) {
  1007. base = ipw_read32(priv, IPW_EVENT_LOG);
  1008. ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
  1009. (u8 *) log, sizeof(*log) * log_len);
  1010. }
  1011. }
  1012. static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
  1013. {
  1014. struct ipw_fw_error *error;
  1015. u32 log_len = ipw_get_event_log_len(priv);
  1016. u32 base = ipw_read32(priv, IPW_ERROR_LOG);
  1017. u32 elem_len = ipw_read_reg32(priv, base);
  1018. error = kmalloc(sizeof(*error) +
  1019. sizeof(*error->elem) * elem_len +
  1020. sizeof(*error->log) * log_len, GFP_ATOMIC);
  1021. if (!error) {
  1022. IPW_ERROR("Memory allocation for firmware error log "
  1023. "failed.\n");
  1024. return NULL;
  1025. }
  1026. error->jiffies = jiffies;
  1027. error->status = priv->status;
  1028. error->config = priv->config;
  1029. error->elem_len = elem_len;
  1030. error->log_len = log_len;
  1031. error->elem = (struct ipw_error_elem *)error->payload;
  1032. error->log = (struct ipw_event *)(error->elem + elem_len);
  1033. ipw_capture_event_log(priv, log_len, error->log);
  1034. if (elem_len)
  1035. ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
  1036. sizeof(*error->elem) * elem_len);
  1037. return error;
  1038. }
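/*
 * Layout of the single allocation made above: the ipw_fw_error header
 * is followed in error->payload by elem_len ipw_error_elem entries and
 * then by log_len ipw_event entries, which is exactly what the
 * error->elem / error->log pointer setup encodes.
 */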
  1039. static ssize_t event_log_show(struct device *d,
  1040. struct device_attribute *attr, char *buf)
  1041. {
  1042. struct ipw_priv *priv = dev_get_drvdata(d);
  1043. u32 log_len = ipw_get_event_log_len(priv);
  1044. u32 log_size;
  1045. struct ipw_event *log;
  1046. u32 len = 0, i;
  1047. /* not using min() because of its strict type checking */
  1048. log_size = PAGE_SIZE / sizeof(*log) > log_len ?
  1049. sizeof(*log) * log_len : PAGE_SIZE;
  1050. log = kzalloc(log_size, GFP_KERNEL);
  1051. if (!log) {
  1052. IPW_ERROR("Unable to allocate memory for log\n");
  1053. return 0;
  1054. }
  1055. log_len = log_size / sizeof(*log);
  1056. ipw_capture_event_log(priv, log_len, log);
  1057. len += scnprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
  1058. for (i = 0; i < log_len; i++)
  1059. len += scnprintf(buf + len, PAGE_SIZE - len,
  1060. "\n%08X%08X%08X",
  1061. log[i].time, log[i].event, log[i].data);
  1062. len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
  1063. kfree(log);
  1064. return len;
  1065. }
  1066. static DEVICE_ATTR_RO(event_log);
  1067. static ssize_t error_show(struct device *d,
  1068. struct device_attribute *attr, char *buf)
  1069. {
  1070. struct ipw_priv *priv = dev_get_drvdata(d);
  1071. u32 len = 0, i;
  1072. if (!priv->error)
  1073. return 0;
  1074. len += scnprintf(buf + len, PAGE_SIZE - len,
  1075. "%08lX%08X%08X%08X",
  1076. priv->error->jiffies,
  1077. priv->error->status,
  1078. priv->error->config, priv->error->elem_len);
  1079. for (i = 0; i < priv->error->elem_len; i++)
  1080. len += scnprintf(buf + len, PAGE_SIZE - len,
  1081. "\n%08X%08X%08X%08X%08X%08X%08X",
  1082. priv->error->elem[i].time,
  1083. priv->error->elem[i].desc,
  1084. priv->error->elem[i].blink1,
  1085. priv->error->elem[i].blink2,
  1086. priv->error->elem[i].link1,
  1087. priv->error->elem[i].link2,
  1088. priv->error->elem[i].data);
  1089. len += scnprintf(buf + len, PAGE_SIZE - len,
  1090. "\n%08X", priv->error->log_len);
  1091. for (i = 0; i < priv->error->log_len; i++)
  1092. len += scnprintf(buf + len, PAGE_SIZE - len,
  1093. "\n%08X%08X%08X",
  1094. priv->error->log[i].time,
  1095. priv->error->log[i].event,
  1096. priv->error->log[i].data);
  1097. len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
  1098. return len;
  1099. }
  1100. static ssize_t error_store(struct device *d,
  1101. struct device_attribute *attr,
  1102. const char *buf, size_t count)
  1103. {
  1104. struct ipw_priv *priv = dev_get_drvdata(d);
  1105. kfree(priv->error);
  1106. priv->error = NULL;
  1107. return count;
  1108. }
  1109. static DEVICE_ATTR_RW(error);
  1110. static ssize_t cmd_log_show(struct device *d,
  1111. struct device_attribute *attr, char *buf)
  1112. {
  1113. struct ipw_priv *priv = dev_get_drvdata(d);
  1114. u32 len = 0, i;
  1115. if (!priv->cmdlog)
  1116. return 0;
  1117. for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
  1118. (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
  1119. i = (i + 1) % priv->cmdlog_len) {
  1120. len +=
  1121. scnprintf(buf + len, PAGE_SIZE - len,
  1122. "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
  1123. priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
  1124. priv->cmdlog[i].cmd.len);
  1125. len +=
  1126. snprintk_buf(buf + len, PAGE_SIZE - len,
  1127. (u8 *) priv->cmdlog[i].cmd.param,
  1128. priv->cmdlog[i].cmd.len);
  1129. len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
  1130. }
  1131. len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
  1132. return len;
  1133. }
  1134. static DEVICE_ATTR_RO(cmd_log);
  1135. #ifdef CONFIG_IPW2200_PROMISCUOUS
  1136. static void ipw_prom_free(struct ipw_priv *priv);
  1137. static int ipw_prom_alloc(struct ipw_priv *priv);
  1138. static ssize_t rtap_iface_store(struct device *d,
  1139. struct device_attribute *attr,
  1140. const char *buf, size_t count)
  1141. {
  1142. struct ipw_priv *priv = dev_get_drvdata(d);
  1143. int rc = 0;
  1144. if (count < 1)
  1145. return -EINVAL;
  1146. switch (buf[0]) {
  1147. case '0':
  1148. if (!rtap_iface)
  1149. return count;
  1150. if (netif_running(priv->prom_net_dev)) {
  1151. IPW_WARNING("Interface is up. Cannot unregister.\n");
  1152. return count;
  1153. }
  1154. ipw_prom_free(priv);
  1155. rtap_iface = 0;
  1156. break;
  1157. case '1':
  1158. if (rtap_iface)
  1159. return count;
  1160. rc = ipw_prom_alloc(priv);
  1161. if (!rc)
  1162. rtap_iface = 1;
  1163. break;
  1164. default:
  1165. return -EINVAL;
  1166. }
  1167. if (rc) {
  1168. IPW_ERROR("Failed to register promiscuous network "
  1169. "device (error %d).\n", rc);
  1170. }
  1171. return count;
  1172. }
  1173. static ssize_t rtap_iface_show(struct device *d,
  1174. struct device_attribute *attr,
  1175. char *buf)
  1176. {
  1177. struct ipw_priv *priv = dev_get_drvdata(d);
  1178. if (rtap_iface)
  1179. return sprintf(buf, "%s", priv->prom_net_dev->name);
  1180. else {
  1181. buf[0] = '-';
  1182. buf[1] = '1';
  1183. buf[2] = '\0';
  1184. return 3;
  1185. }
  1186. }
  1187. static DEVICE_ATTR_ADMIN_RW(rtap_iface);
  1188. static ssize_t rtap_filter_store(struct device *d,
  1189. struct device_attribute *attr,
  1190. const char *buf, size_t count)
  1191. {
  1192. struct ipw_priv *priv = dev_get_drvdata(d);
  1193. if (!priv->prom_priv) {
  1194. IPW_ERROR("Attempting to set filter without "
  1195. "rtap_iface enabled.\n");
  1196. return -EPERM;
  1197. }
  1198. priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
  1199. IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
  1200. BIT_ARG16(priv->prom_priv->filter));
  1201. return count;
  1202. }
  1203. static ssize_t rtap_filter_show(struct device *d,
  1204. struct device_attribute *attr,
  1205. char *buf)
  1206. {
  1207. struct ipw_priv *priv = dev_get_drvdata(d);
  1208. return sprintf(buf, "0x%04X",
  1209. priv->prom_priv ? priv->prom_priv->filter : 0);
  1210. }
  1211. static DEVICE_ATTR_ADMIN_RW(rtap_filter);
  1212. #endif
  1213. static ssize_t scan_age_show(struct device *d, struct device_attribute *attr,
  1214. char *buf)
  1215. {
  1216. struct ipw_priv *priv = dev_get_drvdata(d);
  1217. return sprintf(buf, "%d\n", priv->ieee->scan_age);
  1218. }
  1219. static ssize_t scan_age_store(struct device *d, struct device_attribute *attr,
  1220. const char *buf, size_t count)
  1221. {
  1222. struct ipw_priv *priv = dev_get_drvdata(d);
  1223. struct net_device *dev = priv->net_dev;
  1224. char buffer[] = "00000000";
  1225. unsigned long len =
  1226. (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
  1227. unsigned long val;
  1228. char *p = buffer;
  1229. IPW_DEBUG_INFO("enter\n");
  1230. strncpy(buffer, buf, len);
  1231. buffer[len] = 0;
  1232. if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
  1233. p++;
  1234. if (p[0] == 'x' || p[0] == 'X')
  1235. p++;
  1236. val = simple_strtoul(p, &p, 16);
  1237. } else
  1238. val = simple_strtoul(p, &p, 10);
  1239. if (p == buffer) {
  1240. IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
  1241. } else {
  1242. priv->ieee->scan_age = val;
  1243. IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
  1244. }
  1245. IPW_DEBUG_INFO("exit\n");
  1246. return len;
  1247. }
  1248. static DEVICE_ATTR_RW(scan_age);
  1249. static ssize_t led_show(struct device *d, struct device_attribute *attr,
  1250. char *buf)
  1251. {
  1252. struct ipw_priv *priv = dev_get_drvdata(d);
  1253. return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
  1254. }
  1255. static ssize_t led_store(struct device *d, struct device_attribute *attr,
  1256. const char *buf, size_t count)
  1257. {
  1258. struct ipw_priv *priv = dev_get_drvdata(d);
  1259. IPW_DEBUG_INFO("enter\n");
  1260. if (count == 0)
  1261. return 0;
  1262. if (*buf == 0) {
  1263. IPW_DEBUG_LED("Disabling LED control.\n");
  1264. priv->config |= CFG_NO_LED;
  1265. ipw_led_shutdown(priv);
  1266. } else {
  1267. IPW_DEBUG_LED("Enabling LED control.\n");
  1268. priv->config &= ~CFG_NO_LED;
  1269. ipw_led_init(priv);
  1270. }
  1271. IPW_DEBUG_INFO("exit\n");
  1272. return count;
  1273. }
  1274. static DEVICE_ATTR_RW(led);
  1275. static ssize_t status_show(struct device *d,
  1276. struct device_attribute *attr, char *buf)
  1277. {
  1278. struct ipw_priv *p = dev_get_drvdata(d);
  1279. return sprintf(buf, "0x%08x\n", (int)p->status);
  1280. }
  1281. static DEVICE_ATTR_RO(status);
  1282. static ssize_t cfg_show(struct device *d, struct device_attribute *attr,
  1283. char *buf)
  1284. {
  1285. struct ipw_priv *p = dev_get_drvdata(d);
  1286. return sprintf(buf, "0x%08x\n", (int)p->config);
  1287. }
  1288. static DEVICE_ATTR_RO(cfg);
  1289. static ssize_t nic_type_show(struct device *d,
  1290. struct device_attribute *attr, char *buf)
  1291. {
  1292. struct ipw_priv *priv = dev_get_drvdata(d);
  1293. return sprintf(buf, "TYPE: %d\n", priv->nic_type);
  1294. }
  1295. static DEVICE_ATTR_RO(nic_type);
  1296. static ssize_t ucode_version_show(struct device *d,
  1297. struct device_attribute *attr, char *buf)
  1298. {
  1299. u32 len = sizeof(u32), tmp = 0;
  1300. struct ipw_priv *p = dev_get_drvdata(d);
  1301. if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
  1302. return 0;
  1303. return sprintf(buf, "0x%08x\n", tmp);
  1304. }
  1305. static DEVICE_ATTR_RO(ucode_version);
  1306. static ssize_t rtc_show(struct device *d, struct device_attribute *attr,
  1307. char *buf)
  1308. {
  1309. u32 len = sizeof(u32), tmp = 0;
  1310. struct ipw_priv *p = dev_get_drvdata(d);
  1311. if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
  1312. return 0;
  1313. return sprintf(buf, "0x%08x\n", tmp);
  1314. }
  1315. static DEVICE_ATTR_RO(rtc);
  1316. /*
  1317. * Add a device attribute to view/control the delay between eeprom
  1318. * operations.
  1319. */
  1320. static ssize_t eeprom_delay_show(struct device *d,
  1321. struct device_attribute *attr, char *buf)
  1322. {
  1323. struct ipw_priv *p = dev_get_drvdata(d);
  1324. int n = p->eeprom_delay;
  1325. return sprintf(buf, "%i\n", n);
  1326. }
  1327. static ssize_t eeprom_delay_store(struct device *d,
  1328. struct device_attribute *attr,
  1329. const char *buf, size_t count)
  1330. {
  1331. struct ipw_priv *p = dev_get_drvdata(d);
  1332. sscanf(buf, "%i", &p->eeprom_delay);
  1333. return strnlen(buf, count);
  1334. }
  1335. static DEVICE_ATTR_RW(eeprom_delay);
  1336. static ssize_t command_event_reg_show(struct device *d,
  1337. struct device_attribute *attr, char *buf)
  1338. {
  1339. u32 reg = 0;
  1340. struct ipw_priv *p = dev_get_drvdata(d);
  1341. reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
  1342. return sprintf(buf, "0x%08x\n", reg);
  1343. }
  1344. static ssize_t command_event_reg_store(struct device *d,
  1345. struct device_attribute *attr,
  1346. const char *buf, size_t count)
  1347. {
  1348. u32 reg;
  1349. struct ipw_priv *p = dev_get_drvdata(d);
  1350. sscanf(buf, "%x", &reg);
  1351. ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
  1352. return strnlen(buf, count);
  1353. }
  1354. static DEVICE_ATTR_RW(command_event_reg);
  1355. static ssize_t mem_gpio_reg_show(struct device *d,
  1356. struct device_attribute *attr, char *buf)
  1357. {
  1358. u32 reg = 0;
  1359. struct ipw_priv *p = dev_get_drvdata(d);
  1360. reg = ipw_read_reg32(p, 0x301100);
  1361. return sprintf(buf, "0x%08x\n", reg);
  1362. }
  1363. static ssize_t mem_gpio_reg_store(struct device *d,
  1364. struct device_attribute *attr,
  1365. const char *buf, size_t count)
  1366. {
  1367. u32 reg;
  1368. struct ipw_priv *p = dev_get_drvdata(d);
  1369. sscanf(buf, "%x", &reg);
  1370. ipw_write_reg32(p, 0x301100, reg);
  1371. return strnlen(buf, count);
  1372. }
  1373. static DEVICE_ATTR_RW(mem_gpio_reg);
  1374. static ssize_t indirect_dword_show(struct device *d,
  1375. struct device_attribute *attr, char *buf)
  1376. {
  1377. u32 reg = 0;
  1378. struct ipw_priv *priv = dev_get_drvdata(d);
  1379. if (priv->status & STATUS_INDIRECT_DWORD)
  1380. reg = ipw_read_reg32(priv, priv->indirect_dword);
  1381. else
  1382. reg = 0;
  1383. return sprintf(buf, "0x%08x\n", reg);
  1384. }
  1385. static ssize_t indirect_dword_store(struct device *d,
  1386. struct device_attribute *attr,
  1387. const char *buf, size_t count)
  1388. {
  1389. struct ipw_priv *priv = dev_get_drvdata(d);
  1390. sscanf(buf, "%x", &priv->indirect_dword);
  1391. priv->status |= STATUS_INDIRECT_DWORD;
  1392. return strnlen(buf, count);
  1393. }
  1394. static DEVICE_ATTR_RW(indirect_dword);
  1395. static ssize_t indirect_byte_show(struct device *d,
  1396. struct device_attribute *attr, char *buf)
  1397. {
  1398. u8 reg = 0;
  1399. struct ipw_priv *priv = dev_get_drvdata(d);
  1400. if (priv->status & STATUS_INDIRECT_BYTE)
  1401. reg = ipw_read_reg8(priv, priv->indirect_byte);
  1402. else
  1403. reg = 0;
  1404. return sprintf(buf, "0x%02x\n", reg);
  1405. }
  1406. static ssize_t indirect_byte_store(struct device *d,
  1407. struct device_attribute *attr,
  1408. const char *buf, size_t count)
  1409. {
  1410. struct ipw_priv *priv = dev_get_drvdata(d);
  1411. sscanf(buf, "%x", &priv->indirect_byte);
  1412. priv->status |= STATUS_INDIRECT_BYTE;
  1413. return strnlen(buf, count);
  1414. }
  1415. static DEVICE_ATTR_RW(indirect_byte);
  1416. static ssize_t direct_dword_show(struct device *d,
  1417. struct device_attribute *attr, char *buf)
  1418. {
  1419. u32 reg = 0;
  1420. struct ipw_priv *priv = dev_get_drvdata(d);
  1421. if (priv->status & STATUS_DIRECT_DWORD)
  1422. reg = ipw_read32(priv, priv->direct_dword);
  1423. else
  1424. reg = 0;
  1425. return sprintf(buf, "0x%08x\n", reg);
  1426. }
  1427. static ssize_t direct_dword_store(struct device *d,
  1428. struct device_attribute *attr,
  1429. const char *buf, size_t count)
  1430. {
  1431. struct ipw_priv *priv = dev_get_drvdata(d);
  1432. sscanf(buf, "%x", &priv->direct_dword);
  1433. priv->status |= STATUS_DIRECT_DWORD;
  1434. return strnlen(buf, count);
  1435. }
  1436. static DEVICE_ATTR_RW(direct_dword);
  1437. static int rf_kill_active(struct ipw_priv *priv)
  1438. {
  1439. if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
  1440. priv->status |= STATUS_RF_KILL_HW;
  1441. wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
  1442. } else {
  1443. priv->status &= ~STATUS_RF_KILL_HW;
  1444. wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
  1445. }
  1446. return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
  1447. }
  1448. static ssize_t rf_kill_show(struct device *d, struct device_attribute *attr,
  1449. char *buf)
  1450. {
  1451. /* 0 - RF kill not enabled
  1452. 1 - SW based RF kill active (sysfs)
  1453. 2 - HW based RF kill active
3 - Both HW and SW based RF kill active */
  1455. struct ipw_priv *priv = dev_get_drvdata(d);
  1456. int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
  1457. (rf_kill_active(priv) ? 0x2 : 0x0);
  1458. return sprintf(buf, "%i\n", val);
  1459. }
  1460. static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
  1461. {
  1462. if ((disable_radio ? 1 : 0) ==
  1463. ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
  1464. return 0;
  1465. IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
  1466. disable_radio ? "OFF" : "ON");
  1467. if (disable_radio) {
  1468. priv->status |= STATUS_RF_KILL_SW;
  1469. cancel_delayed_work(&priv->request_scan);
  1470. cancel_delayed_work(&priv->request_direct_scan);
  1471. cancel_delayed_work(&priv->request_passive_scan);
  1472. cancel_delayed_work(&priv->scan_event);
  1473. schedule_work(&priv->down);
  1474. } else {
  1475. priv->status &= ~STATUS_RF_KILL_SW;
  1476. if (rf_kill_active(priv)) {
  1477. IPW_DEBUG_RF_KILL("Can not turn radio back on - "
  1478. "disabled by HW switch\n");
  1479. /* Make sure the RF_KILL check timer is running */
  1480. cancel_delayed_work(&priv->rf_kill);
  1481. schedule_delayed_work(&priv->rf_kill,
  1482. round_jiffies_relative(2 * HZ));
  1483. } else
  1484. schedule_work(&priv->up);
  1485. }
  1486. return 1;
  1487. }
  1488. static ssize_t rf_kill_store(struct device *d, struct device_attribute *attr,
  1489. const char *buf, size_t count)
  1490. {
  1491. struct ipw_priv *priv = dev_get_drvdata(d);
  1492. ipw_radio_kill_sw(priv, buf[0] == '1');
  1493. return count;
  1494. }
  1495. static DEVICE_ATTR_RW(rf_kill);
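/*
 * Example use of the rf_kill attribute (illustrative shell session,
 * run from this device's sysfs directory):
 *
 *	# echo 1 > rf_kill	(assert the software RF kill)
 *	# echo 0 > rf_kill	(release it)
 *	# cat rf_kill		(prints the 0-3 state described above)
 */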
  1496. static ssize_t speed_scan_show(struct device *d, struct device_attribute *attr,
  1497. char *buf)
  1498. {
  1499. struct ipw_priv *priv = dev_get_drvdata(d);
  1500. int pos = 0, len = 0;
  1501. if (priv->config & CFG_SPEED_SCAN) {
  1502. while (priv->speed_scan[pos] != 0)
  1503. len += sprintf(&buf[len], "%d ",
  1504. priv->speed_scan[pos++]);
  1505. return len + sprintf(&buf[len], "\n");
  1506. }
  1507. return sprintf(buf, "0\n");
  1508. }
  1509. static ssize_t speed_scan_store(struct device *d, struct device_attribute *attr,
  1510. const char *buf, size_t count)
  1511. {
  1512. struct ipw_priv *priv = dev_get_drvdata(d);
  1513. int channel, pos = 0;
  1514. const char *p = buf;
/* list of space-separated channels to scan, optionally ending with 0 */
  1516. while ((channel = simple_strtol(p, NULL, 0))) {
  1517. if (pos == MAX_SPEED_SCAN - 1) {
  1518. priv->speed_scan[pos] = 0;
  1519. break;
  1520. }
  1521. if (libipw_is_valid_channel(priv->ieee, channel))
  1522. priv->speed_scan[pos++] = channel;
  1523. else
  1524. IPW_WARNING("Skipping invalid channel request: %d\n",
  1525. channel);
  1526. p = strchr(p, ' ');
  1527. if (!p)
  1528. break;
  1529. while (*p == ' ' || *p == '\t')
  1530. p++;
  1531. }
  1532. if (pos == 0)
  1533. priv->config &= ~CFG_SPEED_SCAN;
  1534. else {
  1535. priv->speed_scan_pos = 0;
  1536. priv->config |= CFG_SPEED_SCAN;
  1537. }
  1538. return count;
  1539. }
  1540. static DEVICE_ATTR_RW(speed_scan);
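/*
 * Example use of the speed_scan attribute: writing a space-separated
 * channel list such as "1 6 11" stores those channels and sets
 * CFG_SPEED_SCAN for the scan logic to use, while writing "0" empties
 * the list and clears the flag again.
 */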
  1541. static ssize_t net_stats_show(struct device *d, struct device_attribute *attr,
  1542. char *buf)
  1543. {
  1544. struct ipw_priv *priv = dev_get_drvdata(d);
  1545. return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
  1546. }
  1547. static ssize_t net_stats_store(struct device *d, struct device_attribute *attr,
  1548. const char *buf, size_t count)
  1549. {
  1550. struct ipw_priv *priv = dev_get_drvdata(d);
  1551. if (buf[0] == '1')
  1552. priv->config |= CFG_NET_STATS;
  1553. else
  1554. priv->config &= ~CFG_NET_STATS;
  1555. return count;
  1556. }
  1557. static DEVICE_ATTR_RW(net_stats);
  1558. static ssize_t channels_show(struct device *d,
  1559. struct device_attribute *attr,
  1560. char *buf)
  1561. {
  1562. struct ipw_priv *priv = dev_get_drvdata(d);
  1563. const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
  1564. int len = 0, i;
  1565. len = sprintf(&buf[len],
  1566. "Displaying %d channels in 2.4Ghz band "
  1567. "(802.11bg):\n", geo->bg_channels);
  1568. for (i = 0; i < geo->bg_channels; i++) {
  1569. len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
  1570. geo->bg[i].channel,
  1571. geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
  1572. " (radar spectrum)" : "",
  1573. ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
  1574. (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
  1575. ? "" : ", IBSS",
  1576. geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
  1577. "passive only" : "active/passive",
  1578. geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
  1579. "B" : "B/G");
  1580. }
  1581. len += sprintf(&buf[len],
  1582. "Displaying %d channels in 5.2Ghz band "
  1583. "(802.11a):\n", geo->a_channels);
  1584. for (i = 0; i < geo->a_channels; i++) {
  1585. len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
  1586. geo->a[i].channel,
  1587. geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
  1588. " (radar spectrum)" : "",
  1589. ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
  1590. (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
  1591. ? "" : ", IBSS",
  1592. geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
  1593. "passive only" : "active/passive");
  1594. }
  1595. return len;
  1596. }
  1597. static DEVICE_ATTR_ADMIN_RO(channels);
  1598. static void notify_wx_assoc_event(struct ipw_priv *priv)
  1599. {
  1600. union iwreq_data wrqu;
  1601. wrqu.ap_addr.sa_family = ARPHRD_ETHER;
  1602. if (priv->status & STATUS_ASSOCIATED)
  1603. memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
  1604. else
  1605. eth_zero_addr(wrqu.ap_addr.sa_data);
  1606. wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
  1607. }
  1608. static void ipw_irq_tasklet(struct tasklet_struct *t)
  1609. {
  1610. struct ipw_priv *priv = from_tasklet(priv, t, irq_tasklet);
  1611. u32 inta, inta_mask, handled = 0;
  1612. unsigned long flags;
  1613. spin_lock_irqsave(&priv->irq_lock, flags);
  1614. inta = ipw_read32(priv, IPW_INTA_RW);
  1615. inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
  1616. if (inta == 0xFFFFFFFF) {
  1617. /* Hardware disappeared */
  1618. IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
  1619. /* Only handle the cached INTA values */
  1620. inta = 0;
  1621. }
  1622. inta &= (IPW_INTA_MASK_ALL & inta_mask);
  1623. /* Add any cached INTA values that need to be handled */
  1624. inta |= priv->isr_inta;
  1625. spin_unlock_irqrestore(&priv->irq_lock, flags);
  1626. spin_lock_irqsave(&priv->lock, flags);
  1627. /* handle all the justifications for the interrupt */
  1628. if (inta & IPW_INTA_BIT_RX_TRANSFER) {
  1629. ipw_rx(priv);
  1630. handled |= IPW_INTA_BIT_RX_TRANSFER;
  1631. }
  1632. if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
  1633. IPW_DEBUG_HC("Command completed.\n");
  1634. ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
  1635. priv->status &= ~STATUS_HCMD_ACTIVE;
  1636. wake_up_interruptible(&priv->wait_command_queue);
  1637. handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
  1638. }
  1639. if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
  1640. IPW_DEBUG_TX("TX_QUEUE_1\n");
  1641. ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
  1642. handled |= IPW_INTA_BIT_TX_QUEUE_1;
  1643. }
  1644. if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
  1645. IPW_DEBUG_TX("TX_QUEUE_2\n");
  1646. ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
  1647. handled |= IPW_INTA_BIT_TX_QUEUE_2;
  1648. }
  1649. if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
  1650. IPW_DEBUG_TX("TX_QUEUE_3\n");
  1651. ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
  1652. handled |= IPW_INTA_BIT_TX_QUEUE_3;
  1653. }
  1654. if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
  1655. IPW_DEBUG_TX("TX_QUEUE_4\n");
  1656. ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
  1657. handled |= IPW_INTA_BIT_TX_QUEUE_4;
  1658. }
  1659. if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
  1660. IPW_WARNING("STATUS_CHANGE\n");
  1661. handled |= IPW_INTA_BIT_STATUS_CHANGE;
  1662. }
  1663. if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
  1664. IPW_WARNING("TX_PERIOD_EXPIRED\n");
  1665. handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
  1666. }
  1667. if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
  1668. IPW_WARNING("HOST_CMD_DONE\n");
  1669. handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
  1670. }
  1671. if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
  1672. IPW_WARNING("FW_INITIALIZATION_DONE\n");
  1673. handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
  1674. }
  1675. if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
  1676. IPW_WARNING("PHY_OFF_DONE\n");
  1677. handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
  1678. }
  1679. if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
  1680. IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
  1681. priv->status |= STATUS_RF_KILL_HW;
  1682. wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
  1683. wake_up_interruptible(&priv->wait_command_queue);
  1684. priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
  1685. cancel_delayed_work(&priv->request_scan);
  1686. cancel_delayed_work(&priv->request_direct_scan);
  1687. cancel_delayed_work(&priv->request_passive_scan);
  1688. cancel_delayed_work(&priv->scan_event);
  1689. schedule_work(&priv->link_down);
  1690. schedule_delayed_work(&priv->rf_kill, 2 * HZ);
  1691. handled |= IPW_INTA_BIT_RF_KILL_DONE;
  1692. }
  1693. if (inta & IPW_INTA_BIT_FATAL_ERROR) {
  1694. IPW_WARNING("Firmware error detected. Restarting.\n");
  1695. if (priv->error) {
  1696. IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
  1697. if (ipw_debug_level & IPW_DL_FW_ERRORS) {
  1698. struct ipw_fw_error *error =
  1699. ipw_alloc_error_log(priv);
  1700. ipw_dump_error_log(priv, error);
  1701. kfree(error);
  1702. }
  1703. } else {
  1704. priv->error = ipw_alloc_error_log(priv);
  1705. if (priv->error)
  1706. IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
  1707. else
  1708. IPW_DEBUG_FW("Error allocating sysfs 'error' "
  1709. "log.\n");
  1710. if (ipw_debug_level & IPW_DL_FW_ERRORS)
  1711. ipw_dump_error_log(priv, priv->error);
  1712. }
  1713. /* XXX: If hardware encryption is for WPA/WPA2,
  1714. * we have to notify the supplicant. */
  1715. if (priv->ieee->sec.encrypt) {
  1716. priv->status &= ~STATUS_ASSOCIATED;
  1717. notify_wx_assoc_event(priv);
  1718. }
  1719. /* Keep the restart process from trying to send host
  1720. * commands by clearing the INIT status bit */
  1721. priv->status &= ~STATUS_INIT;
  1722. /* Cancel currently queued command. */
  1723. priv->status &= ~STATUS_HCMD_ACTIVE;
  1724. wake_up_interruptible(&priv->wait_command_queue);
  1725. schedule_work(&priv->adapter_restart);
  1726. handled |= IPW_INTA_BIT_FATAL_ERROR;
  1727. }
  1728. if (inta & IPW_INTA_BIT_PARITY_ERROR) {
  1729. IPW_ERROR("Parity error\n");
  1730. handled |= IPW_INTA_BIT_PARITY_ERROR;
  1731. }
  1732. if (handled != inta) {
  1733. IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
  1734. }
  1735. spin_unlock_irqrestore(&priv->lock, flags);
  1736. /* enable all interrupts */
  1737. ipw_enable_interrupts(priv);
  1738. }
  1739. #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
  1740. static char *get_cmd_string(u8 cmd)
  1741. {
  1742. switch (cmd) {
  1743. IPW_CMD(HOST_COMPLETE);
  1744. IPW_CMD(POWER_DOWN);
  1745. IPW_CMD(SYSTEM_CONFIG);
  1746. IPW_CMD(MULTICAST_ADDRESS);
  1747. IPW_CMD(SSID);
  1748. IPW_CMD(ADAPTER_ADDRESS);
  1749. IPW_CMD(PORT_TYPE);
  1750. IPW_CMD(RTS_THRESHOLD);
  1751. IPW_CMD(FRAG_THRESHOLD);
  1752. IPW_CMD(POWER_MODE);
  1753. IPW_CMD(WEP_KEY);
  1754. IPW_CMD(TGI_TX_KEY);
  1755. IPW_CMD(SCAN_REQUEST);
  1756. IPW_CMD(SCAN_REQUEST_EXT);
  1757. IPW_CMD(ASSOCIATE);
  1758. IPW_CMD(SUPPORTED_RATES);
  1759. IPW_CMD(SCAN_ABORT);
  1760. IPW_CMD(TX_FLUSH);
  1761. IPW_CMD(QOS_PARAMETERS);
  1762. IPW_CMD(DINO_CONFIG);
  1763. IPW_CMD(RSN_CAPABILITIES);
  1764. IPW_CMD(RX_KEY);
  1765. IPW_CMD(CARD_DISABLE);
  1766. IPW_CMD(SEED_NUMBER);
  1767. IPW_CMD(TX_POWER);
  1768. IPW_CMD(COUNTRY_INFO);
  1769. IPW_CMD(AIRONET_INFO);
  1770. IPW_CMD(AP_TX_POWER);
  1771. IPW_CMD(CCKM_INFO);
  1772. IPW_CMD(CCX_VER_INFO);
  1773. IPW_CMD(SET_CALIBRATION);
  1774. IPW_CMD(SENSITIVITY_CALIB);
  1775. IPW_CMD(RETRY_LIMIT);
  1776. IPW_CMD(IPW_PRE_POWER_DOWN);
  1777. IPW_CMD(VAP_BEACON_TEMPLATE);
  1778. IPW_CMD(VAP_DTIM_PERIOD);
  1779. IPW_CMD(EXT_SUPPORTED_RATES);
  1780. IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
  1781. IPW_CMD(VAP_QUIET_INTERVALS);
  1782. IPW_CMD(VAP_CHANNEL_SWITCH);
  1783. IPW_CMD(VAP_MANDATORY_CHANNELS);
  1784. IPW_CMD(VAP_CELL_PWR_LIMIT);
  1785. IPW_CMD(VAP_CF_PARAM_SET);
  1786. IPW_CMD(VAP_SET_BEACONING_STATE);
  1787. IPW_CMD(MEASUREMENT);
  1788. IPW_CMD(POWER_CAPABILITY);
  1789. IPW_CMD(SUPPORTED_CHANNELS);
  1790. IPW_CMD(TPC_REPORT);
  1791. IPW_CMD(WME_INFO);
  1792. IPW_CMD(PRODUCTION_COMMAND);
  1793. default:
  1794. return "UNKNOWN";
  1795. }
  1796. }
  1797. #define HOST_COMPLETE_TIMEOUT HZ
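/*
 * Send a host command to the firmware and wait for completion.
 *
 * Rough outline of __ipw_send_cmd() below: mark STATUS_HCMD_ACTIVE under the
 * lock (only one host command may be in flight at a time), optionally record
 * the command in the cmdlog ring, queue it with ipw_queue_tx_hcmd(), then
 * sleep on wait_command_queue until the TX_CMD_QUEUE interrupt handler clears
 * STATUS_HCMD_ACTIVE or HOST_COMPLETE_TIMEOUT expires.  Returns 0 on success,
 * -EAGAIN if another command is still active, and -EIO on timeout or RF kill.
 */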
  1798. static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
  1799. {
  1800. int rc = 0;
  1801. unsigned long flags;
  1802. unsigned long now, end;
  1803. spin_lock_irqsave(&priv->lock, flags);
  1804. if (priv->status & STATUS_HCMD_ACTIVE) {
  1805. IPW_ERROR("Failed to send %s: Already sending a command.\n",
  1806. get_cmd_string(cmd->cmd));
  1807. spin_unlock_irqrestore(&priv->lock, flags);
  1808. return -EAGAIN;
  1809. }
  1810. priv->status |= STATUS_HCMD_ACTIVE;
  1811. if (priv->cmdlog) {
  1812. priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
  1813. priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
  1814. priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
  1815. memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
  1816. cmd->len);
  1817. priv->cmdlog[priv->cmdlog_pos].retcode = -1;
  1818. }
  1819. IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
  1820. get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
  1821. priv->status);
  1822. #ifndef DEBUG_CMD_WEP_KEY
  1823. if (cmd->cmd == IPW_CMD_WEP_KEY)
1824. IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
  1825. else
  1826. #endif
  1827. printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
  1828. rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
  1829. if (rc) {
  1830. priv->status &= ~STATUS_HCMD_ACTIVE;
  1831. IPW_ERROR("Failed to send %s: Reason %d\n",
  1832. get_cmd_string(cmd->cmd), rc);
  1833. spin_unlock_irqrestore(&priv->lock, flags);
  1834. goto exit;
  1835. }
  1836. spin_unlock_irqrestore(&priv->lock, flags);
  1837. now = jiffies;
  1838. end = now + HOST_COMPLETE_TIMEOUT;
  1839. again:
  1840. rc = wait_event_interruptible_timeout(priv->wait_command_queue,
  1841. !(priv->
  1842. status & STATUS_HCMD_ACTIVE),
  1843. end - now);
  1844. if (rc < 0) {
  1845. now = jiffies;
  1846. if (time_before(now, end))
  1847. goto again;
  1848. rc = 0;
  1849. }
  1850. if (rc == 0) {
  1851. spin_lock_irqsave(&priv->lock, flags);
  1852. if (priv->status & STATUS_HCMD_ACTIVE) {
  1853. IPW_ERROR("Failed to send %s: Command timed out.\n",
  1854. get_cmd_string(cmd->cmd));
  1855. priv->status &= ~STATUS_HCMD_ACTIVE;
  1856. spin_unlock_irqrestore(&priv->lock, flags);
  1857. rc = -EIO;
  1858. goto exit;
  1859. }
  1860. spin_unlock_irqrestore(&priv->lock, flags);
  1861. } else
  1862. rc = 0;
  1863. if (priv->status & STATUS_RF_KILL_HW) {
  1864. IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
  1865. get_cmd_string(cmd->cmd));
  1866. rc = -EIO;
  1867. goto exit;
  1868. }
  1869. exit:
  1870. if (priv->cmdlog) {
  1871. priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
  1872. priv->cmdlog_pos %= priv->cmdlog_len;
  1873. }
  1874. return rc;
  1875. }
  1876. static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
  1877. {
  1878. struct host_cmd cmd = {
  1879. .cmd = command,
  1880. };
  1881. return __ipw_send_cmd(priv, &cmd);
  1882. }
  1883. static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
  1884. const void *data)
  1885. {
  1886. struct host_cmd cmd = {
  1887. .cmd = command,
  1888. .len = len,
  1889. .param = data,
  1890. };
  1891. return __ipw_send_cmd(priv, &cmd);
  1892. }
  1893. static int ipw_send_host_complete(struct ipw_priv *priv)
  1894. {
  1895. if (!priv) {
  1896. IPW_ERROR("Invalid args\n");
  1897. return -1;
  1898. }
  1899. return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
  1900. }
  1901. static int ipw_send_system_config(struct ipw_priv *priv)
  1902. {
  1903. return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
  1904. sizeof(priv->sys_config),
  1905. &priv->sys_config);
  1906. }
  1907. static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
  1908. {
  1909. if (!priv || !ssid) {
  1910. IPW_ERROR("Invalid args\n");
  1911. return -1;
  1912. }
  1913. return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
  1914. ssid);
  1915. }
  1916. static int ipw_send_adapter_address(struct ipw_priv *priv, const u8 * mac)
  1917. {
  1918. if (!priv || !mac) {
  1919. IPW_ERROR("Invalid args\n");
  1920. return -1;
  1921. }
  1922. IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
  1923. priv->net_dev->name, mac);
  1924. return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
  1925. }
  1926. static void ipw_adapter_restart(void *adapter)
  1927. {
  1928. struct ipw_priv *priv = adapter;
  1929. if (priv->status & STATUS_RF_KILL_MASK)
  1930. return;
  1931. ipw_down(priv);
  1932. if (priv->assoc_network &&
  1933. (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
  1934. ipw_remove_current_network(priv);
  1935. if (ipw_up(priv)) {
  1936. IPW_ERROR("Failed to up device\n");
  1937. return;
  1938. }
  1939. }
  1940. static void ipw_bg_adapter_restart(struct work_struct *work)
  1941. {
  1942. struct ipw_priv *priv =
  1943. container_of(work, struct ipw_priv, adapter_restart);
  1944. mutex_lock(&priv->mutex);
  1945. ipw_adapter_restart(priv);
  1946. mutex_unlock(&priv->mutex);
  1947. }
  1948. static void ipw_abort_scan(struct ipw_priv *priv);
  1949. #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
  1950. static void ipw_scan_check(void *data)
  1951. {
  1952. struct ipw_priv *priv = data;
  1953. if (priv->status & STATUS_SCAN_ABORTING) {
  1954. IPW_DEBUG_SCAN("Scan completion watchdog resetting "
  1955. "adapter after (%dms).\n",
  1956. jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
  1957. schedule_work(&priv->adapter_restart);
  1958. } else if (priv->status & STATUS_SCANNING) {
  1959. IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
  1960. "after (%dms).\n",
  1961. jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
  1962. ipw_abort_scan(priv);
  1963. schedule_delayed_work(&priv->scan_check, HZ);
  1964. }
  1965. }
  1966. static void ipw_bg_scan_check(struct work_struct *work)
  1967. {
  1968. struct ipw_priv *priv =
  1969. container_of(work, struct ipw_priv, scan_check.work);
  1970. mutex_lock(&priv->mutex);
  1971. ipw_scan_check(priv);
  1972. mutex_unlock(&priv->mutex);
  1973. }
  1974. static int ipw_send_scan_request_ext(struct ipw_priv *priv,
  1975. struct ipw_scan_request_ext *request)
  1976. {
  1977. return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
  1978. sizeof(*request), request);
  1979. }
  1980. static int ipw_send_scan_abort(struct ipw_priv *priv)
  1981. {
  1982. if (!priv) {
  1983. IPW_ERROR("Invalid args\n");
  1984. return -1;
  1985. }
  1986. return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
  1987. }
  1988. static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
  1989. {
  1990. struct ipw_sensitivity_calib calib = {
  1991. .beacon_rssi_raw = cpu_to_le16(sens),
  1992. };
  1993. return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
  1994. &calib);
  1995. }
  1996. static int ipw_send_associate(struct ipw_priv *priv,
  1997. struct ipw_associate *associate)
  1998. {
  1999. if (!priv || !associate) {
  2000. IPW_ERROR("Invalid args\n");
  2001. return -1;
  2002. }
  2003. return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
  2004. associate);
  2005. }
  2006. static int ipw_send_supported_rates(struct ipw_priv *priv,
  2007. struct ipw_supported_rates *rates)
  2008. {
  2009. if (!priv || !rates) {
  2010. IPW_ERROR("Invalid args\n");
  2011. return -1;
  2012. }
  2013. return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
  2014. rates);
  2015. }
  2016. static int ipw_set_random_seed(struct ipw_priv *priv)
  2017. {
  2018. u32 val;
  2019. if (!priv) {
  2020. IPW_ERROR("Invalid args\n");
  2021. return -1;
  2022. }
  2023. get_random_bytes(&val, sizeof(val));
  2024. return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
  2025. }
  2026. static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
  2027. {
  2028. __le32 v = cpu_to_le32(phy_off);
  2029. if (!priv) {
  2030. IPW_ERROR("Invalid args\n");
  2031. return -1;
  2032. }
  2033. return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
  2034. }
  2035. static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
  2036. {
  2037. if (!priv || !power) {
  2038. IPW_ERROR("Invalid args\n");
  2039. return -1;
  2040. }
  2041. return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
  2042. }
  2043. static int ipw_set_tx_power(struct ipw_priv *priv)
  2044. {
  2045. const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
  2046. struct ipw_tx_power tx_power;
  2047. s8 max_power;
  2048. int i;
  2049. memset(&tx_power, 0, sizeof(tx_power));
  2050. /* configure device for 'G' band */
  2051. tx_power.ieee_mode = IPW_G_MODE;
  2052. tx_power.num_channels = geo->bg_channels;
  2053. for (i = 0; i < geo->bg_channels; i++) {
  2054. max_power = geo->bg[i].max_power;
  2055. tx_power.channels_tx_power[i].channel_number =
  2056. geo->bg[i].channel;
  2057. tx_power.channels_tx_power[i].tx_power = max_power ?
  2058. min(max_power, priv->tx_power) : priv->tx_power;
  2059. }
  2060. if (ipw_send_tx_power(priv, &tx_power))
  2061. return -EIO;
  2062. /* configure device to also handle 'B' band */
  2063. tx_power.ieee_mode = IPW_B_MODE;
  2064. if (ipw_send_tx_power(priv, &tx_power))
  2065. return -EIO;
  2066. /* configure device to also handle 'A' band */
  2067. if (priv->ieee->abg_true) {
  2068. tx_power.ieee_mode = IPW_A_MODE;
  2069. tx_power.num_channels = geo->a_channels;
  2070. for (i = 0; i < tx_power.num_channels; i++) {
  2071. max_power = geo->a[i].max_power;
  2072. tx_power.channels_tx_power[i].channel_number =
  2073. geo->a[i].channel;
  2074. tx_power.channels_tx_power[i].tx_power = max_power ?
  2075. min(max_power, priv->tx_power) : priv->tx_power;
  2076. }
  2077. if (ipw_send_tx_power(priv, &tx_power))
  2078. return -EIO;
  2079. }
  2080. return 0;
  2081. }
  2082. static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
  2083. {
  2084. struct ipw_rts_threshold rts_threshold = {
  2085. .rts_threshold = cpu_to_le16(rts),
  2086. };
  2087. if (!priv) {
  2088. IPW_ERROR("Invalid args\n");
  2089. return -1;
  2090. }
  2091. return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
  2092. sizeof(rts_threshold), &rts_threshold);
  2093. }
  2094. static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
  2095. {
  2096. struct ipw_frag_threshold frag_threshold = {
  2097. .frag_threshold = cpu_to_le16(frag),
  2098. };
  2099. if (!priv) {
  2100. IPW_ERROR("Invalid args\n");
  2101. return -1;
  2102. }
  2103. return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
  2104. sizeof(frag_threshold), &frag_threshold);
  2105. }
  2106. static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
  2107. {
  2108. __le32 param;
  2109. if (!priv) {
  2110. IPW_ERROR("Invalid args\n");
  2111. return -1;
  2112. }
2113. /* If on battery, set power index 3; if on AC, set CAM; otherwise use
2114. * the user-requested level */
  2115. switch (mode) {
  2116. case IPW_POWER_BATTERY:
  2117. param = cpu_to_le32(IPW_POWER_INDEX_3);
  2118. break;
  2119. case IPW_POWER_AC:
  2120. param = cpu_to_le32(IPW_POWER_MODE_CAM);
  2121. break;
  2122. default:
  2123. param = cpu_to_le32(mode);
  2124. break;
  2125. }
  2126. return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
  2127. &param);
  2128. }
  2129. static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
  2130. {
  2131. struct ipw_retry_limit retry_limit = {
  2132. .short_retry_limit = slimit,
  2133. .long_retry_limit = llimit
  2134. };
  2135. if (!priv) {
  2136. IPW_ERROR("Invalid args\n");
  2137. return -1;
  2138. }
  2139. return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
  2140. &retry_limit);
  2141. }
  2142. /*
  2143. * The IPW device contains a Microwire compatible EEPROM that stores
  2144. * various data like the MAC address. Usually the firmware has exclusive
  2145. * access to the eeprom, but during device initialization (before the
  2146. * device driver has sent the HostComplete command to the firmware) the
  2147. * device driver has read access to the EEPROM by way of indirect addressing
  2148. * through a couple of memory mapped registers.
  2149. *
  2150. * The following is a simplified implementation for pulling data out of the
  2151. * eeprom, along with some helper functions to find information in
  2152. * the per device private data's copy of the eeprom.
  2153. *
2154. * NOTE: To better understand how these functions work (i.e. what is a chip
2155. * select and why do we have to keep driving the eeprom clock?), read
2156. * just about any data sheet for a Microwire compatible EEPROM.
  2157. */
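/*
 * For illustration, a single eeprom_read_u16() call below clocks out roughly
 * the following Microwire sequence (assuming EEPROM_CMD_READ is the standard
 * Microwire READ opcode):
 *
 *   assert CS -> start bit '1' -> two opcode bits -> 8 address bits
 *   (MSB first) -> one dummy clock -> 16 data bits sampled from DO ->
 *   release CS
 *
 * Each bit is driven by a pair of eeprom_write_reg() calls that toggle
 * EEPROM_BIT_SK while holding EEPROM_BIT_CS (and EEPROM_BIT_DI for writes).
 */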
  2158. /* write a 32 bit value into the indirect accessor register */
  2159. static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
  2160. {
  2161. ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
  2162. /* the eeprom requires some time to complete the operation */
  2163. udelay(p->eeprom_delay);
  2164. }
  2165. /* perform a chip select operation */
  2166. static void eeprom_cs(struct ipw_priv *priv)
  2167. {
  2168. eeprom_write_reg(priv, 0);
  2169. eeprom_write_reg(priv, EEPROM_BIT_CS);
  2170. eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
  2171. eeprom_write_reg(priv, EEPROM_BIT_CS);
  2172. }
2173. /* release the chip select to end the eeprom operation */
  2174. static void eeprom_disable_cs(struct ipw_priv *priv)
  2175. {
  2176. eeprom_write_reg(priv, EEPROM_BIT_CS);
  2177. eeprom_write_reg(priv, 0);
  2178. eeprom_write_reg(priv, EEPROM_BIT_SK);
  2179. }
  2180. /* push a single bit down to the eeprom */
  2181. static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
  2182. {
  2183. int d = (bit ? EEPROM_BIT_DI : 0);
  2184. eeprom_write_reg(p, EEPROM_BIT_CS | d);
  2185. eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
  2186. }
  2187. /* push an opcode followed by an address down to the eeprom */
  2188. static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
  2189. {
  2190. int i;
  2191. eeprom_cs(priv);
  2192. eeprom_write_bit(priv, 1);
  2193. eeprom_write_bit(priv, op & 2);
  2194. eeprom_write_bit(priv, op & 1);
  2195. for (i = 7; i >= 0; i--) {
  2196. eeprom_write_bit(priv, addr & (1 << i));
  2197. }
  2198. }
  2199. /* pull 16 bits off the eeprom, one bit at a time */
  2200. static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
  2201. {
  2202. int i;
  2203. u16 r = 0;
  2204. /* Send READ Opcode */
  2205. eeprom_op(priv, EEPROM_CMD_READ, addr);
  2206. /* Send dummy bit */
  2207. eeprom_write_reg(priv, EEPROM_BIT_CS);
2208. /* Read the 16-bit word off the eeprom one bit at a time */
  2209. for (i = 0; i < 16; i++) {
  2210. u32 data = 0;
  2211. eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
  2212. eeprom_write_reg(priv, EEPROM_BIT_CS);
  2213. data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
  2214. r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
  2215. }
  2216. /* Send another dummy bit */
  2217. eeprom_write_reg(priv, 0);
  2218. eeprom_disable_cs(priv);
  2219. return r;
  2220. }
  2221. /* helper function for pulling the mac address out of the private */
  2222. /* data's copy of the eeprom data */
  2223. static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
  2224. {
  2225. memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
  2226. }
  2227. static void ipw_read_eeprom(struct ipw_priv *priv)
  2228. {
  2229. int i;
  2230. __le16 *eeprom = (__le16 *) priv->eeprom;
  2231. IPW_DEBUG_TRACE(">>\n");
  2232. /* read entire contents of eeprom into private buffer */
  2233. for (i = 0; i < 128; i++)
  2234. eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
  2235. IPW_DEBUG_TRACE("<<\n");
  2236. }
  2237. /*
  2238. * Either the device driver (i.e. the host) or the firmware can
  2239. * load eeprom data into the designated region in SRAM. If neither
  2240. * happens then the FW will shutdown with a fatal error.
  2241. *
2242. * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2243. * word in the shared SRAM region needs to be non-zero.
  2244. */
  2245. static void ipw_eeprom_init_sram(struct ipw_priv *priv)
  2246. {
  2247. int i;
  2248. IPW_DEBUG_TRACE(">>\n");
  2249. /*
  2250. If the data looks correct, then copy it to our private
  2251. copy. Otherwise let the firmware know to perform the operation
  2252. on its own.
  2253. */
  2254. if (priv->eeprom[EEPROM_VERSION] != 0) {
  2255. IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
  2256. /* write the eeprom data to sram */
  2257. for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
  2258. ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
  2259. /* Do not load eeprom data on fatal error or suspend */
  2260. ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
  2261. } else {
  2262. IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
  2263. /* Load eeprom data on fatal error or suspend */
  2264. ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
  2265. }
  2266. IPW_DEBUG_TRACE("<<\n");
  2267. }
  2268. static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
  2269. {
  2270. count >>= 2;
  2271. if (!count)
  2272. return;
  2273. _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
  2274. while (count--)
  2275. _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
  2276. }
  2277. static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
  2278. {
  2279. ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
  2280. CB_NUMBER_OF_ELEMENTS_SMALL *
  2281. sizeof(struct command_block));
  2282. }
  2283. static int ipw_fw_dma_enable(struct ipw_priv *priv)
  2284. { /* start dma engine but no transfers yet */
  2285. IPW_DEBUG_FW(">> :\n");
  2286. /* Start the dma */
  2287. ipw_fw_dma_reset_command_blocks(priv);
  2288. /* Write CB base address */
  2289. ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
  2290. IPW_DEBUG_FW("<< :\n");
  2291. return 0;
  2292. }
  2293. static void ipw_fw_dma_abort(struct ipw_priv *priv)
  2294. {
  2295. u32 control = 0;
  2296. IPW_DEBUG_FW(">> :\n");
  2297. /* set the Stop and Abort bit */
  2298. control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
  2299. ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
  2300. priv->sram_desc.last_cb_index = 0;
  2301. IPW_DEBUG_FW("<<\n");
  2302. }
  2303. static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
  2304. struct command_block *cb)
  2305. {
  2306. u32 address =
  2307. IPW_SHARED_SRAM_DMA_CONTROL +
  2308. (sizeof(struct command_block) * index);
  2309. IPW_DEBUG_FW(">> :\n");
  2310. ipw_write_indirect(priv, address, (u8 *) cb,
  2311. (int)sizeof(struct command_block));
  2312. IPW_DEBUG_FW("<< :\n");
  2313. return 0;
  2314. }
  2315. static int ipw_fw_dma_kick(struct ipw_priv *priv)
  2316. {
  2317. u32 control = 0;
  2318. u32 index = 0;
  2319. IPW_DEBUG_FW(">> :\n");
  2320. for (index = 0; index < priv->sram_desc.last_cb_index; index++)
  2321. ipw_fw_dma_write_command_block(priv, index,
  2322. &priv->sram_desc.cb_list[index]);
  2323. /* Enable the DMA in the CSR register */
  2324. ipw_clear_bit(priv, IPW_RESET_REG,
  2325. IPW_RESET_REG_MASTER_DISABLED |
  2326. IPW_RESET_REG_STOP_MASTER);
  2327. /* Set the Start bit. */
  2328. control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
  2329. ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
  2330. IPW_DEBUG_FW("<< :\n");
  2331. return 0;
  2332. }
  2333. static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
  2334. {
  2335. u32 address;
  2336. u32 register_value = 0;
  2337. u32 cb_fields_address = 0;
  2338. IPW_DEBUG_FW(">> :\n");
  2339. address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
  2340. IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2341. /* Read the DMA Control register */
  2342. register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
  2343. IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
  2344. /* Print the CB values */
  2345. cb_fields_address = address;
  2346. register_value = ipw_read_reg32(priv, cb_fields_address);
  2347. IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
  2348. cb_fields_address += sizeof(u32);
  2349. register_value = ipw_read_reg32(priv, cb_fields_address);
  2350. IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
  2351. cb_fields_address += sizeof(u32);
  2352. register_value = ipw_read_reg32(priv, cb_fields_address);
  2353. IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
  2354. register_value);
  2355. cb_fields_address += sizeof(u32);
  2356. register_value = ipw_read_reg32(priv, cb_fields_address);
  2357. IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2358. IPW_DEBUG_FW("<< :\n");
  2359. }
  2360. static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
  2361. {
  2362. u32 current_cb_address = 0;
  2363. u32 current_cb_index = 0;
2364. IPW_DEBUG_FW(">> :\n");
  2365. current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
  2366. current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
  2367. sizeof(struct command_block);
  2368. IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
  2369. current_cb_index, current_cb_address);
2370. IPW_DEBUG_FW("<< :\n");
  2371. return current_cb_index;
  2372. }
  2373. static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
  2374. u32 src_address,
  2375. u32 dest_address,
  2376. u32 length,
  2377. int interrupt_enabled, int is_last)
  2378. {
  2379. u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
  2380. CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
  2381. CB_DEST_SIZE_LONG;
  2382. struct command_block *cb;
  2383. u32 last_cb_element = 0;
  2384. IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
  2385. src_address, dest_address, length);
  2386. if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
  2387. return -1;
  2388. last_cb_element = priv->sram_desc.last_cb_index;
  2389. cb = &priv->sram_desc.cb_list[last_cb_element];
  2390. priv->sram_desc.last_cb_index++;
  2391. /* Calculate the new CB control word */
  2392. if (interrupt_enabled)
  2393. control |= CB_INT_ENABLED;
  2394. if (is_last)
  2395. control |= CB_LAST_VALID;
  2396. control |= length;
  2397. /* Calculate the CB Element's checksum value */
  2398. cb->status = control ^ src_address ^ dest_address;
  2399. /* Copy the Source and Destination addresses */
  2400. cb->dest_addr = dest_address;
  2401. cb->source_addr = src_address;
  2402. /* Copy the Control Word last */
  2403. cb->control = control;
  2404. return 0;
  2405. }
  2406. static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
  2407. int nr, u32 dest_address, u32 len)
  2408. {
  2409. int ret, i;
  2410. u32 size;
  2411. IPW_DEBUG_FW(">>\n");
  2412. IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
  2413. nr, dest_address, len);
  2414. for (i = 0; i < nr; i++) {
  2415. size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
  2416. ret = ipw_fw_dma_add_command_block(priv, src_address[i],
  2417. dest_address +
  2418. i * CB_MAX_LENGTH, size,
  2419. 0, 0);
  2420. if (ret) {
  2421. IPW_DEBUG_FW_INFO(": Failed\n");
  2422. return -1;
  2423. } else
  2424. IPW_DEBUG_FW_INFO(": Added new cb\n");
  2425. }
  2426. IPW_DEBUG_FW("<<\n");
  2427. return 0;
  2428. }
  2429. static int ipw_fw_dma_wait(struct ipw_priv *priv)
  2430. {
  2431. u32 current_index = 0, previous_index;
  2432. u32 watchdog = 0;
  2433. IPW_DEBUG_FW(">> :\n");
  2434. current_index = ipw_fw_dma_command_block_index(priv);
  2435. IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
  2436. (int)priv->sram_desc.last_cb_index);
  2437. while (current_index < priv->sram_desc.last_cb_index) {
  2438. udelay(50);
  2439. previous_index = current_index;
  2440. current_index = ipw_fw_dma_command_block_index(priv);
  2441. if (previous_index < current_index) {
  2442. watchdog = 0;
  2443. continue;
  2444. }
  2445. if (++watchdog > 400) {
  2446. IPW_DEBUG_FW_INFO("Timeout\n");
  2447. ipw_fw_dma_dump_command_block(priv);
  2448. ipw_fw_dma_abort(priv);
  2449. return -1;
  2450. }
  2451. }
  2452. ipw_fw_dma_abort(priv);
2453. /* Disable the DMA in the CSR register */
  2454. ipw_set_bit(priv, IPW_RESET_REG,
  2455. IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
  2456. IPW_DEBUG_FW("<< dmaWaitSync\n");
  2457. return 0;
  2458. }
  2459. static void ipw_remove_current_network(struct ipw_priv *priv)
  2460. {
  2461. struct list_head *element, *safe;
  2462. struct libipw_network *network = NULL;
  2463. unsigned long flags;
  2464. spin_lock_irqsave(&priv->ieee->lock, flags);
  2465. list_for_each_safe(element, safe, &priv->ieee->network_list) {
  2466. network = list_entry(element, struct libipw_network, list);
  2467. if (ether_addr_equal(network->bssid, priv->bssid)) {
  2468. list_del(element);
  2469. list_add_tail(&network->list,
  2470. &priv->ieee->network_free_list);
  2471. }
  2472. }
  2473. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  2474. }
  2475. /*
  2476. * Check that card is still alive.
  2477. * Reads debug register from domain0.
  2478. * If card is present, pre-defined value should
  2479. * be found there.
  2480. *
  2481. * @param priv
  2482. * @return 1 if card is present, 0 otherwise
  2483. */
  2484. static inline int ipw_alive(struct ipw_priv *priv)
  2485. {
  2486. return ipw_read32(priv, 0x90) == 0xd55555d5;
  2487. }
  2488. /* timeout in msec, attempted in 10-msec quanta */
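/* Returns the elapsed time in msec once all bits in 'mask' are set, or
 * -ETIME if they are still clear when 'timeout' expires. */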
  2489. static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
  2490. int timeout)
  2491. {
  2492. int i = 0;
  2493. do {
  2494. if ((ipw_read32(priv, addr) & mask) == mask)
  2495. return i;
  2496. mdelay(10);
  2497. i += 10;
  2498. } while (i < timeout);
  2499. return -ETIME;
  2500. }
  2501. /* These functions load the firmware and micro code for the operation of
  2502. * the ipw hardware. It assumes the buffer has all the bits for the
  2503. * image and the caller is handling the memory allocation and clean up.
  2504. */
  2505. static int ipw_stop_master(struct ipw_priv *priv)
  2506. {
  2507. int rc;
  2508. IPW_DEBUG_TRACE(">>\n");
  2509. /* stop master. typical delay - 0 */
  2510. ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
  2511. /* timeout is in msec, polled in 10-msec quanta */
  2512. rc = ipw_poll_bit(priv, IPW_RESET_REG,
  2513. IPW_RESET_REG_MASTER_DISABLED, 100);
  2514. if (rc < 0) {
  2515. IPW_ERROR("wait for stop master failed after 100ms\n");
  2516. return -1;
  2517. }
  2518. IPW_DEBUG_INFO("stop master %dms\n", rc);
  2519. return rc;
  2520. }
  2521. static void ipw_arc_release(struct ipw_priv *priv)
  2522. {
  2523. IPW_DEBUG_TRACE(">>\n");
  2524. mdelay(5);
  2525. ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
  2526. /* no one knows timing, for safety add some delay */
  2527. mdelay(5);
  2528. }
  2529. struct fw_chunk {
  2530. __le32 address;
  2531. __le32 length;
  2532. };
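/*
 * The firmware image passed to ipw_load_firmware() is a stream of chunks:
 * each struct fw_chunk header (destination address and length) is followed
 * immediately by 'length' bytes of payload, and the next header starts right
 * after that payload.  Each chunk is split into CB_MAX_LENGTH sized pieces
 * when it is queued on the DMA command blocks.
 */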
  2533. static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
  2534. {
  2535. int rc = 0, i, addr;
  2536. u8 cr = 0;
  2537. __le16 *image;
  2538. image = (__le16 *) data;
  2539. IPW_DEBUG_TRACE(">>\n");
  2540. rc = ipw_stop_master(priv);
  2541. if (rc < 0)
  2542. return rc;
  2543. for (addr = IPW_SHARED_LOWER_BOUND;
  2544. addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
  2545. ipw_write32(priv, addr, 0);
  2546. }
  2547. /* no ucode (yet) */
  2548. memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
  2549. /* destroy DMA queues */
  2550. /* reset sequence */
  2551. ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
  2552. ipw_arc_release(priv);
  2553. ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
  2554. mdelay(1);
  2555. /* reset PHY */
  2556. ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
  2557. mdelay(1);
  2558. ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
  2559. mdelay(1);
  2560. /* enable ucode store */
  2561. ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
  2562. ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
  2563. mdelay(1);
  2564. /* write ucode */
  2565. /*
  2566. * @bug
  2567. * Do NOT set indirect address register once and then
  2568. * store data to indirect data register in the loop.
2569. * It seems very reasonable, but in this case DINO does not
2570. * accept the ucode. It is essential to set the address each time.
  2571. */
  2572. /* load new ipw uCode */
  2573. for (i = 0; i < len / 2; i++)
  2574. ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
  2575. le16_to_cpu(image[i]));
  2576. /* enable DINO */
  2577. ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
  2578. ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2579. /* this is where the igx / win driver deviates from the VAP driver. */
  2580. /* wait for alive response */
  2581. for (i = 0; i < 100; i++) {
  2582. /* poll for incoming data */
  2583. cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
  2584. if (cr & DINO_RXFIFO_DATA)
  2585. break;
  2586. mdelay(1);
  2587. }
  2588. if (cr & DINO_RXFIFO_DATA) {
2589. /* alive_command_response size is NOT a multiple of 4 */
  2590. __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
  2591. for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
  2592. response_buffer[i] =
  2593. cpu_to_le32(ipw_read_reg32(priv,
  2594. IPW_BASEBAND_RX_FIFO_READ));
  2595. memcpy(&priv->dino_alive, response_buffer,
  2596. sizeof(priv->dino_alive));
  2597. if (priv->dino_alive.alive_command == 1
  2598. && priv->dino_alive.ucode_valid == 1) {
  2599. rc = 0;
  2600. IPW_DEBUG_INFO
  2601. ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
  2602. "of %02d/%02d/%02d %02d:%02d\n",
  2603. priv->dino_alive.software_revision,
  2604. priv->dino_alive.software_revision,
  2605. priv->dino_alive.device_identifier,
  2606. priv->dino_alive.device_identifier,
  2607. priv->dino_alive.time_stamp[0],
  2608. priv->dino_alive.time_stamp[1],
  2609. priv->dino_alive.time_stamp[2],
  2610. priv->dino_alive.time_stamp[3],
  2611. priv->dino_alive.time_stamp[4]);
  2612. } else {
  2613. IPW_DEBUG_INFO("Microcode is not alive\n");
  2614. rc = -EINVAL;
  2615. }
  2616. } else {
  2617. IPW_DEBUG_INFO("No alive response from DINO\n");
  2618. rc = -ETIME;
  2619. }
2620. /* disable DINO, otherwise for some reason
2621. the firmware has problems getting the alive response. */
  2622. ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
  2623. return rc;
  2624. }
  2625. static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
  2626. {
  2627. int ret = -1;
  2628. int offset = 0;
  2629. struct fw_chunk *chunk;
  2630. int total_nr = 0;
  2631. int i;
  2632. struct dma_pool *pool;
  2633. void **virts;
  2634. dma_addr_t *phys;
2635. IPW_DEBUG_TRACE(">> :\n");
  2636. virts = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(void *),
  2637. GFP_KERNEL);
  2638. if (!virts)
  2639. return -ENOMEM;
  2640. phys = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(dma_addr_t),
  2641. GFP_KERNEL);
  2642. if (!phys) {
  2643. kfree(virts);
  2644. return -ENOMEM;
  2645. }
  2646. pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0,
  2647. 0);
  2648. if (!pool) {
  2649. IPW_ERROR("dma_pool_create failed\n");
  2650. kfree(phys);
  2651. kfree(virts);
  2652. return -ENOMEM;
  2653. }
  2654. /* Start the Dma */
  2655. ret = ipw_fw_dma_enable(priv);
2656. /* if the DMA is already in use, this would be a bug. */
  2657. BUG_ON(priv->sram_desc.last_cb_index > 0);
  2658. do {
  2659. u32 chunk_len;
  2660. u8 *start;
  2661. int size;
  2662. int nr = 0;
  2663. chunk = (struct fw_chunk *)(data + offset);
  2664. offset += sizeof(struct fw_chunk);
  2665. chunk_len = le32_to_cpu(chunk->length);
  2666. start = data + offset;
  2667. nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
  2668. for (i = 0; i < nr; i++) {
  2669. virts[total_nr] = dma_pool_alloc(pool, GFP_KERNEL,
  2670. &phys[total_nr]);
  2671. if (!virts[total_nr]) {
  2672. ret = -ENOMEM;
  2673. goto out;
  2674. }
  2675. size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
  2676. CB_MAX_LENGTH);
  2677. memcpy(virts[total_nr], start, size);
  2678. start += size;
  2679. total_nr++;
  2680. /* We don't support fw chunk larger than 64*8K */
  2681. BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
  2682. }
  2683. /* build DMA packet and queue up for sending */
2684. /* dma to chunk->address, the chunk->length bytes from data +
2685. * offset */
  2686. /* Dma loading */
  2687. ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
  2688. nr, le32_to_cpu(chunk->address),
  2689. chunk_len);
  2690. if (ret) {
  2691. IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
  2692. goto out;
  2693. }
  2694. offset += chunk_len;
  2695. } while (offset < len);
  2696. /* Run the DMA and wait for the answer */
  2697. ret = ipw_fw_dma_kick(priv);
  2698. if (ret) {
  2699. IPW_ERROR("dmaKick Failed\n");
  2700. goto out;
  2701. }
  2702. ret = ipw_fw_dma_wait(priv);
  2703. if (ret) {
  2704. IPW_ERROR("dmaWaitSync Failed\n");
  2705. goto out;
  2706. }
  2707. out:
  2708. for (i = 0; i < total_nr; i++)
  2709. dma_pool_free(pool, virts[i], phys[i]);
  2710. dma_pool_destroy(pool);
  2711. kfree(phys);
  2712. kfree(virts);
  2713. return ret;
  2714. }
  2715. /* stop nic */
  2716. static int ipw_stop_nic(struct ipw_priv *priv)
  2717. {
  2718. int rc = 0;
  2719. /* stop */
  2720. ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
  2721. rc = ipw_poll_bit(priv, IPW_RESET_REG,
  2722. IPW_RESET_REG_MASTER_DISABLED, 500);
  2723. if (rc < 0) {
  2724. IPW_ERROR("wait for reg master disabled failed after 500ms\n");
  2725. return rc;
  2726. }
  2727. ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
  2728. return rc;
  2729. }
  2730. static void ipw_start_nic(struct ipw_priv *priv)
  2731. {
  2732. IPW_DEBUG_TRACE(">>\n");
  2733. /* prvHwStartNic release ARC */
  2734. ipw_clear_bit(priv, IPW_RESET_REG,
  2735. IPW_RESET_REG_MASTER_DISABLED |
  2736. IPW_RESET_REG_STOP_MASTER |
  2737. CBD_RESET_REG_PRINCETON_RESET);
  2738. /* enable power management */
  2739. ipw_set_bit(priv, IPW_GP_CNTRL_RW,
  2740. IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
  2741. IPW_DEBUG_TRACE("<<\n");
  2742. }
  2743. static int ipw_init_nic(struct ipw_priv *priv)
  2744. {
  2745. int rc;
  2746. IPW_DEBUG_TRACE(">>\n");
  2747. /* reset */
  2748. /*prvHwInitNic */
  2749. /* set "initialization complete" bit to move adapter to D0 state */
  2750. ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
  2751. /* low-level PLL activation */
  2752. ipw_write32(priv, IPW_READ_INT_REGISTER,
  2753. IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
  2754. /* wait for clock stabilization */
  2755. rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
  2756. IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
  2757. if (rc < 0)
2758. IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
  2759. /* assert SW reset */
  2760. ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
  2761. udelay(10);
  2762. /* set "initialization complete" bit to move adapter to D0 state */
  2763. ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
2764. IPW_DEBUG_TRACE("<<\n");
  2765. return 0;
  2766. }
  2767. /* Call this function from process context, it will sleep in request_firmware.
  2768. * Probe is an ok place to call this from.
  2769. */
  2770. static int ipw_reset_nic(struct ipw_priv *priv)
  2771. {
  2772. int rc = 0;
  2773. unsigned long flags;
  2774. IPW_DEBUG_TRACE(">>\n");
  2775. rc = ipw_init_nic(priv);
  2776. spin_lock_irqsave(&priv->lock, flags);
  2777. /* Clear the 'host command active' bit... */
  2778. priv->status &= ~STATUS_HCMD_ACTIVE;
  2779. wake_up_interruptible(&priv->wait_command_queue);
  2780. priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
  2781. wake_up_interruptible(&priv->wait_state);
  2782. spin_unlock_irqrestore(&priv->lock, flags);
  2783. IPW_DEBUG_TRACE("<<\n");
  2784. return rc;
  2785. }
  2786. struct ipw_fw {
  2787. __le32 ver;
  2788. __le32 boot_size;
  2789. __le32 ucode_size;
  2790. __le32 fw_size;
  2791. u8 data[];
  2792. };
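/*
 * On-disk firmware layout as parsed by ipw_get_fw() and ipw_load(): the
 * struct ipw_fw header above is followed in data[] by the boot image
 * (boot_size bytes), the DINO microcode (ucode_size bytes) and the runtime
 * firmware (fw_size bytes), concatenated in that order.
 */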
  2793. static int ipw_get_fw(struct ipw_priv *priv,
  2794. const struct firmware **raw, const char *name)
  2795. {
  2796. struct ipw_fw *fw;
  2797. int rc;
  2798. /* ask firmware_class module to get the boot firmware off disk */
  2799. rc = request_firmware(raw, name, &priv->pci_dev->dev);
  2800. if (rc < 0) {
  2801. IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
  2802. return rc;
  2803. }
  2804. if ((*raw)->size < sizeof(*fw)) {
  2805. IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
  2806. return -EINVAL;
  2807. }
  2808. fw = (void *)(*raw)->data;
  2809. if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
  2810. le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
  2811. IPW_ERROR("%s is too small or corrupt (%zd)\n",
  2812. name, (*raw)->size);
  2813. return -EINVAL;
  2814. }
  2815. IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
  2816. name,
  2817. le32_to_cpu(fw->ver) >> 16,
  2818. le32_to_cpu(fw->ver) & 0xff,
  2819. (*raw)->size - sizeof(*fw));
  2820. return 0;
  2821. }
  2822. #define IPW_RX_BUF_SIZE (3000)
  2823. static void ipw_rx_queue_reset(struct ipw_priv *priv,
  2824. struct ipw_rx_queue *rxq)
  2825. {
  2826. unsigned long flags;
  2827. int i;
  2828. spin_lock_irqsave(&rxq->lock, flags);
  2829. INIT_LIST_HEAD(&rxq->rx_free);
  2830. INIT_LIST_HEAD(&rxq->rx_used);
  2831. /* Fill the rx_used queue with _all_ of the Rx buffers */
  2832. for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
  2833. /* In the reset function, these buffers may have been allocated
  2834. * to an SKB, so we need to unmap and free potential storage */
  2835. if (rxq->pool[i].skb != NULL) {
  2836. dma_unmap_single(&priv->pci_dev->dev,
  2837. rxq->pool[i].dma_addr,
  2838. IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
  2839. dev_kfree_skb_irq(rxq->pool[i].skb);
  2840. rxq->pool[i].skb = NULL;
  2841. }
  2842. list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
  2843. }
  2844. /* Set us so that we have processed and used all buffers, but have
  2845. * not restocked the Rx queue with fresh buffers */
  2846. rxq->read = rxq->write = 0;
  2847. rxq->free_count = 0;
  2848. spin_unlock_irqrestore(&rxq->lock, flags);
  2849. }
  2850. #ifdef CONFIG_PM
  2851. static int fw_loaded = 0;
  2852. static const struct firmware *raw = NULL;
  2853. static void free_firmware(void)
  2854. {
  2855. if (fw_loaded) {
  2856. release_firmware(raw);
  2857. raw = NULL;
  2858. fw_loaded = 0;
  2859. }
  2860. }
  2861. #else
  2862. #define free_firmware() do {} while (0)
  2863. #endif
  2864. static int ipw_load(struct ipw_priv *priv)
  2865. {
  2866. #ifndef CONFIG_PM
  2867. const struct firmware *raw = NULL;
  2868. #endif
  2869. struct ipw_fw *fw;
  2870. u8 *boot_img, *ucode_img, *fw_img;
  2871. u8 *name = NULL;
  2872. int rc = 0, retries = 3;
  2873. switch (priv->ieee->iw_mode) {
  2874. case IW_MODE_ADHOC:
  2875. name = "ipw2200-ibss.fw";
  2876. break;
  2877. #ifdef CONFIG_IPW2200_MONITOR
  2878. case IW_MODE_MONITOR:
  2879. name = "ipw2200-sniffer.fw";
  2880. break;
  2881. #endif
  2882. case IW_MODE_INFRA:
  2883. name = "ipw2200-bss.fw";
  2884. break;
  2885. }
  2886. if (!name) {
  2887. rc = -EINVAL;
  2888. goto error;
  2889. }
  2890. #ifdef CONFIG_PM
  2891. if (!fw_loaded) {
  2892. #endif
  2893. rc = ipw_get_fw(priv, &raw, name);
  2894. if (rc < 0)
  2895. goto error;
  2896. #ifdef CONFIG_PM
  2897. }
  2898. #endif
  2899. fw = (void *)raw->data;
  2900. boot_img = &fw->data[0];
  2901. ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
  2902. fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
  2903. le32_to_cpu(fw->ucode_size)];
  2904. if (!priv->rxq)
  2905. priv->rxq = ipw_rx_queue_alloc(priv);
  2906. else
  2907. ipw_rx_queue_reset(priv, priv->rxq);
  2908. if (!priv->rxq) {
  2909. IPW_ERROR("Unable to initialize Rx queue\n");
  2910. rc = -ENOMEM;
  2911. goto error;
  2912. }
  2913. retry:
  2914. /* Ensure interrupts are disabled */
  2915. ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
  2916. priv->status &= ~STATUS_INT_ENABLED;
  2917. /* ack pending interrupts */
  2918. ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
  2919. ipw_stop_nic(priv);
  2920. rc = ipw_reset_nic(priv);
  2921. if (rc < 0) {
  2922. IPW_ERROR("Unable to reset NIC\n");
  2923. goto error;
  2924. }
  2925. ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
  2926. IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
  2927. /* DMA the initial boot firmware into the device */
  2928. rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
  2929. if (rc < 0) {
  2930. IPW_ERROR("Unable to load boot firmware: %d\n", rc);
  2931. goto error;
  2932. }
  2933. /* kick start the device */
  2934. ipw_start_nic(priv);
  2935. /* wait for the device to finish its initial startup sequence */
  2936. rc = ipw_poll_bit(priv, IPW_INTA_RW,
  2937. IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
  2938. if (rc < 0) {
  2939. IPW_ERROR("device failed to boot initial fw image\n");
  2940. goto error;
  2941. }
  2942. IPW_DEBUG_INFO("initial device response after %dms\n", rc);
  2943. /* ack fw init done interrupt */
  2944. ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
  2945. /* DMA the ucode into the device */
  2946. rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
  2947. if (rc < 0) {
  2948. IPW_ERROR("Unable to load ucode: %d\n", rc);
  2949. goto error;
  2950. }
  2951. /* stop nic */
  2952. ipw_stop_nic(priv);
  2953. /* DMA bss firmware into the device */
  2954. rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
  2955. if (rc < 0) {
  2956. IPW_ERROR("Unable to load firmware: %d\n", rc);
  2957. goto error;
  2958. }
  2959. #ifdef CONFIG_PM
  2960. fw_loaded = 1;
  2961. #endif
  2962. ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
  2963. rc = ipw_queue_reset(priv);
  2964. if (rc < 0) {
  2965. IPW_ERROR("Unable to initialize queues\n");
  2966. goto error;
  2967. }
  2968. /* Ensure interrupts are disabled */
  2969. ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
  2970. /* ack pending interrupts */
  2971. ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
  2972. /* kick start the device */
  2973. ipw_start_nic(priv);
  2974. if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
  2975. if (retries > 0) {
  2976. IPW_WARNING("Parity error. Retrying init.\n");
  2977. retries--;
  2978. goto retry;
  2979. }
  2980. IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
  2981. rc = -EIO;
  2982. goto error;
  2983. }
  2984. /* wait for the device */
  2985. rc = ipw_poll_bit(priv, IPW_INTA_RW,
  2986. IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
  2987. if (rc < 0) {
  2988. IPW_ERROR("device failed to start within 500ms\n");
  2989. goto error;
  2990. }
  2991. IPW_DEBUG_INFO("device response after %dms\n", rc);
  2992. /* ack fw init done interrupt */
  2993. ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
  2994. /* read eeprom data */
  2995. priv->eeprom_delay = 1;
  2996. ipw_read_eeprom(priv);
  2997. /* initialize the eeprom region of sram */
  2998. ipw_eeprom_init_sram(priv);
  2999. /* enable interrupts */
  3000. ipw_enable_interrupts(priv);
  3001. /* Ensure our queue has valid packets */
  3002. ipw_rx_queue_replenish(priv);
  3003. ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
  3004. /* ack pending interrupts */
  3005. ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
  3006. #ifndef CONFIG_PM
  3007. release_firmware(raw);
  3008. #endif
  3009. return 0;
  3010. error:
  3011. if (priv->rxq) {
  3012. ipw_rx_queue_free(priv, priv->rxq);
  3013. priv->rxq = NULL;
  3014. }
  3015. ipw_tx_queue_free(priv);
  3016. release_firmware(raw);
  3017. #ifdef CONFIG_PM
  3018. fw_loaded = 0;
  3019. raw = NULL;
  3020. #endif
  3021. return rc;
  3022. }
  3023. /*
  3024. * DMA services
  3025. *
  3026. * Theory of operation
  3027. *
3028. * A queue is a circular buffer with 'Read' and 'Write' pointers.
3029. * Two empty entries are always kept in the buffer to protect from overflow.
3030. *
3031. * For the Tx queues there are low-mark and high-mark limits. If, after
3032. * queuing a packet for Tx, the free space drops below the low mark, the Tx
3033. * queue is stopped. When packets are reclaimed (on the 'tx done' IRQ) and
3034. * the free space rises above the high mark, the Tx queue is resumed.
  3035. *
  3036. * The IPW operates with six queues, one receive queue in the device's
  3037. * sram, one transmit queue for sending commands to the device firmware,
  3038. * and four transmit queues for data.
  3039. *
  3040. * The four transmit queues allow for performing quality of service (qos)
  3041. * transmissions as per the 802.11 protocol. Currently Linux does not
  3042. * provide a mechanism to the user for utilizing prioritized queues, so
  3043. * we only utilize the first data transmit queue (queue1).
  3044. */
  3045. /*
  3046. * Driver allocates buffers of this size for Rx
  3047. */
  3048. /*
  3049. * ipw_rx_queue_space - Return number of free slots available in queue.
  3050. */
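/*
 * Example: when read == write the ring is empty and this reports
 * RX_QUEUE_SIZE - 2 free slots; the two reserved entries keep a completely
 * full ring distinguishable from an empty one.
 */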
  3051. static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
  3052. {
  3053. int s = q->read - q->write;
  3054. if (s <= 0)
  3055. s += RX_QUEUE_SIZE;
3056. /* keep some buffers reserved so a full queue is not confused with an empty one */
  3057. s -= 2;
  3058. if (s < 0)
  3059. s = 0;
  3060. return s;
  3061. }
  3062. static inline int ipw_tx_queue_space(const struct clx2_queue *q)
  3063. {
  3064. int s = q->last_used - q->first_empty;
  3065. if (s <= 0)
  3066. s += q->n_bd;
3067. s -= 2; /* keep some reserve so a full queue is not confused with an empty one */
  3068. if (s < 0)
  3069. s = 0;
  3070. return s;
  3071. }
  3072. static inline int ipw_queue_inc_wrap(int index, int n_bd)
  3073. {
  3074. return (++index == n_bd) ? 0 : index;
  3075. }
  3076. /*
  3077. * Initialize common DMA queue structure
  3078. *
  3079. * @param q queue to init
  3080. * @param count Number of BD's to allocate. Should be power of 2
  3081. * @param read_register Address for 'read' register
  3082. * (not offset within BAR, full address)
  3083. * @param write_register Address for 'write' register
  3084. * (not offset within BAR, full address)
  3085. * @param base_register Address for 'base' register
  3086. * (not offset within BAR, full address)
  3087. * @param size Address for 'size' register
  3088. * (not offset within BAR, full address)
  3089. */
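/*
 * As a concrete example of the watermarks computed below: with count = 64
 * BDs the low mark is 64 / 4 = 16 and the high mark is 64 / 8 = 8, with
 * floors of 4 and 2 respectively for very small queues.
 */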
  3090. static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
  3091. int count, u32 read, u32 write, u32 base, u32 size)
  3092. {
  3093. q->n_bd = count;
  3094. q->low_mark = q->n_bd / 4;
  3095. if (q->low_mark < 4)
  3096. q->low_mark = 4;
  3097. q->high_mark = q->n_bd / 8;
  3098. if (q->high_mark < 2)
  3099. q->high_mark = 2;
  3100. q->first_empty = q->last_used = 0;
  3101. q->reg_r = read;
  3102. q->reg_w = write;
  3103. ipw_write32(priv, base, q->dma_addr);
  3104. ipw_write32(priv, size, count);
  3105. ipw_write32(priv, read, 0);
  3106. ipw_write32(priv, write, 0);
  3107. _ipw_read32(priv, 0x90);
  3108. }
  3109. static int ipw_queue_tx_init(struct ipw_priv *priv,
  3110. struct clx2_tx_queue *q,
  3111. int count, u32 read, u32 write, u32 base, u32 size)
  3112. {
  3113. struct pci_dev *dev = priv->pci_dev;
  3114. q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
  3115. if (!q->txb)
  3116. return -ENOMEM;
  3117. q->bd =
  3118. dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
  3119. &q->q.dma_addr, GFP_KERNEL);
  3120. if (!q->bd) {
  3121. IPW_ERROR("dma_alloc_coherent(%zd) failed\n",
  3122. sizeof(q->bd[0]) * count);
  3123. kfree(q->txb);
  3124. q->txb = NULL;
  3125. return -ENOMEM;
  3126. }
  3127. ipw_queue_init(priv, &q->q, count, read, write, base, size);
  3128. return 0;
  3129. }
  3130. /*
3131. * Free one TFD, the one at index [txq->q.last_used].
  3132. * Do NOT advance any indexes
  3133. *
  3134. * @param dev
  3135. * @param txq
  3136. */
  3137. static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
  3138. struct clx2_tx_queue *txq)
  3139. {
  3140. struct tfd_frame *bd = &txq->bd[txq->q.last_used];
  3141. struct pci_dev *dev = priv->pci_dev;
  3142. int i;
  3143. /* classify bd */
  3144. if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
  3145. /* nothing to cleanup after for host commands */
  3146. return;
  3147. /* sanity check */
  3148. if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
  3149. IPW_ERROR("Too many chunks: %i\n",
  3150. le32_to_cpu(bd->u.data.num_chunks));
3151. /* @todo issue a fatal error, this is quite a serious situation */
  3152. return;
  3153. }
  3154. /* unmap chunks if any */
  3155. for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
  3156. dma_unmap_single(&dev->dev,
  3157. le32_to_cpu(bd->u.data.chunk_ptr[i]),
  3158. le16_to_cpu(bd->u.data.chunk_len[i]),
  3159. DMA_TO_DEVICE);
  3160. if (txq->txb[txq->q.last_used]) {
  3161. libipw_txb_free(txq->txb[txq->q.last_used]);
  3162. txq->txb[txq->q.last_used] = NULL;
  3163. }
  3164. }
  3165. }
  3166. /*
  3167. * Deallocate DMA queue.
  3168. *
  3169. * Empty queue by removing and destroying all BD's.
  3170. * Free all buffers.
  3171. *
  3172. * @param dev
  3173. * @param q
  3174. */
  3175. static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
  3176. {
  3177. struct clx2_queue *q = &txq->q;
  3178. struct pci_dev *dev = priv->pci_dev;
  3179. if (q->n_bd == 0)
  3180. return;
  3181. /* first, empty all BD's */
  3182. for (; q->first_empty != q->last_used;
  3183. q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
  3184. ipw_queue_tx_free_tfd(priv, txq);
  3185. }
  3186. /* free buffers belonging to queue itself */
  3187. dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
  3188. q->dma_addr);
  3189. kfree(txq->txb);
  3190. /* 0 fill whole structure */
  3191. memset(txq, 0, sizeof(*txq));
  3192. }
  3193. /*
  3194. * Destroy all DMA queues and structures
  3195. *
  3196. * @param priv
  3197. */
  3198. static void ipw_tx_queue_free(struct ipw_priv *priv)
  3199. {
  3200. /* Tx CMD queue */
  3201. ipw_queue_tx_free(priv, &priv->txq_cmd);
  3202. /* Tx queues */
  3203. ipw_queue_tx_free(priv, &priv->txq[0]);
  3204. ipw_queue_tx_free(priv, &priv->txq[1]);
  3205. ipw_queue_tx_free(priv, &priv->txq[2]);
  3206. ipw_queue_tx_free(priv, &priv->txq[3]);
  3207. }
  3208. static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
  3209. {
  3210. /* First 3 bytes are manufacturer */
  3211. bssid[0] = priv->mac_addr[0];
  3212. bssid[1] = priv->mac_addr[1];
  3213. bssid[2] = priv->mac_addr[2];
  3214. /* Last bytes are random */
  3215. get_random_bytes(&bssid[3], ETH_ALEN - 3);
  3216. bssid[0] &= 0xfe; /* clear multicast bit */
  3217. bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
  3218. }
  3219. static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
  3220. {
  3221. struct ipw_station_entry entry;
  3222. int i;
  3223. for (i = 0; i < priv->num_stations; i++) {
  3224. if (ether_addr_equal(priv->stations[i], bssid)) {
  3225. /* Another node is active in network */
  3226. priv->missed_adhoc_beacons = 0;
  3227. if (!(priv->config & CFG_STATIC_CHANNEL))
  3228. /* when other nodes drop out, we drop out */
  3229. priv->config &= ~CFG_ADHOC_PERSIST;
  3230. return i;
  3231. }
  3232. }
  3233. if (i == MAX_STATIONS)
  3234. return IPW_INVALID_STATION;
  3235. IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
  3236. entry.reserved = 0;
  3237. entry.support_mode = 0;
  3238. memcpy(entry.mac_addr, bssid, ETH_ALEN);
  3239. memcpy(priv->stations[i], bssid, ETH_ALEN);
  3240. ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
  3241. &entry, sizeof(entry));
  3242. priv->num_stations++;
  3243. return i;
  3244. }
  3245. static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
  3246. {
  3247. int i;
  3248. for (i = 0; i < priv->num_stations; i++)
  3249. if (ether_addr_equal(priv->stations[i], bssid))
  3250. return i;
  3251. return IPW_INVALID_STATION;
  3252. }
  3253. static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
  3254. {
  3255. int err;
  3256. if (priv->status & STATUS_ASSOCIATING) {
  3257. IPW_DEBUG_ASSOC("Disassociating while associating.\n");
  3258. schedule_work(&priv->disassociate);
  3259. return;
  3260. }
  3261. if (!(priv->status & STATUS_ASSOCIATED)) {
  3262. IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
  3263. return;
  3264. }
  3265. IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
  3266. "on channel %d.\n",
  3267. priv->assoc_request.bssid,
  3268. priv->assoc_request.channel);
  3269. priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
  3270. priv->status |= STATUS_DISASSOCIATING;
  3271. if (quiet)
  3272. priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
  3273. else
  3274. priv->assoc_request.assoc_type = HC_DISASSOCIATE;
  3275. err = ipw_send_associate(priv, &priv->assoc_request);
  3276. if (err) {
  3277. IPW_DEBUG_HC("Attempt to send [dis]associate command "
  3278. "failed.\n");
  3279. return;
  3280. }
  3281. }
  3282. static int ipw_disassociate(void *data)
  3283. {
  3284. struct ipw_priv *priv = data;
  3285. if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
  3286. return 0;
  3287. ipw_send_disassociate(data, 0);
  3288. netif_carrier_off(priv->net_dev);
  3289. return 1;
  3290. }
  3291. static void ipw_bg_disassociate(struct work_struct *work)
  3292. {
  3293. struct ipw_priv *priv =
  3294. container_of(work, struct ipw_priv, disassociate);
  3295. mutex_lock(&priv->mutex);
  3296. ipw_disassociate(priv);
  3297. mutex_unlock(&priv->mutex);
  3298. }
  3299. static void ipw_system_config(struct work_struct *work)
  3300. {
  3301. struct ipw_priv *priv =
  3302. container_of(work, struct ipw_priv, system_config);
  3303. #ifdef CONFIG_IPW2200_PROMISCUOUS
  3304. if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
  3305. priv->sys_config.accept_all_data_frames = 1;
  3306. priv->sys_config.accept_non_directed_frames = 1;
  3307. priv->sys_config.accept_all_mgmt_bcpr = 1;
  3308. priv->sys_config.accept_all_mgmt_frames = 1;
  3309. }
  3310. #endif
  3311. ipw_send_system_config(priv);
  3312. }
  3313. struct ipw_status_code {
  3314. u16 status;
  3315. const char *reason;
  3316. };
  3317. static const struct ipw_status_code ipw_status_codes[] = {
  3318. {0x00, "Successful"},
  3319. {0x01, "Unspecified failure"},
  3320. {0x0A, "Cannot support all requested capabilities in the "
  3321. "Capability information field"},
  3322. {0x0B, "Reassociation denied due to inability to confirm that "
  3323. "association exists"},
  3324. {0x0C, "Association denied due to reason outside the scope of this "
  3325. "standard"},
  3326. {0x0D,
  3327. "Responding station does not support the specified authentication "
  3328. "algorithm"},
  3329. {0x0E,
  3330. "Received an Authentication frame with authentication sequence "
  3331. "transaction sequence number out of expected sequence"},
  3332. {0x0F, "Authentication rejected because of challenge failure"},
  3333. {0x10, "Authentication rejected due to timeout waiting for next "
  3334. "frame in sequence"},
  3335. {0x11, "Association denied because AP is unable to handle additional "
  3336. "associated stations"},
  3337. {0x12,
  3338. "Association denied due to requesting station not supporting all "
  3339. "of the datarates in the BSSBasicServiceSet Parameter"},
  3340. {0x13,
  3341. "Association denied due to requesting station not supporting "
  3342. "short preamble operation"},
  3343. {0x14,
  3344. "Association denied due to requesting station not supporting "
  3345. "PBCC encoding"},
  3346. {0x15,
  3347. "Association denied due to requesting station not supporting "
  3348. "channel agility"},
  3349. {0x19,
  3350. "Association denied due to requesting station not supporting "
  3351. "short slot operation"},
  3352. {0x1A,
  3353. "Association denied due to requesting station not supporting "
  3354. "DSSS-OFDM operation"},
  3355. {0x28, "Invalid Information Element"},
  3356. {0x29, "Group Cipher is not valid"},
  3357. {0x2A, "Pairwise Cipher is not valid"},
  3358. {0x2B, "AKMP is not valid"},
  3359. {0x2C, "Unsupported RSN IE version"},
  3360. {0x2D, "Invalid RSN IE Capabilities"},
  3361. {0x2E, "Cipher suite is rejected per security policy"},
  3362. };
  3363. static const char *ipw_get_status_code(u16 status)
  3364. {
  3365. int i;
  3366. for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
  3367. if (ipw_status_codes[i].status == (status & 0xff))
  3368. return ipw_status_codes[i].reason;
  3369. return "Unknown status value.";
  3370. }
  3371. static inline void average_init(struct average *avg)
  3372. {
  3373. memset(avg, 0, sizeof(*avg));
  3374. }
  3375. #define DEPTH_RSSI 8
  3376. #define DEPTH_NOISE 16
  3377. static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
  3378. {
  3379. return ((depth-1)*prev_avg + val)/depth;
  3380. }
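/*
 * Worked example of the IIR filter above (using DEPTH_RSSI == 8 as defined
 * a few lines up): with prev_avg = -60 and a new sample of -52, the result
 * is (7 * -60 + -52) / 8 = -472 / 8 = -59, i.e. each new sample nudges the
 * running average by roughly 1/depth of the difference.
 */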
  3381. static void average_add(struct average *avg, s16 val)
  3382. {
  3383. avg->sum -= avg->entries[avg->pos];
  3384. avg->sum += val;
  3385. avg->entries[avg->pos++] = val;
  3386. if (unlikely(avg->pos == AVG_ENTRIES)) {
  3387. avg->init = 1;
  3388. avg->pos = 0;
  3389. }
  3390. }
  3391. static s16 average_value(struct average *avg)
  3392. {
  3393. if (!unlikely(avg->init)) {
  3394. if (avg->pos)
  3395. return avg->sum / avg->pos;
  3396. return 0;
  3397. }
  3398. return avg->sum / AVG_ENTRIES;
  3399. }
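/*
 * average_add()/average_value() above implement a simple AVG_ENTRIES-deep
 * sliding window: the running sum is adjusted as old entries are
 * overwritten, and until the window has wrapped at least once (init == 0)
 * the mean is taken over only the samples written so far.
 */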
  3400. static void ipw_reset_stats(struct ipw_priv *priv)
  3401. {
  3402. u32 len = sizeof(u32);
  3403. priv->quality = 0;
  3404. average_init(&priv->average_missed_beacons);
  3405. priv->exp_avg_rssi = -60;
  3406. priv->exp_avg_noise = -85 + 0x100;
  3407. priv->last_rate = 0;
  3408. priv->last_missed_beacons = 0;
  3409. priv->last_rx_packets = 0;
  3410. priv->last_tx_packets = 0;
  3411. priv->last_tx_failures = 0;
  3412. /* Firmware managed, reset only when NIC is restarted, so we have to
  3413. * normalize on the current value */
  3414. ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
  3415. &priv->last_rx_err, &len);
  3416. ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
  3417. &priv->last_tx_failures, &len);
  3418. /* Driver managed, reset with each association */
  3419. priv->missed_adhoc_beacons = 0;
  3420. priv->missed_beacons = 0;
  3421. priv->tx_packets = 0;
  3422. priv->rx_packets = 0;
  3423. }
  3424. static u32 ipw_get_max_rate(struct ipw_priv *priv)
  3425. {
  3426. u32 i = 0x80000000;
  3427. u32 mask = priv->rates_mask;
  3428. /* If currently associated in B mode, restrict the maximum
  3429. * rate match to B rates */
  3430. if (priv->assoc_request.ieee_mode == IPW_B_MODE)
  3431. mask &= LIBIPW_CCK_RATES_MASK;
  3432. /* TODO: Verify that the rate is supported by the current rates
  3433. * list. */
  3434. while (i && !(mask & i))
  3435. i >>= 1;
  3436. switch (i) {
  3437. case LIBIPW_CCK_RATE_1MB_MASK:
  3438. return 1000000;
  3439. case LIBIPW_CCK_RATE_2MB_MASK:
  3440. return 2000000;
  3441. case LIBIPW_CCK_RATE_5MB_MASK:
  3442. return 5500000;
  3443. case LIBIPW_OFDM_RATE_6MB_MASK:
  3444. return 6000000;
  3445. case LIBIPW_OFDM_RATE_9MB_MASK:
  3446. return 9000000;
  3447. case LIBIPW_CCK_RATE_11MB_MASK:
  3448. return 11000000;
  3449. case LIBIPW_OFDM_RATE_12MB_MASK:
  3450. return 12000000;
  3451. case LIBIPW_OFDM_RATE_18MB_MASK:
  3452. return 18000000;
  3453. case LIBIPW_OFDM_RATE_24MB_MASK:
  3454. return 24000000;
  3455. case LIBIPW_OFDM_RATE_36MB_MASK:
  3456. return 36000000;
  3457. case LIBIPW_OFDM_RATE_48MB_MASK:
  3458. return 48000000;
  3459. case LIBIPW_OFDM_RATE_54MB_MASK:
  3460. return 54000000;
  3461. }
  3462. if (priv->ieee->mode == IEEE_B)
  3463. return 11000000;
  3464. else
  3465. return 54000000;
  3466. }
  3467. static u32 ipw_get_current_rate(struct ipw_priv *priv)
  3468. {
  3469. u32 rate, len = sizeof(rate);
  3470. int err;
  3471. if (!(priv->status & STATUS_ASSOCIATED))
  3472. return 0;
  3473. if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
  3474. err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
  3475. &len);
  3476. if (err) {
  3477. IPW_DEBUG_INFO("failed querying ordinals.\n");
  3478. return 0;
  3479. }
  3480. } else
  3481. return ipw_get_max_rate(priv);
  3482. switch (rate) {
  3483. case IPW_TX_RATE_1MB:
  3484. return 1000000;
  3485. case IPW_TX_RATE_2MB:
  3486. return 2000000;
  3487. case IPW_TX_RATE_5MB:
  3488. return 5500000;
  3489. case IPW_TX_RATE_6MB:
  3490. return 6000000;
  3491. case IPW_TX_RATE_9MB:
  3492. return 9000000;
  3493. case IPW_TX_RATE_11MB:
  3494. return 11000000;
  3495. case IPW_TX_RATE_12MB:
  3496. return 12000000;
  3497. case IPW_TX_RATE_18MB:
  3498. return 18000000;
  3499. case IPW_TX_RATE_24MB:
  3500. return 24000000;
  3501. case IPW_TX_RATE_36MB:
  3502. return 36000000;
  3503. case IPW_TX_RATE_48MB:
  3504. return 48000000;
  3505. case IPW_TX_RATE_54MB:
  3506. return 54000000;
  3507. }
  3508. return 0;
  3509. }
  3510. #define IPW_STATS_INTERVAL (2 * HZ)
  3511. static void ipw_gather_stats(struct ipw_priv *priv)
  3512. {
  3513. u32 rx_err, rx_err_delta, rx_packets_delta;
  3514. u32 tx_failures, tx_failures_delta, tx_packets_delta;
  3515. u32 missed_beacons_percent, missed_beacons_delta;
  3516. u32 quality = 0;
  3517. u32 len = sizeof(u32);
  3518. s16 rssi;
  3519. u32 beacon_quality, signal_quality, tx_quality, rx_quality,
  3520. rate_quality;
  3521. u32 max_rate;
  3522. if (!(priv->status & STATUS_ASSOCIATED)) {
  3523. priv->quality = 0;
  3524. return;
  3525. }
  3526. /* Update the statistics */
  3527. ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
  3528. &priv->missed_beacons, &len);
  3529. missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
  3530. priv->last_missed_beacons = priv->missed_beacons;
  3531. if (priv->assoc_request.beacon_interval) {
  3532. missed_beacons_percent = missed_beacons_delta *
  3533. (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
  3534. (IPW_STATS_INTERVAL * 10);
  3535. } else {
  3536. missed_beacons_percent = 0;
  3537. }
  3538. average_add(&priv->average_missed_beacons, missed_beacons_percent);
  3539. ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
  3540. rx_err_delta = rx_err - priv->last_rx_err;
  3541. priv->last_rx_err = rx_err;
  3542. ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
  3543. tx_failures_delta = tx_failures - priv->last_tx_failures;
  3544. priv->last_tx_failures = tx_failures;
  3545. rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
  3546. priv->last_rx_packets = priv->rx_packets;
  3547. tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
  3548. priv->last_tx_packets = priv->tx_packets;
  3549. /* Calculate quality based on the following:
  3550. *
  3551. * Missed beacon: 100% = 0, 0% = 70% missed
  3552. * Rate: 60% = 1Mbs, 100% = Max
  3553. * Rx and Tx errors represent a straight % of total Rx/Tx
  3554. * RSSI: 100% = > -50, 0% = < -80
  3555. * Rx errors: 100% = 0, 0% = 50% missed
  3556. *
  3557. * The lowest computed quality is used.
  3558. *
  3559. */
  3560. #define BEACON_THRESHOLD 5
  3561. beacon_quality = 100 - missed_beacons_percent;
  3562. if (beacon_quality < BEACON_THRESHOLD)
  3563. beacon_quality = 0;
  3564. else
  3565. beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
  3566. (100 - BEACON_THRESHOLD);
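/* For example, with the threshold of 5 above, missing 10% of beacons gives
 * beacon_quality = (90 - 5) * 100 / 95 = 89%, while missing 95% or more
 * drops it straight to 0. */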
  3567. IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
  3568. beacon_quality, missed_beacons_percent);
  3569. priv->last_rate = ipw_get_current_rate(priv);
  3570. max_rate = ipw_get_max_rate(priv);
  3571. rate_quality = priv->last_rate * 40 / max_rate + 60;
  3572. IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
  3573. rate_quality, priv->last_rate / 1000000);
  3574. if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
  3575. rx_quality = 100 - (rx_err_delta * 100) /
  3576. (rx_packets_delta + rx_err_delta);
  3577. else
  3578. rx_quality = 100;
  3579. IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
  3580. rx_quality, rx_err_delta, rx_packets_delta);
  3581. if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
  3582. tx_quality = 100 - (tx_failures_delta * 100) /
  3583. (tx_packets_delta + tx_failures_delta);
  3584. else
  3585. tx_quality = 100;
  3586. IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
  3587. tx_quality, tx_failures_delta, tx_packets_delta);
  3588. rssi = priv->exp_avg_rssi;
  3589. signal_quality =
  3590. (100 *
  3591. (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
  3592. (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
  3593. (priv->ieee->perfect_rssi - rssi) *
  3594. (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
  3595. 62 * (priv->ieee->perfect_rssi - rssi))) /
  3596. ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
  3597. (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
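/* The expression above is, up to integer rounding, equivalent to
 *   100 - (p - r) * (15 * (p - w) + 62 * (p - r)) / (p - w)^2
 * with p = perfect_rssi, w = worst_rssi and r = the averaged RSSI, i.e. a
 * roughly quadratic fall-off as the signal approaches worst_rssi; the
 * result is clamped to [0, 100] just below. */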
  3598. if (signal_quality > 100)
  3599. signal_quality = 100;
  3600. else if (signal_quality < 1)
  3601. signal_quality = 0;
  3602. IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
  3603. signal_quality, rssi);
  3604. quality = min(rx_quality, signal_quality);
  3605. quality = min(tx_quality, quality);
  3606. quality = min(rate_quality, quality);
  3607. quality = min(beacon_quality, quality);
  3608. if (quality == beacon_quality)
  3609. IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
  3610. quality);
  3611. if (quality == rate_quality)
  3612. IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
  3613. quality);
  3614. if (quality == tx_quality)
  3615. IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
  3616. quality);
  3617. if (quality == rx_quality)
  3618. IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
  3619. quality);
  3620. if (quality == signal_quality)
  3621. IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
  3622. quality);
  3623. priv->quality = quality;
  3624. schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
  3625. }
  3626. static void ipw_bg_gather_stats(struct work_struct *work)
  3627. {
  3628. struct ipw_priv *priv =
  3629. container_of(work, struct ipw_priv, gather_stats.work);
  3630. mutex_lock(&priv->mutex);
  3631. ipw_gather_stats(priv);
  3632. mutex_unlock(&priv->mutex);
  3633. }
  3634. /* Missed beacon behavior:
  3635. * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
  3636. * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
  3637. * Above disassociate threshold, give up and stop scanning.
  3638. * Roaming is disabled if disassociate_threshold <= roaming_threshold */
  3639. static void ipw_handle_missed_beacon(struct ipw_priv *priv,
  3640. int missed_count)
  3641. {
  3642. priv->notif_missed_beacons = missed_count;
  3643. if (missed_count > priv->disassociate_threshold &&
  3644. priv->status & STATUS_ASSOCIATED) {
  3645. /* If associated and we've hit the missed
  3646. * beacon threshold, disassociate, turn
  3647. * off roaming, and abort any active scans */
  3648. IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
  3649. IPW_DL_STATE | IPW_DL_ASSOC,
  3650. "Missed beacon: %d - disassociate\n", missed_count);
  3651. priv->status &= ~STATUS_ROAMING;
  3652. if (priv->status & STATUS_SCANNING) {
  3653. IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
  3654. IPW_DL_STATE,
  3655. "Aborting scan with missed beacon.\n");
  3656. schedule_work(&priv->abort_scan);
  3657. }
  3658. schedule_work(&priv->disassociate);
  3659. return;
  3660. }
  3661. if (priv->status & STATUS_ROAMING) {
  3662. /* If we are currently roaming, then just
  3663. * print a debug statement... */
  3664. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
  3665. "Missed beacon: %d - roam in progress\n",
  3666. missed_count);
  3667. return;
  3668. }
  3669. if (roaming &&
  3670. (missed_count > priv->roaming_threshold &&
  3671. missed_count <= priv->disassociate_threshold)) {
  3672. /* If we are not already roaming, set the ROAM
  3673. * bit in the status and kick off a scan.
  3674. * This can happen several times before we reach
  3675. * disassociate_threshold. */
  3676. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
  3677. "Missed beacon: %d - initiate "
  3678. "roaming\n", missed_count);
  3679. if (!(priv->status & STATUS_ROAMING)) {
  3680. priv->status |= STATUS_ROAMING;
  3681. if (!(priv->status & STATUS_SCANNING))
  3682. schedule_delayed_work(&priv->request_scan, 0);
  3683. }
  3684. return;
  3685. }
  3686. if (priv->status & STATUS_SCANNING &&
  3687. missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
  3688. /* Stop scan to keep fw from getting
  3689. * stuck (only if we aren't roaming --
  3690. * otherwise we'll never scan more than 2 or 3
  3691. * channels..) */
  3692. IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
  3693. "Aborting scan with missed beacon.\n");
  3694. schedule_work(&priv->abort_scan);
  3695. }
  3696. IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
  3697. }
  3698. static void ipw_scan_event(struct work_struct *work)
  3699. {
  3700. union iwreq_data wrqu;
  3701. struct ipw_priv *priv =
  3702. container_of(work, struct ipw_priv, scan_event.work);
  3703. wrqu.data.length = 0;
  3704. wrqu.data.flags = 0;
  3705. wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
  3706. }
  3707. static void handle_scan_event(struct ipw_priv *priv)
  3708. {
  3709. /* Only userspace-requested scan completion events go out immediately */
  3710. if (!priv->user_requested_scan) {
  3711. schedule_delayed_work(&priv->scan_event,
  3712. round_jiffies_relative(msecs_to_jiffies(4000)));
  3713. } else {
  3714. priv->user_requested_scan = 0;
  3715. mod_delayed_work(system_wq, &priv->scan_event, 0);
  3716. }
  3717. }
  3718. /*
  3719. * Handle host notification packet.
  3720. * Called from interrupt routine
  3721. */
  3722. static void ipw_rx_notification(struct ipw_priv *priv,
  3723. struct ipw_rx_notification *notif)
  3724. {
  3725. u16 size = le16_to_cpu(notif->size);
  3726. IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
  3727. switch (notif->subtype) {
  3728. case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
  3729. struct notif_association *assoc = &notif->u.assoc;
  3730. switch (assoc->state) {
  3731. case CMAS_ASSOCIATED:{
  3732. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3733. IPW_DL_ASSOC,
  3734. "associated: '%*pE' %pM\n",
  3735. priv->essid_len, priv->essid,
  3736. priv->bssid);
  3737. switch (priv->ieee->iw_mode) {
  3738. case IW_MODE_INFRA:
  3739. memcpy(priv->ieee->bssid,
  3740. priv->bssid, ETH_ALEN);
  3741. break;
  3742. case IW_MODE_ADHOC:
  3743. memcpy(priv->ieee->bssid,
  3744. priv->bssid, ETH_ALEN);
  3745. /* clear out the station table */
  3746. priv->num_stations = 0;
  3747. IPW_DEBUG_ASSOC
  3748. ("queueing adhoc check\n");
  3749. schedule_delayed_work(
  3750. &priv->adhoc_check,
  3751. le16_to_cpu(priv->
  3752. assoc_request.
  3753. beacon_interval));
  3754. break;
  3755. }
  3756. priv->status &= ~STATUS_ASSOCIATING;
  3757. priv->status |= STATUS_ASSOCIATED;
  3758. schedule_work(&priv->system_config);
  3759. #ifdef CONFIG_IPW2200_QOS
  3760. #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
  3761. le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
  3762. if ((priv->status & STATUS_AUTH) &&
  3763. (IPW_GET_PACKET_STYPE(&notif->u.raw)
  3764. == IEEE80211_STYPE_ASSOC_RESP)) {
  3765. if ((sizeof
  3766. (struct
  3767. libipw_assoc_response)
  3768. <= size)
  3769. && (size <= 2314)) {
  3770. struct
  3771. libipw_rx_stats
  3772. stats = {
  3773. .len = size - 1,
  3774. };
  3775. IPW_DEBUG_QOS
  3776. ("QoS Associate "
  3777. "size %d\n", size);
  3778. libipw_rx_mgt(priv->
  3779. ieee,
  3780. (struct
  3781. libipw_hdr_4addr
  3782. *)
  3783. &notif->u.raw, &stats);
  3784. }
  3785. }
  3786. #endif
  3787. schedule_work(&priv->link_up);
  3788. break;
  3789. }
  3790. case CMAS_AUTHENTICATED:{
  3791. if (priv->
  3792. status & (STATUS_ASSOCIATED |
  3793. STATUS_AUTH)) {
  3794. struct notif_authenticate *auth
  3795. = &notif->u.auth;
  3796. IPW_DEBUG(IPW_DL_NOTIF |
  3797. IPW_DL_STATE |
  3798. IPW_DL_ASSOC,
  3799. "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n",
  3800. priv->essid_len,
  3801. priv->essid,
  3802. priv->bssid,
  3803. le16_to_cpu(auth->status),
  3804. ipw_get_status_code
  3805. (le16_to_cpu
  3806. (auth->status)));
  3807. priv->status &=
  3808. ~(STATUS_ASSOCIATING |
  3809. STATUS_AUTH |
  3810. STATUS_ASSOCIATED);
  3811. schedule_work(&priv->link_down);
  3812. break;
  3813. }
  3814. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3815. IPW_DL_ASSOC,
  3816. "authenticated: '%*pE' %pM\n",
  3817. priv->essid_len, priv->essid,
  3818. priv->bssid);
  3819. break;
  3820. }
  3821. case CMAS_INIT:{
  3822. if (priv->status & STATUS_AUTH) {
  3823. struct
  3824. libipw_assoc_response
  3825. *resp;
  3826. resp =
  3827. (struct
  3828. libipw_assoc_response
  3829. *)&notif->u.raw;
  3830. IPW_DEBUG(IPW_DL_NOTIF |
  3831. IPW_DL_STATE |
  3832. IPW_DL_ASSOC,
  3833. "association failed (0x%04X): %s\n",
  3834. le16_to_cpu(resp->status),
  3835. ipw_get_status_code
  3836. (le16_to_cpu
  3837. (resp->status)));
  3838. }
  3839. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3840. IPW_DL_ASSOC,
  3841. "disassociated: '%*pE' %pM\n",
  3842. priv->essid_len, priv->essid,
  3843. priv->bssid);
  3844. priv->status &=
  3845. ~(STATUS_DISASSOCIATING |
  3846. STATUS_ASSOCIATING |
  3847. STATUS_ASSOCIATED | STATUS_AUTH);
  3848. if (priv->assoc_network
  3849. && (priv->assoc_network->
  3850. capability &
  3851. WLAN_CAPABILITY_IBSS))
  3852. ipw_remove_current_network
  3853. (priv);
  3854. schedule_work(&priv->link_down);
  3855. break;
  3856. }
  3857. case CMAS_RX_ASSOC_RESP:
  3858. break;
  3859. default:
  3860. IPW_ERROR("assoc: unknown (%d)\n",
  3861. assoc->state);
  3862. break;
  3863. }
  3864. break;
  3865. }
  3866. case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
  3867. struct notif_authenticate *auth = &notif->u.auth;
  3868. switch (auth->state) {
  3869. case CMAS_AUTHENTICATED:
  3870. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
  3871. "authenticated: '%*pE' %pM\n",
  3872. priv->essid_len, priv->essid,
  3873. priv->bssid);
  3874. priv->status |= STATUS_AUTH;
  3875. break;
  3876. case CMAS_INIT:
  3877. if (priv->status & STATUS_AUTH) {
  3878. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3879. IPW_DL_ASSOC,
  3880. "authentication failed (0x%04X): %s\n",
  3881. le16_to_cpu(auth->status),
  3882. ipw_get_status_code(le16_to_cpu
  3883. (auth->
  3884. status)));
  3885. }
  3886. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3887. IPW_DL_ASSOC,
  3888. "deauthenticated: '%*pE' %pM\n",
  3889. priv->essid_len, priv->essid,
  3890. priv->bssid);
  3891. priv->status &= ~(STATUS_ASSOCIATING |
  3892. STATUS_AUTH |
  3893. STATUS_ASSOCIATED);
  3894. schedule_work(&priv->link_down);
  3895. break;
  3896. case CMAS_TX_AUTH_SEQ_1:
  3897. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3898. IPW_DL_ASSOC, "AUTH_SEQ_1\n");
  3899. break;
  3900. case CMAS_RX_AUTH_SEQ_2:
  3901. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3902. IPW_DL_ASSOC, "AUTH_SEQ_2\n");
  3903. break;
  3904. case CMAS_AUTH_SEQ_1_PASS:
  3905. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3906. IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
  3907. break;
  3908. case CMAS_AUTH_SEQ_1_FAIL:
  3909. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3910. IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
  3911. break;
  3912. case CMAS_TX_AUTH_SEQ_3:
  3913. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3914. IPW_DL_ASSOC, "AUTH_SEQ_3\n");
  3915. break;
  3916. case CMAS_RX_AUTH_SEQ_4:
  3917. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3918. IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
  3919. break;
  3920. case CMAS_AUTH_SEQ_2_PASS:
  3921. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3922. IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
  3923. break;
  3924. case CMAS_AUTH_SEQ_2_FAIL:
  3925. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3926. IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
  3927. break;
  3928. case CMAS_TX_ASSOC:
  3929. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3930. IPW_DL_ASSOC, "TX_ASSOC\n");
  3931. break;
  3932. case CMAS_RX_ASSOC_RESP:
  3933. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3934. IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
  3935. break;
  3936. case CMAS_ASSOCIATED:
  3937. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3938. IPW_DL_ASSOC, "ASSOCIATED\n");
  3939. break;
  3940. default:
  3941. IPW_DEBUG_NOTIF("auth: failure - %d\n",
  3942. auth->state);
  3943. break;
  3944. }
  3945. break;
  3946. }
  3947. case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
  3948. struct notif_channel_result *x =
  3949. &notif->u.channel_result;
  3950. if (size == sizeof(*x)) {
  3951. IPW_DEBUG_SCAN("Scan result for channel %d\n",
  3952. x->channel_num);
  3953. } else {
  3954. IPW_DEBUG_SCAN("Scan result of wrong size %d "
  3955. "(should be %zd)\n",
  3956. size, sizeof(*x));
  3957. }
  3958. break;
  3959. }
  3960. case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
  3961. struct notif_scan_complete *x = &notif->u.scan_complete;
  3962. if (size == sizeof(*x)) {
  3963. IPW_DEBUG_SCAN
  3964. ("Scan completed: type %d, %d channels, "
  3965. "%d status\n", x->scan_type,
  3966. x->num_channels, x->status);
  3967. } else {
  3968. IPW_ERROR("Scan completed of wrong size %d "
  3969. "(should be %zd)\n",
  3970. size, sizeof(*x));
  3971. }
  3972. priv->status &=
  3973. ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
  3974. wake_up_interruptible(&priv->wait_state);
  3975. cancel_delayed_work(&priv->scan_check);
  3976. if (priv->status & STATUS_EXIT_PENDING)
  3977. break;
  3978. priv->ieee->scans++;
  3979. #ifdef CONFIG_IPW2200_MONITOR
  3980. if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
  3981. priv->status |= STATUS_SCAN_FORCED;
  3982. schedule_delayed_work(&priv->request_scan, 0);
  3983. break;
  3984. }
  3985. priv->status &= ~STATUS_SCAN_FORCED;
  3986. #endif /* CONFIG_IPW2200_MONITOR */
  3987. /* Do queued direct scans first */
  3988. if (priv->status & STATUS_DIRECT_SCAN_PENDING)
  3989. schedule_delayed_work(&priv->request_direct_scan, 0);
  3990. if (!(priv->status & (STATUS_ASSOCIATED |
  3991. STATUS_ASSOCIATING |
  3992. STATUS_ROAMING |
  3993. STATUS_DISASSOCIATING)))
  3994. schedule_work(&priv->associate);
  3995. else if (priv->status & STATUS_ROAMING) {
  3996. if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
  3997. /* If a scan completed and we are in roam mode, then
  3998. * the scan that completed was the one requested as a
  3999. * result of entering roam... so, schedule the
  4000. * roam work */
  4001. schedule_work(&priv->roam);
  4002. else
  4003. /* Don't schedule if we aborted the scan */
  4004. priv->status &= ~STATUS_ROAMING;
  4005. } else if (priv->status & STATUS_SCAN_PENDING)
  4006. schedule_delayed_work(&priv->request_scan, 0);
  4007. else if (priv->config & CFG_BACKGROUND_SCAN
  4008. && priv->status & STATUS_ASSOCIATED)
  4009. schedule_delayed_work(&priv->request_scan,
  4010. round_jiffies_relative(HZ));
  4011. /* Send an empty event to user space.
  4012. * We don't send the received data on the event because
  4013. * it would require us to do complex transcoding, and
4014. * we want to minimise the work done in the irq handler.
4015. * Use a request to extract the data.
4016. * Also, we generate this event for any scan, regardless
4017. * of how the scan was initiated. User space can just
  4018. * sync on periodic scan to get fresh data...
  4019. * Jean II */
  4020. if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
  4021. handle_scan_event(priv);
  4022. break;
  4023. }
  4024. case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
  4025. struct notif_frag_length *x = &notif->u.frag_len;
  4026. if (size == sizeof(*x))
  4027. IPW_ERROR("Frag length: %d\n",
  4028. le16_to_cpu(x->frag_length));
  4029. else
  4030. IPW_ERROR("Frag length of wrong size %d "
  4031. "(should be %zd)\n",
  4032. size, sizeof(*x));
  4033. break;
  4034. }
  4035. case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
  4036. struct notif_link_deterioration *x =
  4037. &notif->u.link_deterioration;
  4038. if (size == sizeof(*x)) {
  4039. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
  4040. "link deterioration: type %d, cnt %d\n",
  4041. x->silence_notification_type,
  4042. x->silence_count);
  4043. memcpy(&priv->last_link_deterioration, x,
  4044. sizeof(*x));
  4045. } else {
  4046. IPW_ERROR("Link Deterioration of wrong size %d "
  4047. "(should be %zd)\n",
  4048. size, sizeof(*x));
  4049. }
  4050. break;
  4051. }
  4052. case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
  4053. IPW_ERROR("Dino config\n");
  4054. if (priv->hcmd
  4055. && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
  4056. IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
  4057. break;
  4058. }
  4059. case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
  4060. struct notif_beacon_state *x = &notif->u.beacon_state;
  4061. if (size != sizeof(*x)) {
  4062. IPW_ERROR
  4063. ("Beacon state of wrong size %d (should "
  4064. "be %zd)\n", size, sizeof(*x));
  4065. break;
  4066. }
  4067. if (le32_to_cpu(x->state) ==
  4068. HOST_NOTIFICATION_STATUS_BEACON_MISSING)
  4069. ipw_handle_missed_beacon(priv,
  4070. le32_to_cpu(x->
  4071. number));
  4072. break;
  4073. }
  4074. case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
  4075. struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
  4076. if (size == sizeof(*x)) {
  4077. IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
  4078. "0x%02x station %d\n",
  4079. x->key_state, x->security_type,
  4080. x->station_index);
  4081. break;
  4082. }
  4083. IPW_ERROR
  4084. ("TGi Tx Key of wrong size %d (should be %zd)\n",
  4085. size, sizeof(*x));
  4086. break;
  4087. }
  4088. case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
  4089. struct notif_calibration *x = &notif->u.calibration;
  4090. if (size == sizeof(*x)) {
  4091. memcpy(&priv->calib, x, sizeof(*x));
  4092. IPW_DEBUG_INFO("TODO: Calibration\n");
  4093. break;
  4094. }
  4095. IPW_ERROR
  4096. ("Calibration of wrong size %d (should be %zd)\n",
  4097. size, sizeof(*x));
  4098. break;
  4099. }
  4100. case HOST_NOTIFICATION_NOISE_STATS:{
  4101. if (size == sizeof(u32)) {
  4102. priv->exp_avg_noise =
  4103. exponential_average(priv->exp_avg_noise,
  4104. (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
  4105. DEPTH_NOISE);
  4106. break;
  4107. }
  4108. IPW_ERROR
  4109. ("Noise stat is wrong size %d (should be %zd)\n",
  4110. size, sizeof(u32));
  4111. break;
  4112. }
  4113. default:
  4114. IPW_DEBUG_NOTIF("Unknown notification: "
  4115. "subtype=%d,flags=0x%2x,size=%d\n",
  4116. notif->subtype, notif->flags, size);
  4117. }
  4118. }
  4119. /*
4120. * Destroys all DMA structures and initialises them again
  4121. *
  4122. * @param priv
  4123. * @return error code
  4124. */
  4125. static int ipw_queue_reset(struct ipw_priv *priv)
  4126. {
  4127. int rc = 0;
  4128. /* @todo customize queue sizes */
  4129. int nTx = 64, nTxCmd = 8;
  4130. ipw_tx_queue_free(priv);
  4131. /* Tx CMD queue */
  4132. rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
  4133. IPW_TX_CMD_QUEUE_READ_INDEX,
  4134. IPW_TX_CMD_QUEUE_WRITE_INDEX,
  4135. IPW_TX_CMD_QUEUE_BD_BASE,
  4136. IPW_TX_CMD_QUEUE_BD_SIZE);
  4137. if (rc) {
  4138. IPW_ERROR("Tx Cmd queue init failed\n");
  4139. goto error;
  4140. }
  4141. /* Tx queue(s) */
  4142. rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
  4143. IPW_TX_QUEUE_0_READ_INDEX,
  4144. IPW_TX_QUEUE_0_WRITE_INDEX,
  4145. IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
  4146. if (rc) {
  4147. IPW_ERROR("Tx 0 queue init failed\n");
  4148. goto error;
  4149. }
  4150. rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
  4151. IPW_TX_QUEUE_1_READ_INDEX,
  4152. IPW_TX_QUEUE_1_WRITE_INDEX,
  4153. IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
  4154. if (rc) {
  4155. IPW_ERROR("Tx 1 queue init failed\n");
  4156. goto error;
  4157. }
  4158. rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
  4159. IPW_TX_QUEUE_2_READ_INDEX,
  4160. IPW_TX_QUEUE_2_WRITE_INDEX,
  4161. IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
  4162. if (rc) {
  4163. IPW_ERROR("Tx 2 queue init failed\n");
  4164. goto error;
  4165. }
  4166. rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
  4167. IPW_TX_QUEUE_3_READ_INDEX,
  4168. IPW_TX_QUEUE_3_WRITE_INDEX,
  4169. IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
  4170. if (rc) {
  4171. IPW_ERROR("Tx 3 queue init failed\n");
  4172. goto error;
  4173. }
  4174. /* statistics */
  4175. priv->rx_bufs_min = 0;
  4176. priv->rx_pend_max = 0;
  4177. return rc;
  4178. error:
  4179. ipw_tx_queue_free(priv);
  4180. return rc;
  4181. }
  4182. /*
4183. * Reclaim Tx queue entries no longer used by the NIC.
4184. *
4185. * When the FW advances the 'R' index, all entries between the old and
4186. * new 'R' index need to be reclaimed. As a result, some free space becomes
4187. * available. If there is enough free space (> low mark), wake the Tx queue.
4188. *
4189. * @note Need to protect against garbage in the 'R' index
4190. * @param priv
4191. * @param txq
4192. * @param qindex
4193. * @return Number of used entries remaining in the queue
  4194. */
  4195. static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
  4196. struct clx2_tx_queue *txq, int qindex)
  4197. {
  4198. u32 hw_tail;
  4199. int used;
  4200. struct clx2_queue *q = &txq->q;
  4201. hw_tail = ipw_read32(priv, q->reg_r);
  4202. if (hw_tail >= q->n_bd) {
  4203. IPW_ERROR
  4204. ("Read index for DMA queue (%d) is out of range [0-%d)\n",
  4205. hw_tail, q->n_bd);
  4206. goto done;
  4207. }
  4208. for (; q->last_used != hw_tail;
  4209. q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
  4210. ipw_queue_tx_free_tfd(priv, txq);
  4211. priv->tx_packets++;
  4212. }
  4213. done:
  4214. if ((ipw_tx_queue_space(q) > q->low_mark) &&
  4215. (qindex >= 0))
  4216. netif_wake_queue(priv->net_dev);
  4217. used = q->first_empty - q->last_used;
  4218. if (used < 0)
  4219. used += q->n_bd;
  4220. return used;
  4221. }
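/*
 * The 'used' arithmetic above is plain ring-buffer occupancy: for example,
 * with q->n_bd = 64, first_empty = 3 and last_used = 60, used starts at
 * 3 - 60 = -57 and wraps to -57 + 64 = 7 entries still in flight.
 */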
  4222. static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, const void *buf,
  4223. int len, int sync)
  4224. {
  4225. struct clx2_tx_queue *txq = &priv->txq_cmd;
  4226. struct clx2_queue *q = &txq->q;
  4227. struct tfd_frame *tfd;
  4228. if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
  4229. IPW_ERROR("No space for Tx\n");
  4230. return -EBUSY;
  4231. }
  4232. tfd = &txq->bd[q->first_empty];
  4233. txq->txb[q->first_empty] = NULL;
  4234. memset(tfd, 0, sizeof(*tfd));
  4235. tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
  4236. tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
  4237. priv->hcmd_seq++;
  4238. tfd->u.cmd.index = hcmd;
  4239. tfd->u.cmd.length = len;
  4240. memcpy(tfd->u.cmd.payload, buf, len);
  4241. q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
  4242. ipw_write32(priv, q->reg_w, q->first_empty);
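/* The _ipw_read32() below is presumably just a dummy read to flush the
 * posted write of the write pointer (assumption; register 0x90 is not
 * otherwise documented here). */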
  4243. _ipw_read32(priv, 0x90);
  4244. return 0;
  4245. }
  4246. /*
  4247. * Rx theory of operation
  4248. *
  4249. * The host allocates 32 DMA target addresses and passes the host address
  4250. * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
  4251. * 0 to 31
  4252. *
  4253. * Rx Queue Indexes
  4254. * The host/firmware share two index registers for managing the Rx buffers.
  4255. *
  4256. * The READ index maps to the first position that the firmware may be writing
  4257. * to -- the driver can read up to (but not including) this position and get
  4258. * good data.
  4259. * The READ index is managed by the firmware once the card is enabled.
  4260. *
  4261. * The WRITE index maps to the last position the driver has read from -- the
4262. * position preceding WRITE is the last slot in which the firmware can place a packet.
  4263. *
  4264. * The queue is empty (no good data) if WRITE = READ - 1, and is full if
  4265. * WRITE = READ.
  4266. *
  4267. * During initialization the host sets up the READ queue position to the first
  4268. * INDEX position, and WRITE to the last (READ - 1 wrapped)
  4269. *
  4270. * When the firmware places a packet in a buffer it will advance the READ index
  4271. * and fire the RX interrupt. The driver can then query the READ index and
  4272. * process as many packets as possible, moving the WRITE index forward as it
  4273. * resets the Rx queue buffers with new memory.
  4274. *
  4275. * The management in the driver is as follows:
  4276. * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
  4277. * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4278. * to replenish the ipw->rxq->rx_free list.
  4279. * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
  4280. * ipw->rxq is replenished and the READ INDEX is updated (updating the
  4281. * 'processed' and 'read' driver indexes as well)
  4282. * + A received packet is processed and handed to the kernel network stack,
  4283. * detached from the ipw->rxq. The driver 'processed' index is updated.
  4284. * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
  4285. * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
  4286. * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
  4287. * were enough free buffers and RX_STALLED is set it is cleared.
  4288. *
  4289. *
  4290. * Driver sequence:
  4291. *
  4292. * ipw_rx_queue_alloc() Allocates rx_free
  4293. * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
  4294. * ipw_rx_queue_restock
  4295. * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
  4296. * queue, updates firmware pointers, and updates
  4297. * the WRITE index. If insufficient rx_free buffers
  4298. * are available, schedules ipw_rx_queue_replenish
  4299. *
  4300. * -- enable interrupts --
  4301. * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
  4302. * READ INDEX, detaching the SKB from the pool.
  4303. * Moves the packet buffer from queue to rx_used.
  4304. * Calls ipw_rx_queue_restock to refill any empty
  4305. * slots.
  4306. * ...
  4307. *
  4308. */
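/*
 * Illustrative sketch (not part of the driver): the READ/WRITE index rules
 * described above reduce to modular ring arithmetic. Assuming RX_QUEUE_SIZE
 * slots and the one-slot gap noted above, the number of slots the driver may
 * still restock could be computed along these lines; the driver's own
 * ipw_rx_queue_space() helper (used by the restock code below) may differ in
 * detail:
 *
 *	static inline int rx_space_sketch(u32 read, u32 write)
 *	{
 *		int s = read - write;		// slots between driver and firmware
 *		if (s <= 0)
 *			s += RX_QUEUE_SIZE;	// wrap around the ring
 *		s--;				// keep WRITE from catching up to READ
 *		return s < 0 ? 0 : s;
 *	}
 */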
  4309. /*
  4310. * If there are slots in the RX queue that need to be restocked,
  4311. * and we have free pre-allocated buffers, fill the ranks as much
  4312. * as we can pulling from rx_free.
  4313. *
  4314. * This moves the 'write' index forward to catch up with 'processed', and
  4315. * also updates the memory address in the firmware to reference the new
  4316. * target buffer.
  4317. */
  4318. static void ipw_rx_queue_restock(struct ipw_priv *priv)
  4319. {
  4320. struct ipw_rx_queue *rxq = priv->rxq;
  4321. struct list_head *element;
  4322. struct ipw_rx_mem_buffer *rxb;
  4323. unsigned long flags;
  4324. int write;
  4325. spin_lock_irqsave(&rxq->lock, flags);
  4326. write = rxq->write;
  4327. while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
  4328. element = rxq->rx_free.next;
  4329. rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
  4330. list_del(element);
  4331. ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
  4332. rxb->dma_addr);
  4333. rxq->queue[rxq->write] = rxb;
  4334. rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
  4335. rxq->free_count--;
  4336. }
  4337. spin_unlock_irqrestore(&rxq->lock, flags);
  4338. /* If the pre-allocated buffer pool is dropping low, schedule to
  4339. * refill it */
  4340. if (rxq->free_count <= RX_LOW_WATERMARK)
  4341. schedule_work(&priv->rx_replenish);
  4342. /* If we've added more space for the firmware to place data, tell it */
  4343. if (write != rxq->write)
  4344. ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
  4345. }
  4346. /*
4347. * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
  4348. * Also restock the Rx queue via ipw_rx_queue_restock.
  4349. *
4350. * This is called as a scheduled work item (except during initialization)
  4351. */
  4352. static void ipw_rx_queue_replenish(void *data)
  4353. {
  4354. struct ipw_priv *priv = data;
  4355. struct ipw_rx_queue *rxq = priv->rxq;
  4356. struct list_head *element;
  4357. struct ipw_rx_mem_buffer *rxb;
  4358. unsigned long flags;
  4359. spin_lock_irqsave(&rxq->lock, flags);
  4360. while (!list_empty(&rxq->rx_used)) {
  4361. element = rxq->rx_used.next;
  4362. rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
  4363. rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
  4364. if (!rxb->skb) {
  4365. printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
  4366. priv->net_dev->name);
  4367. /* We don't reschedule replenish work here -- we will
  4368. * call the restock method and if it still needs
  4369. * more buffers it will schedule replenish */
  4370. break;
  4371. }
  4372. list_del(element);
  4373. rxb->dma_addr =
  4374. dma_map_single(&priv->pci_dev->dev, rxb->skb->data,
  4375. IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
  4376. list_add_tail(&rxb->list, &rxq->rx_free);
  4377. rxq->free_count++;
  4378. }
  4379. spin_unlock_irqrestore(&rxq->lock, flags);
  4380. ipw_rx_queue_restock(priv);
  4381. }
  4382. static void ipw_bg_rx_queue_replenish(struct work_struct *work)
  4383. {
  4384. struct ipw_priv *priv =
  4385. container_of(work, struct ipw_priv, rx_replenish);
  4386. mutex_lock(&priv->mutex);
  4387. ipw_rx_queue_replenish(priv);
  4388. mutex_unlock(&priv->mutex);
  4389. }
  4390. /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
  4391. * If an SKB has been detached, the POOL needs to have its SKB set to NULL
  4392. * This free routine walks the list of POOL entries and if SKB is set to
  4393. * non NULL it is unmapped and freed
  4394. */
  4395. static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
  4396. {
  4397. int i;
  4398. if (!rxq)
  4399. return;
  4400. for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
  4401. if (rxq->pool[i].skb != NULL) {
  4402. dma_unmap_single(&priv->pci_dev->dev,
  4403. rxq->pool[i].dma_addr,
  4404. IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
  4405. dev_kfree_skb(rxq->pool[i].skb);
  4406. }
  4407. }
  4408. kfree(rxq);
  4409. }
  4410. static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
  4411. {
  4412. struct ipw_rx_queue *rxq;
  4413. int i;
  4414. rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
  4415. if (unlikely(!rxq)) {
  4416. IPW_ERROR("memory allocation failed\n");
  4417. return NULL;
  4418. }
  4419. spin_lock_init(&rxq->lock);
  4420. INIT_LIST_HEAD(&rxq->rx_free);
  4421. INIT_LIST_HEAD(&rxq->rx_used);
  4422. /* Fill the rx_used queue with _all_ of the Rx buffers */
  4423. for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
  4424. list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4425. /* Set things up so that we have processed and used all buffers, but
4426. * have not restocked the Rx queue with fresh buffers */
  4427. rxq->read = rxq->write = 0;
  4428. rxq->free_count = 0;
  4429. return rxq;
  4430. }
  4431. static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
  4432. {
  4433. rate &= ~LIBIPW_BASIC_RATE_MASK;
  4434. if (ieee_mode == IEEE_A) {
  4435. switch (rate) {
  4436. case LIBIPW_OFDM_RATE_6MB:
  4437. return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
  4438. 1 : 0;
  4439. case LIBIPW_OFDM_RATE_9MB:
  4440. return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
  4441. 1 : 0;
  4442. case LIBIPW_OFDM_RATE_12MB:
  4443. return priv->
  4444. rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
  4445. case LIBIPW_OFDM_RATE_18MB:
  4446. return priv->
  4447. rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
  4448. case LIBIPW_OFDM_RATE_24MB:
  4449. return priv->
  4450. rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
  4451. case LIBIPW_OFDM_RATE_36MB:
  4452. return priv->
  4453. rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
  4454. case LIBIPW_OFDM_RATE_48MB:
  4455. return priv->
  4456. rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
  4457. case LIBIPW_OFDM_RATE_54MB:
  4458. return priv->
  4459. rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
  4460. default:
  4461. return 0;
  4462. }
  4463. }
  4464. /* B and G mixed */
  4465. switch (rate) {
  4466. case LIBIPW_CCK_RATE_1MB:
  4467. return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
  4468. case LIBIPW_CCK_RATE_2MB:
  4469. return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
  4470. case LIBIPW_CCK_RATE_5MB:
  4471. return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
  4472. case LIBIPW_CCK_RATE_11MB:
  4473. return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
  4474. }
  4475. /* If we are limited to B modulations, bail at this point */
  4476. if (ieee_mode == IEEE_B)
  4477. return 0;
  4478. /* G */
  4479. switch (rate) {
  4480. case LIBIPW_OFDM_RATE_6MB:
  4481. return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
  4482. case LIBIPW_OFDM_RATE_9MB:
  4483. return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
  4484. case LIBIPW_OFDM_RATE_12MB:
  4485. return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
  4486. case LIBIPW_OFDM_RATE_18MB:
  4487. return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
  4488. case LIBIPW_OFDM_RATE_24MB:
  4489. return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
  4490. case LIBIPW_OFDM_RATE_36MB:
  4491. return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
  4492. case LIBIPW_OFDM_RATE_48MB:
  4493. return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
  4494. case LIBIPW_OFDM_RATE_54MB:
  4495. return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
  4496. }
  4497. return 0;
  4498. }
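/*
 * Reminder on the encoding handled above: supported-rate octets from the
 * (extended) supported rates IE carry the rate in units of 500 kb/s, with
 * the top bit (LIBIPW_BASIC_RATE_MASK) flagging a basic/mandatory rate.
 * For example, 0x96 means "11 Mb/s, basic" and is reduced to 0x16 (11 Mb/s)
 * by the mask stripping at the top of the function.
 */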
  4499. static int ipw_compatible_rates(struct ipw_priv *priv,
  4500. const struct libipw_network *network,
  4501. struct ipw_supported_rates *rates)
  4502. {
  4503. int num_rates, i;
  4504. memset(rates, 0, sizeof(*rates));
  4505. num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
  4506. rates->num_rates = 0;
  4507. for (i = 0; i < num_rates; i++) {
  4508. if (!ipw_is_rate_in_mask(priv, network->mode,
  4509. network->rates[i])) {
  4510. if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
  4511. IPW_DEBUG_SCAN("Adding masked mandatory "
  4512. "rate %02X\n",
  4513. network->rates[i]);
  4514. rates->supported_rates[rates->num_rates++] =
  4515. network->rates[i];
  4516. continue;
  4517. }
  4518. IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
  4519. network->rates[i], priv->rates_mask);
  4520. continue;
  4521. }
  4522. rates->supported_rates[rates->num_rates++] = network->rates[i];
  4523. }
  4524. num_rates = min(network->rates_ex_len,
  4525. (u8) (IPW_MAX_RATES - num_rates));
  4526. for (i = 0; i < num_rates; i++) {
  4527. if (!ipw_is_rate_in_mask(priv, network->mode,
  4528. network->rates_ex[i])) {
  4529. if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
  4530. IPW_DEBUG_SCAN("Adding masked mandatory "
  4531. "rate %02X\n",
  4532. network->rates_ex[i]);
  4533. rates->supported_rates[rates->num_rates++] =
4534. network->rates_ex[i];
  4535. continue;
  4536. }
  4537. IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
  4538. network->rates_ex[i], priv->rates_mask);
  4539. continue;
  4540. }
  4541. rates->supported_rates[rates->num_rates++] =
  4542. network->rates_ex[i];
  4543. }
  4544. return 1;
  4545. }
  4546. static void ipw_copy_rates(struct ipw_supported_rates *dest,
  4547. const struct ipw_supported_rates *src)
  4548. {
  4549. u8 i;
  4550. for (i = 0; i < src->num_rates; i++)
  4551. dest->supported_rates[i] = src->supported_rates[i];
  4552. dest->num_rates = src->num_rates;
  4553. }
  4554. /* TODO: Look at sniffed packets in the air to determine if the basic rate
  4555. * mask should ever be used -- right now all callers to add the scan rates are
  4556. * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
  4557. static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
  4558. u8 modulation, u32 rate_mask)
  4559. {
  4560. u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
  4561. LIBIPW_BASIC_RATE_MASK : 0;
  4562. if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
  4563. rates->supported_rates[rates->num_rates++] =
  4564. LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
  4565. if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
  4566. rates->supported_rates[rates->num_rates++] =
  4567. LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
  4568. if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
  4569. rates->supported_rates[rates->num_rates++] = basic_mask |
  4570. LIBIPW_CCK_RATE_5MB;
  4571. if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
  4572. rates->supported_rates[rates->num_rates++] = basic_mask |
  4573. LIBIPW_CCK_RATE_11MB;
  4574. }
  4575. static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
  4576. u8 modulation, u32 rate_mask)
  4577. {
  4578. u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
  4579. LIBIPW_BASIC_RATE_MASK : 0;
  4580. if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
  4581. rates->supported_rates[rates->num_rates++] = basic_mask |
  4582. LIBIPW_OFDM_RATE_6MB;
  4583. if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
  4584. rates->supported_rates[rates->num_rates++] =
  4585. LIBIPW_OFDM_RATE_9MB;
  4586. if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
  4587. rates->supported_rates[rates->num_rates++] = basic_mask |
  4588. LIBIPW_OFDM_RATE_12MB;
  4589. if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
  4590. rates->supported_rates[rates->num_rates++] =
  4591. LIBIPW_OFDM_RATE_18MB;
  4592. if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
  4593. rates->supported_rates[rates->num_rates++] = basic_mask |
  4594. LIBIPW_OFDM_RATE_24MB;
  4595. if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
  4596. rates->supported_rates[rates->num_rates++] =
  4597. LIBIPW_OFDM_RATE_36MB;
  4598. if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
  4599. rates->supported_rates[rates->num_rates++] =
  4600. LIBIPW_OFDM_RATE_48MB;
  4601. if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
  4602. rates->supported_rates[rates->num_rates++] =
  4603. LIBIPW_OFDM_RATE_54MB;
  4604. }
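/*
 * Note that only the 6, 12 and 24 Mb entries above ever carry basic_mask;
 * those are the mandatory OFDM rates in 802.11a/g, so the optional 9, 18,
 * 36, 48 and 54 Mb rates are always advertised as non-basic here.
 */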
  4605. struct ipw_network_match {
  4606. struct libipw_network *network;
  4607. struct ipw_supported_rates rates;
  4608. };
  4609. static int ipw_find_adhoc_network(struct ipw_priv *priv,
  4610. struct ipw_network_match *match,
  4611. struct libipw_network *network,
  4612. int roaming)
  4613. {
  4614. struct ipw_supported_rates rates;
  4615. /* Verify that this network's capability is compatible with the
  4616. * current mode (AdHoc or Infrastructure) */
  4617. if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
  4618. !(network->capability & WLAN_CAPABILITY_IBSS))) {
  4619. IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
  4620. network->ssid_len, network->ssid,
  4621. network->bssid);
  4622. return 0;
  4623. }
  4624. if (unlikely(roaming)) {
4625. /* If we are roaming, then make sure this is a valid
4626. * network to try to roam to */
  4627. if ((network->ssid_len != match->network->ssid_len) ||
  4628. memcmp(network->ssid, match->network->ssid,
  4629. network->ssid_len)) {
  4630. IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
  4631. network->ssid_len, network->ssid,
  4632. network->bssid);
  4633. return 0;
  4634. }
  4635. } else {
  4636. /* If an ESSID has been configured then compare the broadcast
  4637. * ESSID to ours */
  4638. if ((priv->config & CFG_STATIC_ESSID) &&
  4639. ((network->ssid_len != priv->essid_len) ||
  4640. memcmp(network->ssid, priv->essid,
  4641. min(network->ssid_len, priv->essid_len)))) {
  4642. IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
  4643. network->ssid_len, network->ssid,
  4644. network->bssid, priv->essid_len,
  4645. priv->essid);
  4646. return 0;
  4647. }
  4648. }
4649. /* If the currently-matched network has a newer (larger) timestamp than
4650. * this one, don't bother testing everything else. */
  4651. if (network->time_stamp[0] < match->network->time_stamp[0]) {
  4652. IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
  4653. match->network->ssid_len, match->network->ssid);
  4654. return 0;
  4655. } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
  4656. IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
  4657. match->network->ssid_len, match->network->ssid);
  4658. return 0;
  4659. }
  4660. /* Now go through and see if the requested network is valid... */
  4661. if (priv->ieee->scan_age != 0 &&
  4662. time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
  4663. IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n",
  4664. network->ssid_len, network->ssid,
  4665. network->bssid,
  4666. jiffies_to_msecs(jiffies -
  4667. network->last_scanned));
  4668. return 0;
  4669. }
  4670. if ((priv->config & CFG_STATIC_CHANNEL) &&
  4671. (network->channel != priv->channel)) {
  4672. IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
  4673. network->ssid_len, network->ssid,
  4674. network->bssid,
  4675. network->channel, priv->channel);
  4676. return 0;
  4677. }
  4678. /* Verify privacy compatibility */
  4679. if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
  4680. ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
  4681. IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
  4682. network->ssid_len, network->ssid,
  4683. network->bssid,
  4684. priv->
  4685. capability & CAP_PRIVACY_ON ? "on" : "off",
  4686. network->
  4687. capability & WLAN_CAPABILITY_PRIVACY ? "on" :
  4688. "off");
  4689. return 0;
  4690. }
  4691. if (ether_addr_equal(network->bssid, priv->bssid)) {
  4692. IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n",
  4693. network->ssid_len, network->ssid,
  4694. network->bssid, priv->bssid);
  4695. return 0;
  4696. }
  4697. /* Filter out any incompatible freq / mode combinations */
  4698. if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
  4699. IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
  4700. network->ssid_len, network->ssid,
  4701. network->bssid);
  4702. return 0;
  4703. }
  4704. /* Ensure that the rates supported by the driver are compatible with
  4705. * this AP, including verification of basic rates (mandatory) */
  4706. if (!ipw_compatible_rates(priv, network, &rates)) {
  4707. IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
  4708. network->ssid_len, network->ssid,
  4709. network->bssid);
  4710. return 0;
  4711. }
  4712. if (rates.num_rates == 0) {
  4713. IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
  4714. network->ssid_len, network->ssid,
  4715. network->bssid);
  4716. return 0;
  4717. }
4718. /* TODO: Perform any further minimal comparative tests. We do not
  4719. * want to put too much policy logic here; intelligent scan selection
  4720. * should occur within a generic IEEE 802.11 user space tool. */
  4721. /* Set up 'new' AP to this network */
  4722. ipw_copy_rates(&match->rates, &rates);
  4723. match->network = network;
  4724. IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n",
  4725. network->ssid_len, network->ssid, network->bssid);
  4726. return 1;
  4727. }
  4728. static void ipw_merge_adhoc_network(struct work_struct *work)
  4729. {
  4730. struct ipw_priv *priv =
  4731. container_of(work, struct ipw_priv, merge_networks);
  4732. struct libipw_network *network = NULL;
  4733. struct ipw_network_match match = {
  4734. .network = priv->assoc_network
  4735. };
  4736. if ((priv->status & STATUS_ASSOCIATED) &&
  4737. (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
  4738. /* First pass through ROAM process -- look for a better
  4739. * network */
  4740. unsigned long flags;
  4741. spin_lock_irqsave(&priv->ieee->lock, flags);
  4742. list_for_each_entry(network, &priv->ieee->network_list, list) {
  4743. if (network != priv->assoc_network)
  4744. ipw_find_adhoc_network(priv, &match, network,
  4745. 1);
  4746. }
  4747. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  4748. if (match.network == priv->assoc_network) {
  4749. IPW_DEBUG_MERGE("No better ADHOC in this network to "
  4750. "merge to.\n");
  4751. return;
  4752. }
  4753. mutex_lock(&priv->mutex);
  4754. if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
  4755. IPW_DEBUG_MERGE("remove network %*pE\n",
  4756. priv->essid_len, priv->essid);
  4757. ipw_remove_current_network(priv);
  4758. }
  4759. ipw_disassociate(priv);
  4760. priv->assoc_network = match.network;
  4761. mutex_unlock(&priv->mutex);
  4762. return;
  4763. }
  4764. }
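/* Evaluate 'network' as an association candidate. A chain of filters
* (capability, ESSID, signal strength, association storming, scan age,
* channel, privacy, BSSID, regulatory validity and rate compatibility) may
* exclude it; if it survives, it becomes the new best match and 1 is
* returned, otherwise 0. */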
  4765. static int ipw_best_network(struct ipw_priv *priv,
  4766. struct ipw_network_match *match,
  4767. struct libipw_network *network, int roaming)
  4768. {
  4769. struct ipw_supported_rates rates;
  4770. /* Verify that this network's capability is compatible with the
  4771. * current mode (AdHoc or Infrastructure) */
  4772. if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
  4773. !(network->capability & WLAN_CAPABILITY_ESS)) ||
  4774. (priv->ieee->iw_mode == IW_MODE_ADHOC &&
  4775. !(network->capability & WLAN_CAPABILITY_IBSS))) {
  4776. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
  4777. network->ssid_len, network->ssid,
  4778. network->bssid);
  4779. return 0;
  4780. }
  4781. if (unlikely(roaming)) {
/* If we are roaming, then check that this is a valid
* network to try to roam to */
  4784. if ((network->ssid_len != match->network->ssid_len) ||
  4785. memcmp(network->ssid, match->network->ssid,
  4786. network->ssid_len)) {
  4787. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
  4788. network->ssid_len, network->ssid,
  4789. network->bssid);
  4790. return 0;
  4791. }
  4792. } else {
  4793. /* If an ESSID has been configured then compare the broadcast
  4794. * ESSID to ours */
  4795. if ((priv->config & CFG_STATIC_ESSID) &&
  4796. ((network->ssid_len != priv->essid_len) ||
  4797. memcmp(network->ssid, priv->essid,
  4798. min(network->ssid_len, priv->essid_len)))) {
  4799. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
  4800. network->ssid_len, network->ssid,
  4801. network->bssid, priv->essid_len,
  4802. priv->essid);
  4803. return 0;
  4804. }
  4805. }
/* If the current best match has a stronger signal than this network,
* don't bother testing everything else. */
  4808. if (match->network && match->network->stats.rssi > network->stats.rssi) {
  4809. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n",
  4810. network->ssid_len, network->ssid,
  4811. network->bssid, match->network->ssid_len,
  4812. match->network->ssid, match->network->bssid);
  4813. return 0;
  4814. }
  4815. /* If this network has already had an association attempt within the
  4816. * last 3 seconds, do not try and associate again... */
  4817. if (network->last_associate &&
  4818. time_after(network->last_associate + (HZ * 3UL), jiffies)) {
  4819. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n",
  4820. network->ssid_len, network->ssid,
  4821. network->bssid,
  4822. jiffies_to_msecs(jiffies -
  4823. network->last_associate));
  4824. return 0;
  4825. }
  4826. /* Now go through and see if the requested network is valid... */
  4827. if (priv->ieee->scan_age != 0 &&
  4828. time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
  4829. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n",
  4830. network->ssid_len, network->ssid,
  4831. network->bssid,
  4832. jiffies_to_msecs(jiffies -
  4833. network->last_scanned));
  4834. return 0;
  4835. }
  4836. if ((priv->config & CFG_STATIC_CHANNEL) &&
  4837. (network->channel != priv->channel)) {
  4838. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
  4839. network->ssid_len, network->ssid,
  4840. network->bssid,
  4841. network->channel, priv->channel);
  4842. return 0;
  4843. }
  4844. /* Verify privacy compatibility */
  4845. if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
  4846. ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
  4847. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
  4848. network->ssid_len, network->ssid,
  4849. network->bssid,
  4850. priv->capability & CAP_PRIVACY_ON ? "on" :
  4851. "off",
  4852. network->capability &
  4853. WLAN_CAPABILITY_PRIVACY ? "on" : "off");
  4854. return 0;
  4855. }
  4856. if ((priv->config & CFG_STATIC_BSSID) &&
  4857. !ether_addr_equal(network->bssid, priv->bssid)) {
  4858. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n",
  4859. network->ssid_len, network->ssid,
  4860. network->bssid, priv->bssid);
  4861. return 0;
  4862. }
  4863. /* Filter out any incompatible freq / mode combinations */
  4864. if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
  4865. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
  4866. network->ssid_len, network->ssid,
  4867. network->bssid);
  4868. return 0;
  4869. }
  4870. /* Filter out invalid channel in current GEO */
  4871. if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
  4872. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n",
  4873. network->ssid_len, network->ssid,
  4874. network->bssid);
  4875. return 0;
  4876. }
  4877. /* Ensure that the rates supported by the driver are compatible with
  4878. * this AP, including verification of basic rates (mandatory) */
  4879. if (!ipw_compatible_rates(priv, network, &rates)) {
  4880. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
  4881. network->ssid_len, network->ssid,
  4882. network->bssid);
  4883. return 0;
  4884. }
  4885. if (rates.num_rates == 0) {
  4886. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
  4887. network->ssid_len, network->ssid,
  4888. network->bssid);
  4889. return 0;
  4890. }
/* TODO: Perform any further minimal comparative tests. We do not
* want to put too much policy logic here; intelligent scan selection
* should occur within a generic IEEE 802.11 user space tool. */
  4894. /* Set up 'new' AP to this network */
  4895. ipw_copy_rates(&match->rates, &rates);
  4896. match->network = network;
  4897. IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n",
  4898. network->ssid_len, network->ssid, network->bssid);
  4899. return 1;
  4900. }
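/* Populate a libipw_network structure describing the IBSS we are about to
* create: pick a band/mode and a usable channel, generate a BSSID for it,
* and fill in our ESSID, capability bits and supported rates. */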
  4901. static void ipw_adhoc_create(struct ipw_priv *priv,
  4902. struct libipw_network *network)
  4903. {
  4904. const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
  4905. int i;
/*
* For the purposes of scanning, we can set our wireless mode
* to trigger scans across combinations of bands, but when it
* comes to creating a new ad-hoc network, we have to tell the FW
* exactly which band to use.
*
* We also have the possibility of an invalid channel for the
* chosen band. Attempting to create a new ad-hoc network
* with an invalid channel for the wireless mode will trigger a
* FW fatal error.
*/
  4918. switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
  4919. case LIBIPW_52GHZ_BAND:
  4920. network->mode = IEEE_A;
  4921. i = libipw_channel_to_index(priv->ieee, priv->channel);
  4922. BUG_ON(i == -1);
  4923. if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
  4924. IPW_WARNING("Overriding invalid channel\n");
  4925. priv->channel = geo->a[0].channel;
  4926. }
  4927. break;
  4928. case LIBIPW_24GHZ_BAND:
  4929. if (priv->ieee->mode & IEEE_G)
  4930. network->mode = IEEE_G;
  4931. else
  4932. network->mode = IEEE_B;
  4933. i = libipw_channel_to_index(priv->ieee, priv->channel);
  4934. BUG_ON(i == -1);
  4935. if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
  4936. IPW_WARNING("Overriding invalid channel\n");
  4937. priv->channel = geo->bg[0].channel;
  4938. }
  4939. break;
  4940. default:
  4941. IPW_WARNING("Overriding invalid channel\n");
  4942. if (priv->ieee->mode & IEEE_A) {
  4943. network->mode = IEEE_A;
  4944. priv->channel = geo->a[0].channel;
  4945. } else if (priv->ieee->mode & IEEE_G) {
  4946. network->mode = IEEE_G;
  4947. priv->channel = geo->bg[0].channel;
  4948. } else {
  4949. network->mode = IEEE_B;
  4950. priv->channel = geo->bg[0].channel;
  4951. }
  4952. break;
  4953. }
  4954. network->channel = priv->channel;
  4955. priv->config |= CFG_ADHOC_PERSIST;
  4956. ipw_create_bssid(priv, network->bssid);
  4957. network->ssid_len = priv->essid_len;
  4958. memcpy(network->ssid, priv->essid, priv->essid_len);
  4959. memset(&network->stats, 0, sizeof(network->stats));
  4960. network->capability = WLAN_CAPABILITY_IBSS;
  4961. if (!(priv->config & CFG_PREAMBLE_LONG))
  4962. network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
  4963. if (priv->capability & CAP_PRIVACY_ON)
  4964. network->capability |= WLAN_CAPABILITY_PRIVACY;
  4965. network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
  4966. memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
  4967. network->rates_ex_len = priv->rates.num_rates - network->rates_len;
  4968. memcpy(network->rates_ex,
  4969. &priv->rates.supported_rates[network->rates_len],
  4970. network->rates_ex_len);
  4971. network->last_scanned = 0;
  4972. network->flags = 0;
  4973. network->last_associate = 0;
  4974. network->time_stamp[0] = 0;
  4975. network->time_stamp[1] = 0;
  4976. network->beacon_interval = 100; /* Default */
  4977. network->listen_interval = 10; /* Default */
  4978. network->atim_window = 0; /* Default */
  4979. network->wpa_ie_len = 0;
  4980. network->rsn_ie_len = 0;
  4981. }
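/* Push the active TKIP/CCMP transmit key for the given key index to the
* firmware via IPW_CMD_TGI_TX_KEY; does nothing if that key slot is unset. */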
  4982. static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
  4983. {
  4984. struct ipw_tgi_tx_key key;
  4985. if (!(priv->ieee->sec.flags & (1 << index)))
  4986. return;
  4987. key.key_id = index;
  4988. memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
  4989. key.security_type = type;
  4990. key.station_index = 0; /* always 0 for BSS */
  4991. key.flags = 0;
  4992. /* 0 for new key; previous value of counter (after fatal error) */
  4993. key.tx_counter[0] = cpu_to_le32(0);
  4994. key.tx_counter[1] = cpu_to_le32(0);
  4995. ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
  4996. }
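/* Download each configured key slot (0-3) to the firmware as an
* IPW_CMD_WEP_KEY command; 'type' (WEP or CCM) is OR'd into the key index
* and slots without a key are skipped. */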
  4997. static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
  4998. {
  4999. struct ipw_wep_key key;
  5000. int i;
  5001. key.cmd_id = DINO_CMD_WEP_KEY;
  5002. key.seq_num = 0;
/* Note: AES keys cannot be set multiple times.
* Only set them the first time. */
  5005. for (i = 0; i < 4; i++) {
  5006. key.key_index = i | type;
  5007. if (!(priv->ieee->sec.flags & (1 << i))) {
  5008. key.key_size = 0;
  5009. continue;
  5010. }
  5011. key.key_size = priv->ieee->sec.key_sizes[i];
  5012. memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
  5013. ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
  5014. }
  5015. }
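/* Split unicast decryption between firmware and host for the given security
* level: TKIP (SEC_LEVEL_2) must be decrypted on the host, WEP and CCMP can
* be left to the firmware. No-op when host encryption is in use. */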
  5016. static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
  5017. {
  5018. if (priv->ieee->host_encrypt)
  5019. return;
  5020. switch (level) {
  5021. case SEC_LEVEL_3:
  5022. priv->sys_config.disable_unicast_decryption = 0;
  5023. priv->ieee->host_decrypt = 0;
  5024. break;
  5025. case SEC_LEVEL_2:
  5026. priv->sys_config.disable_unicast_decryption = 1;
  5027. priv->ieee->host_decrypt = 1;
  5028. break;
  5029. case SEC_LEVEL_1:
  5030. priv->sys_config.disable_unicast_decryption = 0;
  5031. priv->ieee->host_decrypt = 0;
  5032. break;
  5033. case SEC_LEVEL_0:
  5034. priv->sys_config.disable_unicast_decryption = 1;
  5035. break;
  5036. default:
  5037. break;
  5038. }
  5039. }
  5040. static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
  5041. {
  5042. if (priv->ieee->host_encrypt)
  5043. return;
  5044. switch (level) {
  5045. case SEC_LEVEL_3:
  5046. priv->sys_config.disable_multicast_decryption = 0;
  5047. break;
  5048. case SEC_LEVEL_2:
  5049. priv->sys_config.disable_multicast_decryption = 1;
  5050. break;
  5051. case SEC_LEVEL_1:
  5052. priv->sys_config.disable_multicast_decryption = 0;
  5053. break;
  5054. case SEC_LEVEL_0:
  5055. priv->sys_config.disable_multicast_decryption = 1;
  5056. break;
  5057. default:
  5058. break;
  5059. }
  5060. }
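/* Program the firmware with whatever key material the current security
* level needs: the active CCMP/TKIP transmit key and, where the firmware
* does the decryption, the WEP/CCMP key slots. */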
  5061. static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
  5062. {
  5063. switch (priv->ieee->sec.level) {
  5064. case SEC_LEVEL_3:
  5065. if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
  5066. ipw_send_tgi_tx_key(priv,
  5067. DCT_FLAG_EXT_SECURITY_CCM,
  5068. priv->ieee->sec.active_key);
  5069. if (!priv->ieee->host_mc_decrypt)
  5070. ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
  5071. break;
  5072. case SEC_LEVEL_2:
  5073. if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
  5074. ipw_send_tgi_tx_key(priv,
  5075. DCT_FLAG_EXT_SECURITY_TKIP,
  5076. priv->ieee->sec.active_key);
  5077. break;
  5078. case SEC_LEVEL_1:
  5079. ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
  5080. ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
  5081. ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
  5082. break;
  5083. case SEC_LEVEL_0:
  5084. default:
  5085. break;
  5086. }
  5087. }
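/* Periodic ad-hoc watchdog: if too many consecutive beacons have been
* missed (and CFG_ADHOC_PERSIST is not set), drop the association;
* otherwise reschedule the check one beacon interval from now. */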
  5088. static void ipw_adhoc_check(void *data)
  5089. {
  5090. struct ipw_priv *priv = data;
  5091. if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
  5092. !(priv->config & CFG_ADHOC_PERSIST)) {
  5093. IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
  5094. IPW_DL_STATE | IPW_DL_ASSOC,
  5095. "Missed beacon: %d - disassociate\n",
  5096. priv->missed_adhoc_beacons);
  5097. ipw_remove_current_network(priv);
  5098. ipw_disassociate(priv);
  5099. return;
  5100. }
  5101. schedule_delayed_work(&priv->adhoc_check,
  5102. le16_to_cpu(priv->assoc_request.beacon_interval));
  5103. }
  5104. static void ipw_bg_adhoc_check(struct work_struct *work)
  5105. {
  5106. struct ipw_priv *priv =
  5107. container_of(work, struct ipw_priv, adhoc_check.work);
  5108. mutex_lock(&priv->mutex);
  5109. ipw_adhoc_check(priv);
  5110. mutex_unlock(&priv->mutex);
  5111. }
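/* Dump the static configuration (channel/ESSID/BSSID locks, privacy and
* rate mask) to the debug log when a scan completes without a match. */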
  5112. static void ipw_debug_config(struct ipw_priv *priv)
  5113. {
  5114. IPW_DEBUG_INFO("Scan completed, no valid APs matched "
  5115. "[CFG 0x%08X]\n", priv->config);
  5116. if (priv->config & CFG_STATIC_CHANNEL)
  5117. IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
  5118. else
  5119. IPW_DEBUG_INFO("Channel unlocked.\n");
  5120. if (priv->config & CFG_STATIC_ESSID)
  5121. IPW_DEBUG_INFO("ESSID locked to '%*pE'\n",
  5122. priv->essid_len, priv->essid);
  5123. else
  5124. IPW_DEBUG_INFO("ESSID unlocked.\n");
  5125. if (priv->config & CFG_STATIC_BSSID)
  5126. IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
  5127. else
  5128. IPW_DEBUG_INFO("BSSID unlocked.\n");
  5129. if (priv->capability & CAP_PRIVACY_ON)
  5130. IPW_DEBUG_INFO("PRIVACY on\n");
  5131. else
  5132. IPW_DEBUG_INFO("PRIVACY off\n");
  5133. IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
  5134. }
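/* Translate the user's rate mask into the firmware's per-band fixed Tx
* rate format and write it at the address exported through
* IPW_MEM_FIXED_OVERRIDE. */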
  5135. static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
  5136. {
  5137. /* TODO: Verify that this works... */
  5138. struct ipw_fixed_rate fr;
  5139. u32 reg;
  5140. u16 mask = 0;
  5141. u16 new_tx_rates = priv->rates_mask;
  5142. /* Identify 'current FW band' and match it with the fixed
  5143. * Tx rates */
  5144. switch (priv->ieee->freq_band) {
  5145. case LIBIPW_52GHZ_BAND: /* A only */
  5146. /* IEEE_A */
  5147. if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
  5148. /* Invalid fixed rate mask */
  5149. IPW_DEBUG_WX
  5150. ("invalid fixed rate mask in ipw_set_fixed_rate\n");
  5151. new_tx_rates = 0;
  5152. break;
  5153. }
  5154. new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
  5155. break;
  5156. default: /* 2.4Ghz or Mixed */
  5157. /* IEEE_B */
  5158. if (mode == IEEE_B) {
  5159. if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
  5160. /* Invalid fixed rate mask */
  5161. IPW_DEBUG_WX
  5162. ("invalid fixed rate mask in ipw_set_fixed_rate\n");
  5163. new_tx_rates = 0;
  5164. }
  5165. break;
  5166. }
  5167. /* IEEE_G */
  5168. if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
  5169. LIBIPW_OFDM_RATES_MASK)) {
  5170. /* Invalid fixed rate mask */
  5171. IPW_DEBUG_WX
  5172. ("invalid fixed rate mask in ipw_set_fixed_rate\n");
  5173. new_tx_rates = 0;
  5174. break;
  5175. }
  5176. if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
  5177. mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
  5178. new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
  5179. }
  5180. if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
  5181. mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
  5182. new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
  5183. }
  5184. if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
  5185. mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
  5186. new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
  5187. }
  5188. new_tx_rates |= mask;
  5189. break;
  5190. }
  5191. fr.tx_rates = cpu_to_le16(new_tx_rates);
  5192. reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
  5193. ipw_write_reg32(priv, reg, *(u32 *) & fr);
  5194. }
  5195. static void ipw_abort_scan(struct ipw_priv *priv)
  5196. {
  5197. int err;
  5198. if (priv->status & STATUS_SCAN_ABORTING) {
  5199. IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
  5200. return;
  5201. }
  5202. priv->status |= STATUS_SCAN_ABORTING;
  5203. err = ipw_send_scan_abort(priv);
  5204. if (err)
  5205. IPW_DEBUG_HC("Request to abort scan failed.\n");
  5206. }
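/* Build the channel list for a scan request. Each band's channels are
* preceded by a byte encoding the band and channel count, the channel we
* are currently associated on is skipped, and passive-only channels are
* forced to a passive full-dwell scan type, e.g.:
*   channels_list = { band|count, ch, ch, ..., band|count, ch, ... } */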
  5207. static void ipw_add_scan_channels(struct ipw_priv *priv,
  5208. struct ipw_scan_request_ext *scan,
  5209. int scan_type)
  5210. {
  5211. int channel_index = 0;
  5212. const struct libipw_geo *geo;
  5213. int i;
  5214. geo = libipw_get_geo(priv->ieee);
  5215. if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
  5216. int start = channel_index;
  5217. for (i = 0; i < geo->a_channels; i++) {
  5218. if ((priv->status & STATUS_ASSOCIATED) &&
  5219. geo->a[i].channel == priv->channel)
  5220. continue;
  5221. channel_index++;
  5222. scan->channels_list[channel_index] = geo->a[i].channel;
  5223. ipw_set_scan_type(scan, channel_index,
geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
IPW_SCAN_PASSIVE_FULL_DWELL_SCAN : scan_type);
  5228. }
  5229. if (start != channel_index) {
  5230. scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
  5231. (channel_index - start);
  5232. channel_index++;
  5233. }
  5234. }
  5235. if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
  5236. int start = channel_index;
  5237. if (priv->config & CFG_SPEED_SCAN) {
  5238. int index;
  5239. u8 channels[LIBIPW_24GHZ_CHANNELS] = {
  5240. /* nop out the list */
  5241. [0] = 0
  5242. };
  5243. u8 channel;
  5244. while (channel_index < IPW_SCAN_CHANNELS - 1) {
  5245. channel =
  5246. priv->speed_scan[priv->speed_scan_pos];
  5247. if (channel == 0) {
  5248. priv->speed_scan_pos = 0;
  5249. channel = priv->speed_scan[0];
  5250. }
  5251. if ((priv->status & STATUS_ASSOCIATED) &&
  5252. channel == priv->channel) {
  5253. priv->speed_scan_pos++;
  5254. continue;
  5255. }
/* If this channel has already been
* added to the scan, break out of the
* loop; it will be the first channel
* in the next scan.
*/
  5261. if (channels[channel - 1] != 0)
  5262. break;
  5263. channels[channel - 1] = 1;
  5264. priv->speed_scan_pos++;
  5265. channel_index++;
  5266. scan->channels_list[channel_index] = channel;
  5267. index =
  5268. libipw_channel_to_index(priv->ieee, channel);
  5269. ipw_set_scan_type(scan, channel_index,
geo->bg[index].flags & LIBIPW_CH_PASSIVE_ONLY ?
IPW_SCAN_PASSIVE_FULL_DWELL_SCAN : scan_type);
  5275. }
  5276. } else {
  5277. for (i = 0; i < geo->bg_channels; i++) {
  5278. if ((priv->status & STATUS_ASSOCIATED) &&
  5279. geo->bg[i].channel == priv->channel)
  5280. continue;
  5281. channel_index++;
  5282. scan->channels_list[channel_index] =
  5283. geo->bg[i].channel;
  5284. ipw_set_scan_type(scan, channel_index,
geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
IPW_SCAN_PASSIVE_FULL_DWELL_SCAN : scan_type);
  5290. }
  5291. }
  5292. if (start != channel_index) {
  5293. scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
  5294. (channel_index - start);
  5295. }
  5296. }
  5297. }
  5298. static int ipw_passive_dwell_time(struct ipw_priv *priv)
  5299. {
  5300. /* staying on passive channels longer than the DTIM interval during a
  5301. * scan, while associated, causes the firmware to cancel the scan
  5302. * without notification. Hence, don't stay on passive channels longer
  5303. * than the beacon interval.
  5304. */
  5305. if (priv->status & STATUS_ASSOCIATED
  5306. && priv->assoc_network->beacon_interval > 10)
  5307. return priv->assoc_network->beacon_interval - 10;
  5308. else
  5309. return 120;
  5310. }
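/* Common back end for active, passive and direct scans. The request is
* queued via the STATUS_SCAN_PENDING / STATUS_DIRECT_SCAN_PENDING bits if a
* scan, scan abort or RF kill is in progress; otherwise the scan command is
* built (dwell times, SSID for direct scans, channel list) and sent to the
* firmware. */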
  5311. static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
  5312. {
  5313. struct ipw_scan_request_ext scan;
  5314. int err = 0, scan_type;
  5315. if (!(priv->status & STATUS_INIT) ||
  5316. (priv->status & STATUS_EXIT_PENDING))
  5317. return 0;
  5318. mutex_lock(&priv->mutex);
  5319. if (direct && (priv->direct_scan_ssid_len == 0)) {
  5320. IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
  5321. priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
  5322. goto done;
  5323. }
  5324. if (priv->status & STATUS_SCANNING) {
  5325. IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
  5326. priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
  5327. STATUS_SCAN_PENDING;
  5328. goto done;
  5329. }
  5330. if (!(priv->status & STATUS_SCAN_FORCED) &&
  5331. priv->status & STATUS_SCAN_ABORTING) {
  5332. IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
  5333. priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
  5334. STATUS_SCAN_PENDING;
  5335. goto done;
  5336. }
  5337. if (priv->status & STATUS_RF_KILL_MASK) {
  5338. IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
  5339. priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
  5340. STATUS_SCAN_PENDING;
  5341. goto done;
  5342. }
  5343. memset(&scan, 0, sizeof(scan));
  5344. scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
  5345. if (type == IW_SCAN_TYPE_PASSIVE) {
  5346. IPW_DEBUG_WX("use passive scanning\n");
  5347. scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
  5348. scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
  5349. cpu_to_le16(ipw_passive_dwell_time(priv));
  5350. ipw_add_scan_channels(priv, &scan, scan_type);
  5351. goto send_request;
  5352. }
  5353. /* Use active scan by default. */
  5354. if (priv->config & CFG_SPEED_SCAN)
  5355. scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
  5356. cpu_to_le16(30);
  5357. else
  5358. scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
  5359. cpu_to_le16(20);
  5360. scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
  5361. cpu_to_le16(20);
  5362. scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
  5363. cpu_to_le16(ipw_passive_dwell_time(priv));
  5364. scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
  5365. #ifdef CONFIG_IPW2200_MONITOR
  5366. if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
  5367. u8 channel;
  5368. u8 band = 0;
  5369. switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
  5370. case LIBIPW_52GHZ_BAND:
  5371. band = (u8) (IPW_A_MODE << 6) | 1;
  5372. channel = priv->channel;
  5373. break;
  5374. case LIBIPW_24GHZ_BAND:
  5375. band = (u8) (IPW_B_MODE << 6) | 1;
  5376. channel = priv->channel;
  5377. break;
  5378. default:
  5379. band = (u8) (IPW_B_MODE << 6) | 1;
  5380. channel = 9;
  5381. break;
  5382. }
  5383. scan.channels_list[0] = band;
  5384. scan.channels_list[1] = channel;
  5385. ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
  5386. /* NOTE: The card will sit on this channel for this time
  5387. * period. Scan aborts are timing sensitive and frequently
  5388. * result in firmware restarts. As such, it is best to
  5389. * set a small dwell_time here and just keep re-issuing
  5390. * scans. Otherwise fast channel hopping will not actually
  5391. * hop channels.
  5392. *
  5393. * TODO: Move SPEED SCAN support to all modes and bands */
  5394. scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
  5395. cpu_to_le16(2000);
  5396. } else {
  5397. #endif /* CONFIG_IPW2200_MONITOR */
  5398. /* Honor direct scans first, otherwise if we are roaming make
  5399. * this a direct scan for the current network. Finally,
  5400. * ensure that every other scan is a fast channel hop scan */
  5401. if (direct) {
  5402. err = ipw_send_ssid(priv, priv->direct_scan_ssid,
  5403. priv->direct_scan_ssid_len);
  5404. if (err) {
  5405. IPW_DEBUG_HC("Attempt to send SSID command "
  5406. "failed\n");
  5407. goto done;
  5408. }
  5409. scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
  5410. } else if ((priv->status & STATUS_ROAMING)
  5411. || (!(priv->status & STATUS_ASSOCIATED)
  5412. && (priv->config & CFG_STATIC_ESSID)
  5413. && (le32_to_cpu(scan.full_scan_index) % 2))) {
  5414. err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
  5415. if (err) {
  5416. IPW_DEBUG_HC("Attempt to send SSID command "
  5417. "failed.\n");
  5418. goto done;
  5419. }
  5420. scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
  5421. } else
  5422. scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
  5423. ipw_add_scan_channels(priv, &scan, scan_type);
  5424. #ifdef CONFIG_IPW2200_MONITOR
  5425. }
  5426. #endif
  5427. send_request:
  5428. err = ipw_send_scan_request_ext(priv, &scan);
  5429. if (err) {
  5430. IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
  5431. goto done;
  5432. }
  5433. priv->status |= STATUS_SCANNING;
  5434. if (direct) {
  5435. priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
  5436. priv->direct_scan_ssid_len = 0;
  5437. } else
  5438. priv->status &= ~STATUS_SCAN_PENDING;
  5439. schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
  5440. done:
  5441. mutex_unlock(&priv->mutex);
  5442. return err;
  5443. }
  5444. static void ipw_request_passive_scan(struct work_struct *work)
  5445. {
  5446. struct ipw_priv *priv =
  5447. container_of(work, struct ipw_priv, request_passive_scan.work);
  5448. ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
  5449. }
  5450. static void ipw_request_scan(struct work_struct *work)
  5451. {
  5452. struct ipw_priv *priv =
  5453. container_of(work, struct ipw_priv, request_scan.work);
  5454. ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
  5455. }
  5456. static void ipw_request_direct_scan(struct work_struct *work)
  5457. {
  5458. struct ipw_priv *priv =
  5459. container_of(work, struct ipw_priv, request_direct_scan.work);
  5460. ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
  5461. }
  5462. static void ipw_bg_abort_scan(struct work_struct *work)
  5463. {
  5464. struct ipw_priv *priv =
  5465. container_of(work, struct ipw_priv, abort_scan);
  5466. mutex_lock(&priv->mutex);
  5467. ipw_abort_scan(priv);
  5468. mutex_unlock(&priv->mutex);
  5469. }
  5470. static int ipw_wpa_enable(struct ipw_priv *priv, int value)
  5471. {
  5472. /* This is called when wpa_supplicant loads and closes the driver
  5473. * interface. */
  5474. priv->ieee->wpa_enabled = value;
  5475. return 0;
  5476. }
  5477. static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
  5478. {
  5479. struct libipw_device *ieee = priv->ieee;
  5480. struct libipw_security sec = {
  5481. .flags = SEC_AUTH_MODE,
  5482. };
  5483. int ret = 0;
  5484. if (value & IW_AUTH_ALG_SHARED_KEY) {
  5485. sec.auth_mode = WLAN_AUTH_SHARED_KEY;
  5486. ieee->open_wep = 0;
  5487. } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
  5488. sec.auth_mode = WLAN_AUTH_OPEN;
  5489. ieee->open_wep = 1;
  5490. } else if (value & IW_AUTH_ALG_LEAP) {
  5491. sec.auth_mode = WLAN_AUTH_LEAP;
  5492. ieee->open_wep = 1;
  5493. } else
  5494. return -EINVAL;
  5495. if (ieee->set_security)
  5496. ieee->set_security(ieee->dev, &sec);
  5497. else
  5498. ret = -EOPNOTSUPP;
  5499. return ret;
  5500. }
  5501. static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
  5502. int wpa_ie_len)
  5503. {
  5504. /* make sure WPA is enabled */
  5505. ipw_wpa_enable(priv, 1);
  5506. }
  5507. static int ipw_set_rsn_capa(struct ipw_priv *priv,
  5508. char *capabilities, int length)
  5509. {
  5510. IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
  5511. return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
  5512. capabilities);
  5513. }
  5514. /*
  5515. * WE-18 support
  5516. */
  5517. /* SIOCSIWGENIE */
  5518. static int ipw_wx_set_genie(struct net_device *dev,
  5519. struct iw_request_info *info,
  5520. union iwreq_data *wrqu, char *extra)
  5521. {
  5522. struct ipw_priv *priv = libipw_priv(dev);
  5523. struct libipw_device *ieee = priv->ieee;
  5524. u8 *buf;
  5525. int err = 0;
  5526. if (wrqu->data.length > MAX_WPA_IE_LEN ||
  5527. (wrqu->data.length && extra == NULL))
  5528. return -EINVAL;
  5529. if (wrqu->data.length) {
  5530. buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
  5531. if (buf == NULL) {
  5532. err = -ENOMEM;
  5533. goto out;
  5534. }
  5535. kfree(ieee->wpa_ie);
  5536. ieee->wpa_ie = buf;
  5537. ieee->wpa_ie_len = wrqu->data.length;
  5538. } else {
  5539. kfree(ieee->wpa_ie);
  5540. ieee->wpa_ie = NULL;
  5541. ieee->wpa_ie_len = 0;
  5542. }
  5543. ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
  5544. out:
  5545. return err;
  5546. }
  5547. /* SIOCGIWGENIE */
  5548. static int ipw_wx_get_genie(struct net_device *dev,
  5549. struct iw_request_info *info,
  5550. union iwreq_data *wrqu, char *extra)
  5551. {
  5552. struct ipw_priv *priv = libipw_priv(dev);
  5553. struct libipw_device *ieee = priv->ieee;
  5554. int err = 0;
  5555. if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
  5556. wrqu->data.length = 0;
  5557. goto out;
  5558. }
  5559. if (wrqu->data.length < ieee->wpa_ie_len) {
  5560. err = -E2BIG;
  5561. goto out;
  5562. }
  5563. wrqu->data.length = ieee->wpa_ie_len;
  5564. memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
  5565. out:
  5566. return err;
  5567. }
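/* Map a WEXT IW_AUTH_CIPHER_* value onto the driver's SEC_LEVEL_* scale;
* returns -1 for an unknown cipher. */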
  5568. static int wext_cipher2level(int cipher)
  5569. {
  5570. switch (cipher) {
  5571. case IW_AUTH_CIPHER_NONE:
  5572. return SEC_LEVEL_0;
  5573. case IW_AUTH_CIPHER_WEP40:
  5574. case IW_AUTH_CIPHER_WEP104:
  5575. return SEC_LEVEL_1;
  5576. case IW_AUTH_CIPHER_TKIP:
  5577. return SEC_LEVEL_2;
  5578. case IW_AUTH_CIPHER_CCMP:
  5579. return SEC_LEVEL_3;
  5580. default:
  5581. return -1;
  5582. }
  5583. }
  5584. /* SIOCSIWAUTH */
  5585. static int ipw_wx_set_auth(struct net_device *dev,
  5586. struct iw_request_info *info,
  5587. union iwreq_data *wrqu, char *extra)
  5588. {
  5589. struct ipw_priv *priv = libipw_priv(dev);
  5590. struct libipw_device *ieee = priv->ieee;
  5591. struct iw_param *param = &wrqu->param;
  5592. struct lib80211_crypt_data *crypt;
  5593. unsigned long flags;
  5594. int ret = 0;
  5595. switch (param->flags & IW_AUTH_INDEX) {
  5596. case IW_AUTH_WPA_VERSION:
  5597. break;
  5598. case IW_AUTH_CIPHER_PAIRWISE:
  5599. ipw_set_hw_decrypt_unicast(priv,
  5600. wext_cipher2level(param->value));
  5601. break;
  5602. case IW_AUTH_CIPHER_GROUP:
  5603. ipw_set_hw_decrypt_multicast(priv,
  5604. wext_cipher2level(param->value));
  5605. break;
  5606. case IW_AUTH_KEY_MGMT:
  5607. /*
  5608. * ipw2200 does not use these parameters
  5609. */
  5610. break;
  5611. case IW_AUTH_TKIP_COUNTERMEASURES:
  5612. crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
  5613. if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
  5614. break;
  5615. flags = crypt->ops->get_flags(crypt->priv);
  5616. if (param->value)
  5617. flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
  5618. else
  5619. flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
  5620. crypt->ops->set_flags(flags, crypt->priv);
  5621. break;
  5622. case IW_AUTH_DROP_UNENCRYPTED:{
/* HACK:
*
* wpa_supplicant calls set_wpa_enabled when the driver
* is loaded and unloaded, regardless of whether WPA is being
* used. No other calls are made before an association is
* expected that could be used to determine whether encryption
* will be used. If encryption is not being used,
* drop_unencrypted is set to false, else true -- we can use
* this to determine whether the CAP_PRIVACY_ON bit should
* be set.
*/
  5634. struct libipw_security sec = {
  5635. .flags = SEC_ENABLED,
  5636. .enabled = param->value,
  5637. };
  5638. priv->ieee->drop_unencrypted = param->value;
  5639. /* We only change SEC_LEVEL for open mode. Others
  5640. * are set by ipw_wpa_set_encryption.
  5641. */
  5642. if (!param->value) {
  5643. sec.flags |= SEC_LEVEL;
  5644. sec.level = SEC_LEVEL_0;
  5645. } else {
  5646. sec.flags |= SEC_LEVEL;
  5647. sec.level = SEC_LEVEL_1;
  5648. }
  5649. if (priv->ieee->set_security)
  5650. priv->ieee->set_security(priv->ieee->dev, &sec);
  5651. break;
  5652. }
  5653. case IW_AUTH_80211_AUTH_ALG:
  5654. ret = ipw_wpa_set_auth_algs(priv, param->value);
  5655. break;
  5656. case IW_AUTH_WPA_ENABLED:
  5657. ret = ipw_wpa_enable(priv, param->value);
  5658. ipw_disassociate(priv);
  5659. break;
  5660. case IW_AUTH_RX_UNENCRYPTED_EAPOL:
  5661. ieee->ieee802_1x = param->value;
  5662. break;
  5663. case IW_AUTH_PRIVACY_INVOKED:
  5664. ieee->privacy_invoked = param->value;
  5665. break;
  5666. default:
  5667. return -EOPNOTSUPP;
  5668. }
  5669. return ret;
  5670. }
  5671. /* SIOCGIWAUTH */
  5672. static int ipw_wx_get_auth(struct net_device *dev,
  5673. struct iw_request_info *info,
  5674. union iwreq_data *wrqu, char *extra)
  5675. {
  5676. struct ipw_priv *priv = libipw_priv(dev);
  5677. struct libipw_device *ieee = priv->ieee;
  5678. struct lib80211_crypt_data *crypt;
  5679. struct iw_param *param = &wrqu->param;
  5680. switch (param->flags & IW_AUTH_INDEX) {
  5681. case IW_AUTH_WPA_VERSION:
  5682. case IW_AUTH_CIPHER_PAIRWISE:
  5683. case IW_AUTH_CIPHER_GROUP:
  5684. case IW_AUTH_KEY_MGMT:
  5685. /*
  5686. * wpa_supplicant will control these internally
  5687. */
  5688. return -EOPNOTSUPP;
  5689. case IW_AUTH_TKIP_COUNTERMEASURES:
  5690. crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
  5691. if (!crypt || !crypt->ops->get_flags)
  5692. break;
  5693. param->value = (crypt->ops->get_flags(crypt->priv) &
  5694. IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
  5695. break;
  5696. case IW_AUTH_DROP_UNENCRYPTED:
  5697. param->value = ieee->drop_unencrypted;
  5698. break;
  5699. case IW_AUTH_80211_AUTH_ALG:
  5700. param->value = ieee->sec.auth_mode;
  5701. break;
  5702. case IW_AUTH_WPA_ENABLED:
  5703. param->value = ieee->wpa_enabled;
  5704. break;
  5705. case IW_AUTH_RX_UNENCRYPTED_EAPOL:
  5706. param->value = ieee->ieee802_1x;
  5707. break;
  5708. case IW_AUTH_ROAMING_CONTROL:
  5709. case IW_AUTH_PRIVACY_INVOKED:
  5710. param->value = ieee->privacy_invoked;
  5711. break;
  5712. default:
  5713. return -EOPNOTSUPP;
  5714. }
  5715. return 0;
  5716. }
  5717. /* SIOCSIWENCODEEXT */
  5718. static int ipw_wx_set_encodeext(struct net_device *dev,
  5719. struct iw_request_info *info,
  5720. union iwreq_data *wrqu, char *extra)
  5721. {
  5722. struct ipw_priv *priv = libipw_priv(dev);
  5723. struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
  5724. if (hwcrypto) {
  5725. if (ext->alg == IW_ENCODE_ALG_TKIP) {
  5726. /* IPW HW can't build TKIP MIC,
  5727. host decryption still needed */
  5728. if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
  5729. priv->ieee->host_mc_decrypt = 1;
  5730. else {
  5731. priv->ieee->host_encrypt = 0;
  5732. priv->ieee->host_encrypt_msdu = 1;
  5733. priv->ieee->host_decrypt = 1;
  5734. }
  5735. } else {
  5736. priv->ieee->host_encrypt = 0;
  5737. priv->ieee->host_encrypt_msdu = 0;
  5738. priv->ieee->host_decrypt = 0;
  5739. priv->ieee->host_mc_decrypt = 0;
  5740. }
  5741. }
  5742. return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
  5743. }
  5744. /* SIOCGIWENCODEEXT */
  5745. static int ipw_wx_get_encodeext(struct net_device *dev,
  5746. struct iw_request_info *info,
  5747. union iwreq_data *wrqu, char *extra)
  5748. {
  5749. struct ipw_priv *priv = libipw_priv(dev);
  5750. return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
  5751. }
  5752. /* SIOCSIWMLME */
  5753. static int ipw_wx_set_mlme(struct net_device *dev,
  5754. struct iw_request_info *info,
  5755. union iwreq_data *wrqu, char *extra)
  5756. {
  5757. struct ipw_priv *priv = libipw_priv(dev);
  5758. struct iw_mlme *mlme = (struct iw_mlme *)extra;
  5759. switch (mlme->cmd) {
  5760. case IW_MLME_DEAUTH:
  5761. /* silently ignore */
  5762. break;
  5763. case IW_MLME_DISASSOC:
  5764. ipw_disassociate(priv);
  5765. break;
  5766. default:
  5767. return -EOPNOTSUPP;
  5768. }
  5769. return 0;
  5770. }
  5771. #ifdef CONFIG_IPW2200_QOS
  5772. /* QoS */
/*
* Get the modulation type of the current network or
* the card's current mode.
*/
  5777. static u8 ipw_qos_current_mode(struct ipw_priv * priv)
  5778. {
  5779. u8 mode = 0;
  5780. if (priv->status & STATUS_ASSOCIATED) {
  5781. unsigned long flags;
  5782. spin_lock_irqsave(&priv->ieee->lock, flags);
  5783. mode = priv->assoc_network->mode;
  5784. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  5785. } else {
  5786. mode = priv->ieee->mode;
  5787. }
  5788. IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
  5789. return mode;
  5790. }
/*
* Handle management frame beacons and probe responses.
*/
  5794. static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
  5795. int active_network,
  5796. struct libipw_network *network)
  5797. {
  5798. u32 size = sizeof(struct libipw_qos_parameters);
  5799. if (network->capability & WLAN_CAPABILITY_IBSS)
  5800. network->qos_data.active = network->qos_data.supported;
  5801. if (network->flags & NETWORK_HAS_QOS_MASK) {
  5802. if (active_network &&
  5803. (network->flags & NETWORK_HAS_QOS_PARAMETERS))
  5804. network->qos_data.active = network->qos_data.supported;
  5805. if ((network->qos_data.active == 1) && (active_network == 1) &&
  5806. (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
  5807. (network->qos_data.old_param_count !=
  5808. network->qos_data.param_count)) {
  5809. network->qos_data.old_param_count =
  5810. network->qos_data.param_count;
  5811. schedule_work(&priv->qos_activate);
  5812. IPW_DEBUG_QOS("QoS parameters change call "
  5813. "qos_activate\n");
  5814. }
  5815. } else {
  5816. if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
  5817. memcpy(&network->qos_data.parameters,
  5818. &def_parameters_CCK, size);
  5819. else
  5820. memcpy(&network->qos_data.parameters,
  5821. &def_parameters_OFDM, size);
  5822. if ((network->qos_data.active == 1) && (active_network == 1)) {
  5823. IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
  5824. schedule_work(&priv->qos_activate);
  5825. }
  5826. network->qos_data.active = 0;
  5827. network->qos_data.supported = 0;
  5828. }
  5829. if ((priv->status & STATUS_ASSOCIATED) &&
  5830. (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
  5831. if (!ether_addr_equal(network->bssid, priv->bssid))
  5832. if (network->capability & WLAN_CAPABILITY_IBSS)
  5833. if ((network->ssid_len ==
  5834. priv->assoc_network->ssid_len) &&
  5835. !memcmp(network->ssid,
  5836. priv->assoc_network->ssid,
  5837. network->ssid_len)) {
  5838. schedule_work(&priv->merge_networks);
  5839. }
  5840. }
  5841. return 0;
  5842. }
/*
* This function sets up the firmware to support QoS. It sends
* IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO.
*/
  5847. static int ipw_qos_activate(struct ipw_priv *priv,
  5848. struct libipw_qos_data *qos_network_data)
  5849. {
  5850. int err;
  5851. struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
  5852. struct libipw_qos_parameters *active_one = NULL;
  5853. u32 size = sizeof(struct libipw_qos_parameters);
  5854. u32 burst_duration;
  5855. int i;
  5856. u8 type;
  5857. type = ipw_qos_current_mode(priv);
  5858. active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
  5859. memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
  5860. active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
  5861. memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
  5862. if (qos_network_data == NULL) {
  5863. if (type == IEEE_B) {
  5864. IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
  5865. active_one = &def_parameters_CCK;
  5866. } else
  5867. active_one = &def_parameters_OFDM;
  5868. memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
  5869. burst_duration = ipw_qos_get_burst_duration(priv);
  5870. for (i = 0; i < QOS_QUEUE_NUM; i++)
  5871. qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
  5872. cpu_to_le16(burst_duration);
  5873. } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
  5874. if (type == IEEE_B) {
  5875. IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
  5876. type);
  5877. if (priv->qos_data.qos_enable == 0)
  5878. active_one = &def_parameters_CCK;
  5879. else
  5880. active_one = priv->qos_data.def_qos_parm_CCK;
  5881. } else {
  5882. if (priv->qos_data.qos_enable == 0)
  5883. active_one = &def_parameters_OFDM;
  5884. else
  5885. active_one = priv->qos_data.def_qos_parm_OFDM;
  5886. }
  5887. memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
  5888. } else {
  5889. unsigned long flags;
  5890. int active;
  5891. spin_lock_irqsave(&priv->ieee->lock, flags);
  5892. active_one = &(qos_network_data->parameters);
  5893. qos_network_data->old_param_count =
  5894. qos_network_data->param_count;
  5895. memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
  5896. active = qos_network_data->supported;
  5897. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  5898. if (active == 0) {
  5899. burst_duration = ipw_qos_get_burst_duration(priv);
  5900. for (i = 0; i < QOS_QUEUE_NUM; i++)
  5901. qos_parameters[QOS_PARAM_SET_ACTIVE].
  5902. tx_op_limit[i] = cpu_to_le16(burst_duration);
  5903. }
  5904. }
  5905. IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
  5906. err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
  5907. if (err)
  5908. IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
  5909. return err;
  5910. }
  5911. /*
  5912. * send IPW_CMD_WME_INFO to the firmware
  5913. */
  5914. static int ipw_qos_set_info_element(struct ipw_priv *priv)
  5915. {
  5916. int ret = 0;
  5917. struct libipw_qos_information_element qos_info;
  5918. if (priv == NULL)
  5919. return -1;
  5920. qos_info.elementID = QOS_ELEMENT_ID;
  5921. qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
  5922. qos_info.version = QOS_VERSION_1;
  5923. qos_info.ac_info = 0;
  5924. memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
  5925. qos_info.qui_type = QOS_OUI_TYPE;
  5926. qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
  5927. ret = ipw_send_qos_info_command(priv, &qos_info);
  5928. if (ret != 0) {
  5929. IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
  5930. }
  5931. return ret;
  5932. }
/*
* Set the QoS parameters in the association request structure.
*/
  5936. static int ipw_qos_association(struct ipw_priv *priv,
  5937. struct libipw_network *network)
  5938. {
  5939. int err = 0;
  5940. struct libipw_qos_data *qos_data = NULL;
  5941. struct libipw_qos_data ibss_data = {
  5942. .supported = 1,
  5943. .active = 1,
  5944. };
  5945. switch (priv->ieee->iw_mode) {
  5946. case IW_MODE_ADHOC:
  5947. BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
  5948. qos_data = &ibss_data;
  5949. break;
  5950. case IW_MODE_INFRA:
  5951. qos_data = &network->qos_data;
  5952. break;
  5953. default:
  5954. BUG();
  5955. break;
  5956. }
  5957. err = ipw_qos_activate(priv, qos_data);
  5958. if (err) {
  5959. priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
  5960. return err;
  5961. }
  5962. if (priv->qos_data.qos_enable && qos_data->supported) {
  5963. IPW_DEBUG_QOS("QoS will be enabled for this association\n");
  5964. priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
  5965. return ipw_qos_set_info_element(priv);
  5966. }
  5967. return 0;
  5968. }
/*
* Handle the QoS information in responses from the network. If the
* settings differ from those recorded at association time, adjust the
* QoS settings.
*/
  5974. static void ipw_qos_association_resp(struct ipw_priv *priv,
  5975. struct libipw_network *network)
  5976. {
  5977. unsigned long flags;
  5978. u32 size = sizeof(struct libipw_qos_parameters);
  5979. int set_qos_param = 0;
  5980. if ((priv == NULL) || (network == NULL) ||
  5981. (priv->assoc_network == NULL))
  5982. return;
  5983. if (!(priv->status & STATUS_ASSOCIATED))
  5984. return;
  5985. if ((priv->ieee->iw_mode != IW_MODE_INFRA))
  5986. return;
  5987. spin_lock_irqsave(&priv->ieee->lock, flags);
  5988. if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
  5989. memcpy(&priv->assoc_network->qos_data, &network->qos_data,
  5990. sizeof(struct libipw_qos_data));
  5991. priv->assoc_network->qos_data.active = 1;
  5992. if ((network->qos_data.old_param_count !=
  5993. network->qos_data.param_count)) {
  5994. set_qos_param = 1;
  5995. network->qos_data.old_param_count =
  5996. network->qos_data.param_count;
  5997. }
  5998. } else {
  5999. if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
  6000. memcpy(&priv->assoc_network->qos_data.parameters,
  6001. &def_parameters_CCK, size);
  6002. else
  6003. memcpy(&priv->assoc_network->qos_data.parameters,
  6004. &def_parameters_OFDM, size);
  6005. priv->assoc_network->qos_data.active = 0;
  6006. priv->assoc_network->qos_data.supported = 0;
  6007. set_qos_param = 1;
  6008. }
  6009. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  6010. if (set_qos_param == 1)
  6011. schedule_work(&priv->qos_activate);
  6012. }
  6013. static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
  6014. {
  6015. u32 ret = 0;
  6016. if (!priv)
  6017. return 0;
  6018. if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
  6019. ret = priv->qos_data.burst_duration_CCK;
  6020. else
  6021. ret = priv->qos_data.burst_duration_OFDM;
  6022. return ret;
  6023. }
/*
* Initialize the global QoS settings.
*/
  6027. static void ipw_qos_init(struct ipw_priv *priv, int enable,
  6028. int burst_enable, u32 burst_duration_CCK,
  6029. u32 burst_duration_OFDM)
  6030. {
  6031. priv->qos_data.qos_enable = enable;
  6032. if (priv->qos_data.qos_enable) {
  6033. priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
  6034. priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
  6035. IPW_DEBUG_QOS("QoS is enabled\n");
  6036. } else {
  6037. priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
  6038. priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
  6039. IPW_DEBUG_QOS("QoS is not enabled\n");
  6040. }
  6041. priv->qos_data.burst_enable = burst_enable;
  6042. if (burst_enable) {
  6043. priv->qos_data.burst_duration_CCK = burst_duration_CCK;
  6044. priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
  6045. } else {
  6046. priv->qos_data.burst_duration_CCK = 0;
  6047. priv->qos_data.burst_duration_OFDM = 0;
  6048. }
  6049. }
/*
* Map the packet priority to the right TX queue.
*/
  6053. static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
  6054. {
  6055. if (priority > 7 || !priv->qos_data.qos_enable)
  6056. priority = 0;
  6057. return from_priority_to_tx_queue[priority] - 1;
  6058. }
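/* Decide whether QoS applies to this transmit: QoS must be enabled and the
* associated network QoS-active; in ad-hoc mode multicast frames never use
* QoS. */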
  6059. static int ipw_is_qos_active(struct net_device *dev,
  6060. struct sk_buff *skb)
  6061. {
  6062. struct ipw_priv *priv = libipw_priv(dev);
  6063. struct libipw_qos_data *qos_data = NULL;
  6064. int active, supported;
  6065. u8 *daddr = skb->data + ETH_ALEN;
  6066. int unicast = !is_multicast_ether_addr(daddr);
  6067. if (!(priv->status & STATUS_ASSOCIATED))
  6068. return 0;
  6069. qos_data = &priv->assoc_network->qos_data;
  6070. if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
  6071. if (unicast == 0)
  6072. qos_data->active = 0;
  6073. else
  6074. qos_data->active = qos_data->supported;
  6075. }
  6076. active = qos_data->active;
  6077. supported = qos_data->supported;
  6078. IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
  6079. "unicast %d\n",
  6080. priv->qos_data.qos_enable, active, supported, unicast);
  6081. if (active && priv->qos_data.qos_enable)
  6082. return 1;
  6083. return 0;
  6084. }
/*
* Add the QoS parameters to the TX command.
*/
  6088. static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
  6089. u16 priority,
  6090. struct tfd_data *tfd)
  6091. {
  6092. int tx_queue_id = 0;
  6093. tx_queue_id = from_priority_to_tx_queue[priority] - 1;
  6094. tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
  6095. if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
  6096. tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
  6097. tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
  6098. }
  6099. return 0;
  6100. }
/*
* Background work to run the QoS activate functionality.
*/
  6104. static void ipw_bg_qos_activate(struct work_struct *work)
  6105. {
  6106. struct ipw_priv *priv =
  6107. container_of(work, struct ipw_priv, qos_activate);
  6108. mutex_lock(&priv->mutex);
  6109. if (priv->status & STATUS_ASSOCIATED)
  6110. ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
  6111. mutex_unlock(&priv->mutex);
  6112. }
  6113. static int ipw_handle_probe_response(struct net_device *dev,
  6114. struct libipw_probe_response *resp,
  6115. struct libipw_network *network)
  6116. {
  6117. struct ipw_priv *priv = libipw_priv(dev);
  6118. int active_network = ((priv->status & STATUS_ASSOCIATED) &&
  6119. (network == priv->assoc_network));
  6120. ipw_qos_handle_probe_response(priv, active_network, network);
  6121. return 0;
  6122. }
  6123. static int ipw_handle_beacon(struct net_device *dev,
  6124. struct libipw_beacon *resp,
  6125. struct libipw_network *network)
  6126. {
  6127. struct ipw_priv *priv = libipw_priv(dev);
  6128. int active_network = ((priv->status & STATUS_ASSOCIATED) &&
  6129. (network == priv->assoc_network));
  6130. ipw_qos_handle_probe_response(priv, active_network, network);
  6131. return 0;
  6132. }
  6133. static int ipw_handle_assoc_response(struct net_device *dev,
  6134. struct libipw_assoc_response *resp,
  6135. struct libipw_network *network)
  6136. {
  6137. struct ipw_priv *priv = libipw_priv(dev);
  6138. ipw_qos_association_resp(priv, network);
  6139. return 0;
  6140. }
  6141. static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
  6142. *qos_param)
  6143. {
  6144. return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
  6145. sizeof(*qos_param) * 3, qos_param);
  6146. }
  6147. static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
  6148. *qos_param)
  6149. {
  6150. return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
  6151. qos_param);
  6152. }
  6153. #endif /* CONFIG_IPW2200_QOS */
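/* Build and send the full association sequence for 'network': fixed rates,
* authentication type, RSN capabilities, preamble and capability bits, SSID,
* supported rates, system config and sensitivity, and finally the associate
* (or IBSS start / reassociate) command itself. */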
  6154. static int ipw_associate_network(struct ipw_priv *priv,
  6155. struct libipw_network *network,
  6156. struct ipw_supported_rates *rates, int roaming)
  6157. {
  6158. int err;
  6159. if (priv->config & CFG_FIXED_RATE)
  6160. ipw_set_fixed_rate(priv, network->mode);
  6161. if (!(priv->config & CFG_STATIC_ESSID)) {
  6162. priv->essid_len = min(network->ssid_len,
  6163. (u8) IW_ESSID_MAX_SIZE);
  6164. memcpy(priv->essid, network->ssid, priv->essid_len);
  6165. }
  6166. network->last_associate = jiffies;
  6167. memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
  6168. priv->assoc_request.channel = network->channel;
  6169. priv->assoc_request.auth_key = 0;
  6170. if ((priv->capability & CAP_PRIVACY_ON) &&
  6171. (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
  6172. priv->assoc_request.auth_type = AUTH_SHARED_KEY;
  6173. priv->assoc_request.auth_key = priv->ieee->sec.active_key;
  6174. if (priv->ieee->sec.level == SEC_LEVEL_1)
  6175. ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
  6176. } else if ((priv->capability & CAP_PRIVACY_ON) &&
  6177. (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
  6178. priv->assoc_request.auth_type = AUTH_LEAP;
  6179. else
  6180. priv->assoc_request.auth_type = AUTH_OPEN;
  6181. if (priv->ieee->wpa_ie_len) {
  6182. priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
  6183. ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
  6184. priv->ieee->wpa_ie_len);
  6185. }
  6186. /*
  6187. * It is valid for our ieee device to support multiple modes, but
  6188. * when it comes to associating to a given network we have to choose
  6189. * just one mode.
  6190. */
  6191. if (network->mode & priv->ieee->mode & IEEE_A)
  6192. priv->assoc_request.ieee_mode = IPW_A_MODE;
  6193. else if (network->mode & priv->ieee->mode & IEEE_G)
  6194. priv->assoc_request.ieee_mode = IPW_G_MODE;
  6195. else if (network->mode & priv->ieee->mode & IEEE_B)
  6196. priv->assoc_request.ieee_mode = IPW_B_MODE;
  6197. priv->assoc_request.capability = cpu_to_le16(network->capability);
  6198. if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
  6199. && !(priv->config & CFG_PREAMBLE_LONG)) {
  6200. priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
  6201. } else {
  6202. priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
  6203. /* Clear the short preamble if we won't be supporting it */
  6204. priv->assoc_request.capability &=
  6205. ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
  6206. }
  6207. /* Clear capability bits that aren't used in Ad Hoc */
  6208. if (priv->ieee->iw_mode == IW_MODE_ADHOC)
  6209. priv->assoc_request.capability &=
  6210. ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
  6211. IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
  6212. roaming ? "Rea" : "A",
  6213. priv->essid_len, priv->essid,
  6214. network->channel,
  6215. ipw_modes[priv->assoc_request.ieee_mode],
  6216. rates->num_rates,
  6217. (priv->assoc_request.preamble_length ==
  6218. DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
  6219. network->capability &
  6220. WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
  6221. priv->capability & CAP_PRIVACY_ON ? "on " : "off",
  6222. priv->capability & CAP_PRIVACY_ON ?
  6223. (priv->capability & CAP_SHARED_KEY ? "(shared)" :
  6224. "(open)") : "",
  6225. priv->capability & CAP_PRIVACY_ON ? " key=" : "",
  6226. priv->capability & CAP_PRIVACY_ON ?
  6227. '1' + priv->ieee->sec.active_key : '.',
  6228. priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
  6229. priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
  6230. if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
  6231. (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
  6232. priv->assoc_request.assoc_type = HC_IBSS_START;
  6233. priv->assoc_request.assoc_tsf_msw = 0;
  6234. priv->assoc_request.assoc_tsf_lsw = 0;
  6235. } else {
  6236. if (unlikely(roaming))
  6237. priv->assoc_request.assoc_type = HC_REASSOCIATE;
  6238. else
  6239. priv->assoc_request.assoc_type = HC_ASSOCIATE;
  6240. priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
  6241. priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
  6242. }
  6243. memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
  6244. if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
  6245. eth_broadcast_addr(priv->assoc_request.dest);
  6246. priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
  6247. } else {
  6248. memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
  6249. priv->assoc_request.atim_window = 0;
  6250. }
  6251. priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
  6252. err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
  6253. if (err) {
  6254. IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
  6255. return err;
  6256. }
  6257. rates->ieee_mode = priv->assoc_request.ieee_mode;
  6258. rates->purpose = IPW_RATE_CONNECT;
  6259. ipw_send_supported_rates(priv, rates);
  6260. if (priv->assoc_request.ieee_mode == IPW_G_MODE)
  6261. priv->sys_config.dot11g_auto_detection = 1;
  6262. else
  6263. priv->sys_config.dot11g_auto_detection = 0;
  6264. if (priv->ieee->iw_mode == IW_MODE_ADHOC)
  6265. priv->sys_config.answer_broadcast_ssid_probe = 1;
  6266. else
  6267. priv->sys_config.answer_broadcast_ssid_probe = 0;
  6268. err = ipw_send_system_config(priv);
  6269. if (err) {
  6270. IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
  6271. return err;
  6272. }
  6273. IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
  6274. err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
  6275. if (err) {
IPW_DEBUG_HC("Attempt to send set sensitivity command failed.\n");
  6277. return err;
  6278. }
  6279. /*
  6280. * If preemption is enabled, it is possible for the association
  6281. * to complete before we return from ipw_send_associate. Therefore
* we have to be sure to update our private data first.
  6283. */
  6284. priv->channel = network->channel;
  6285. memcpy(priv->bssid, network->bssid, ETH_ALEN);
  6286. priv->status |= STATUS_ASSOCIATING;
  6287. priv->status &= ~STATUS_SECURITY_UPDATED;
  6288. priv->assoc_network = network;
  6289. #ifdef CONFIG_IPW2200_QOS
  6290. ipw_qos_association(priv, network);
  6291. #endif
  6292. err = ipw_send_associate(priv, &priv->assoc_request);
  6293. if (err) {
  6294. IPW_DEBUG_HC("Attempt to send associate command failed.\n");
  6295. return err;
  6296. }
  6297. IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n",
  6298. priv->essid_len, priv->essid, priv->bssid);
  6299. return 0;
  6300. }
  6301. static void ipw_roam(void *data)
  6302. {
  6303. struct ipw_priv *priv = data;
  6304. struct libipw_network *network = NULL;
  6305. struct ipw_network_match match = {
  6306. .network = priv->assoc_network
  6307. };
  6308. /* The roaming process is as follows:
  6309. *
  6310. * 1. Missed beacon threshold triggers the roaming process by
  6311. * setting the status ROAM bit and requesting a scan.
  6312. * 2. When the scan completes, it schedules the ROAM work
* 3. The ROAM work looks at all of the known networks for one that
* is a better network than the one currently associated with. If
* none is found, the ROAM process is over (ROAM bit cleared)
  6316. * 4. If a better network is found, a disassociation request is
  6317. * sent.
  6318. * 5. When the disassociation completes, the roam work is again
  6319. * scheduled. The second time through, the driver is no longer
  6320. * associated, and the newly selected network is sent an
  6321. * association request.
* 6. At this point, the roaming process is complete and the ROAM
  6323. * status bit is cleared.
  6324. */
  6325. /* If we are no longer associated, and the roaming bit is no longer
  6326. * set, then we are not actively roaming, so just return */
  6327. if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
  6328. return;
  6329. if (priv->status & STATUS_ASSOCIATED) {
  6330. /* First pass through ROAM process -- look for a better
  6331. * network */
  6332. unsigned long flags;
  6333. u8 rssi = priv->assoc_network->stats.rssi;
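/* Temporarily force the current network's RSSI to the floor so that
* ipw_best_network() will prefer any other usable AP; the real value is
* restored once the list has been walked. */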
  6334. priv->assoc_network->stats.rssi = -128;
  6335. spin_lock_irqsave(&priv->ieee->lock, flags);
  6336. list_for_each_entry(network, &priv->ieee->network_list, list) {
  6337. if (network != priv->assoc_network)
  6338. ipw_best_network(priv, &match, network, 1);
  6339. }
  6340. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  6341. priv->assoc_network->stats.rssi = rssi;
  6342. if (match.network == priv->assoc_network) {
  6343. IPW_DEBUG_ASSOC("No better APs in this network to "
  6344. "roam to.\n");
  6345. priv->status &= ~STATUS_ROAMING;
  6346. ipw_debug_config(priv);
  6347. return;
  6348. }
  6349. ipw_send_disassociate(priv, 1);
  6350. priv->assoc_network = match.network;
  6351. return;
  6352. }
  6353. /* Second pass through ROAM process -- request association */
  6354. ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
  6355. ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
  6356. priv->status &= ~STATUS_ROAMING;
  6357. }
  6358. static void ipw_bg_roam(struct work_struct *work)
  6359. {
  6360. struct ipw_priv *priv =
  6361. container_of(work, struct ipw_priv, roam);
  6362. mutex_lock(&priv->mutex);
  6363. ipw_roam(priv);
  6364. mutex_unlock(&priv->mutex);
  6365. }
  6366. static int ipw_associate(void *data)
  6367. {
  6368. struct ipw_priv *priv = data;
  6369. struct libipw_network *network = NULL;
  6370. struct ipw_network_match match = {
  6371. .network = NULL
  6372. };
  6373. struct ipw_supported_rates *rates;
  6374. struct list_head *element;
  6375. unsigned long flags;
  6376. if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
  6377. IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
  6378. return 0;
  6379. }
  6380. if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
  6381. IPW_DEBUG_ASSOC("Not attempting association (already in "
  6382. "progress)\n");
  6383. return 0;
  6384. }
  6385. if (priv->status & STATUS_DISASSOCIATING) {
  6386. IPW_DEBUG_ASSOC("Not attempting association (in disassociating)\n");
  6387. schedule_work(&priv->associate);
  6388. return 0;
  6389. }
  6390. if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
  6391. IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
  6392. "initialized)\n");
  6393. return 0;
  6394. }
  6395. if (!(priv->config & CFG_ASSOCIATE) &&
  6396. !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
  6397. IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
  6398. return 0;
  6399. }
  6400. /* Protect our use of the network_list */
  6401. spin_lock_irqsave(&priv->ieee->lock, flags);
  6402. list_for_each_entry(network, &priv->ieee->network_list, list)
  6403. ipw_best_network(priv, &match, network, 0);
  6404. network = match.network;
  6405. rates = &match.rates;
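/* No usable network was found. If we are configured to create an Ad-Hoc
* network (static ESSID and channel set), build one ourselves using a
* free network entry, recycling the oldest scanned entry when the free
* list is empty. */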
  6406. if (network == NULL &&
  6407. priv->ieee->iw_mode == IW_MODE_ADHOC &&
  6408. priv->config & CFG_ADHOC_CREATE &&
  6409. priv->config & CFG_STATIC_ESSID &&
  6410. priv->config & CFG_STATIC_CHANNEL) {
  6411. /* Use oldest network if the free list is empty */
  6412. if (list_empty(&priv->ieee->network_free_list)) {
  6413. struct libipw_network *oldest = NULL;
  6414. struct libipw_network *target;
  6415. list_for_each_entry(target, &priv->ieee->network_list, list) {
  6416. if ((oldest == NULL) ||
  6417. (target->last_scanned < oldest->last_scanned))
  6418. oldest = target;
  6419. }
  6420. /* If there are no more slots, expire the oldest */
  6421. list_del(&oldest->list);
  6422. target = oldest;
  6423. IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n",
  6424. target->ssid_len, target->ssid,
  6425. target->bssid);
  6426. list_add_tail(&target->list,
  6427. &priv->ieee->network_free_list);
  6428. }
  6429. element = priv->ieee->network_free_list.next;
  6430. network = list_entry(element, struct libipw_network, list);
  6431. ipw_adhoc_create(priv, network);
  6432. rates = &priv->rates;
  6433. list_del(element);
  6434. list_add_tail(&network->list, &priv->ieee->network_list);
  6435. }
  6436. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  6437. /* If we reached the end of the list, then we don't have any valid
  6438. * matching APs */
  6439. if (!network) {
  6440. ipw_debug_config(priv);
  6441. if (!(priv->status & STATUS_SCANNING)) {
  6442. if (!(priv->config & CFG_SPEED_SCAN))
  6443. schedule_delayed_work(&priv->request_scan,
  6444. SCAN_INTERVAL);
  6445. else
  6446. schedule_delayed_work(&priv->request_scan, 0);
  6447. }
  6448. return 0;
  6449. }
  6450. ipw_associate_network(priv, network, rates, 0);
  6451. return 1;
  6452. }
  6453. static void ipw_bg_associate(struct work_struct *work)
  6454. {
  6455. struct ipw_priv *priv =
  6456. container_of(work, struct ipw_priv, associate);
  6457. mutex_lock(&priv->mutex);
  6458. ipw_associate(priv);
  6459. mutex_unlock(&priv->mutex);
  6460. }
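/* When the firmware decrypts a frame in hardware it leaves the PROTECTED
* flag set and the per-cipher overhead (CCMP header/MIC or WEP IV/ICV) in
* place. Clear the flag and strip that overhead here so the frame reaches
* the libipw Rx path looking like a plaintext frame. */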
  6461. static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
  6462. struct sk_buff *skb)
  6463. {
  6464. struct ieee80211_hdr *hdr;
  6465. u16 fc;
  6466. hdr = (struct ieee80211_hdr *)skb->data;
  6467. fc = le16_to_cpu(hdr->frame_control);
  6468. if (!(fc & IEEE80211_FCTL_PROTECTED))
  6469. return;
  6470. fc &= ~IEEE80211_FCTL_PROTECTED;
  6471. hdr->frame_control = cpu_to_le16(fc);
  6472. switch (priv->ieee->sec.level) {
  6473. case SEC_LEVEL_3:
  6474. /* Remove CCMP HDR */
  6475. memmove(skb->data + LIBIPW_3ADDR_LEN,
  6476. skb->data + LIBIPW_3ADDR_LEN + 8,
  6477. skb->len - LIBIPW_3ADDR_LEN - 8);
  6478. skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
  6479. break;
  6480. case SEC_LEVEL_2:
  6481. break;
  6482. case SEC_LEVEL_1:
  6483. /* Remove IV */
  6484. memmove(skb->data + LIBIPW_3ADDR_LEN,
  6485. skb->data + LIBIPW_3ADDR_LEN + 4,
  6486. skb->len - LIBIPW_3ADDR_LEN - 4);
  6487. skb_trim(skb, skb->len - 8); /* IV + ICV */
  6488. break;
  6489. case SEC_LEVEL_0:
  6490. break;
  6491. default:
  6492. printk(KERN_ERR "Unknown security level %d\n",
  6493. priv->ieee->sec.level);
  6494. break;
  6495. }
  6496. }
  6497. static void ipw_handle_data_packet(struct ipw_priv *priv,
  6498. struct ipw_rx_mem_buffer *rxb,
  6499. struct libipw_rx_stats *stats)
  6500. {
  6501. struct net_device *dev = priv->net_dev;
  6502. struct libipw_hdr_4addr *hdr;
  6503. struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
  6504. /* We received data from the HW, so stop the watchdog */
  6505. netif_trans_update(dev);
  6506. /* We only process data packets if the
  6507. * interface is open */
  6508. if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
  6509. skb_tailroom(rxb->skb))) {
  6510. dev->stats.rx_errors++;
  6511. priv->wstats.discard.misc++;
  6512. IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
  6513. return;
  6514. } else if (unlikely(!netif_running(priv->net_dev))) {
  6515. dev->stats.rx_dropped++;
  6516. priv->wstats.discard.misc++;
  6517. IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
  6518. return;
  6519. }
  6520. /* Advance skb->data to the start of the actual payload */
  6521. skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
  6522. /* Set the size of the skb to the size of the frame */
  6523. skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
  6524. IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
  6525. /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
  6526. hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
  6527. if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
  6528. (is_multicast_ether_addr(hdr->addr1) ?
  6529. !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
  6530. ipw_rebuild_decrypted_skb(priv, rxb->skb);
  6531. if (!libipw_rx(priv->ieee, rxb->skb, stats))
  6532. dev->stats.rx_errors++;
  6533. else { /* libipw_rx succeeded, so it now owns the SKB */
  6534. rxb->skb = NULL;
  6535. __ipw_led_activity_on(priv);
  6536. }
  6537. }
  6538. #ifdef CONFIG_IPW2200_RADIOTAP
  6539. static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
  6540. struct ipw_rx_mem_buffer *rxb,
  6541. struct libipw_rx_stats *stats)
  6542. {
  6543. struct net_device *dev = priv->net_dev;
  6544. struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
  6545. struct ipw_rx_frame *frame = &pkt->u.frame;
  6546. /* initial pull of some data */
  6547. u16 received_channel = frame->received_channel;
  6548. u8 antennaAndPhy = frame->antennaAndPhy;
  6549. s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
  6550. u16 pktrate = frame->rate;
  6551. /* Magic struct that slots into the radiotap header -- no reason
  6552. * to build this manually element by element, we can write it much
  6553. * more efficiently than we can parse it. ORDER MATTERS HERE */
  6554. struct ipw_rt_hdr *ipw_rt;
  6555. unsigned short len = le16_to_cpu(pkt->u.frame.length);
  6556. /* We received data from the HW, so stop the watchdog */
  6557. netif_trans_update(dev);
  6558. /* We only process data packets if the
  6559. * interface is open */
  6560. if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
  6561. skb_tailroom(rxb->skb))) {
  6562. dev->stats.rx_errors++;
  6563. priv->wstats.discard.misc++;
  6564. IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
  6565. return;
  6566. } else if (unlikely(!netif_running(priv->net_dev))) {
  6567. dev->stats.rx_dropped++;
  6568. priv->wstats.discard.misc++;
  6569. IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
  6570. return;
  6571. }
  6572. /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
  6573. * that now */
  6574. if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
  6575. /* FIXME: Should alloc bigger skb instead */
  6576. dev->stats.rx_dropped++;
  6577. priv->wstats.discard.misc++;
  6578. IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
  6579. return;
  6580. }
  6581. /* copy the frame itself */
  6582. memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
  6583. rxb->skb->data + IPW_RX_FRAME_SIZE, len);
  6584. ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
  6585. ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
  6586. ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* length of the radiotap header */
  6588. /* Big bitfield of all the fields we provide in radiotap */
  6589. ipw_rt->rt_hdr.it_present = cpu_to_le32(
  6590. (1 << IEEE80211_RADIOTAP_TSFT) |
  6591. (1 << IEEE80211_RADIOTAP_FLAGS) |
  6592. (1 << IEEE80211_RADIOTAP_RATE) |
  6593. (1 << IEEE80211_RADIOTAP_CHANNEL) |
  6594. (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
  6595. (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
  6596. (1 << IEEE80211_RADIOTAP_ANTENNA));
  6597. /* Zero the flags, we'll add to them as we go */
  6598. ipw_rt->rt_flags = 0;
  6599. ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
  6600. frame->parent_tsf[2] << 16 |
  6601. frame->parent_tsf[1] << 8 |
  6602. frame->parent_tsf[0]);
  6603. /* Convert signal to DBM */
  6604. ipw_rt->rt_dbmsignal = antsignal;
  6605. ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
  6606. /* Convert the channel data and set the flags */
  6607. ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
  6608. if (received_channel > 14) { /* 802.11a */
  6609. ipw_rt->rt_chbitmask =
  6610. cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
  6611. } else if (antennaAndPhy & 32) { /* 802.11b */
  6612. ipw_rt->rt_chbitmask =
  6613. cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
  6614. } else { /* 802.11g */
  6615. ipw_rt->rt_chbitmask =
  6616. cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
  6617. }
  6618. /* set the rate in multiples of 500k/s */
  6619. switch (pktrate) {
  6620. case IPW_TX_RATE_1MB:
  6621. ipw_rt->rt_rate = 2;
  6622. break;
  6623. case IPW_TX_RATE_2MB:
  6624. ipw_rt->rt_rate = 4;
  6625. break;
  6626. case IPW_TX_RATE_5MB:
  6627. ipw_rt->rt_rate = 10;
  6628. break;
  6629. case IPW_TX_RATE_6MB:
  6630. ipw_rt->rt_rate = 12;
  6631. break;
  6632. case IPW_TX_RATE_9MB:
  6633. ipw_rt->rt_rate = 18;
  6634. break;
  6635. case IPW_TX_RATE_11MB:
  6636. ipw_rt->rt_rate = 22;
  6637. break;
  6638. case IPW_TX_RATE_12MB:
  6639. ipw_rt->rt_rate = 24;
  6640. break;
  6641. case IPW_TX_RATE_18MB:
  6642. ipw_rt->rt_rate = 36;
  6643. break;
  6644. case IPW_TX_RATE_24MB:
  6645. ipw_rt->rt_rate = 48;
  6646. break;
  6647. case IPW_TX_RATE_36MB:
  6648. ipw_rt->rt_rate = 72;
  6649. break;
  6650. case IPW_TX_RATE_48MB:
  6651. ipw_rt->rt_rate = 96;
  6652. break;
  6653. case IPW_TX_RATE_54MB:
  6654. ipw_rt->rt_rate = 108;
  6655. break;
  6656. default:
  6657. ipw_rt->rt_rate = 0;
  6658. break;
  6659. }
  6660. /* antenna number */
  6661. ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
  6662. /* set the preamble flag if we have it */
  6663. if ((antennaAndPhy & 64))
  6664. ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
  6665. /* Set the size of the skb to the size of the frame */
  6666. skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
  6667. IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
  6668. if (!libipw_rx(priv->ieee, rxb->skb, stats))
  6669. dev->stats.rx_errors++;
  6670. else { /* libipw_rx succeeded, so it now owns the SKB */
  6671. rxb->skb = NULL;
  6672. /* no LED during capture */
  6673. }
  6674. }
  6675. #endif
  6676. #ifdef CONFIG_IPW2200_PROMISCUOUS
#define libipw_is_probe_response(fc) \
((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
(fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)
  6680. #define libipw_is_management(fc) \
  6681. ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
  6682. #define libipw_is_control(fc) \
  6683. ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
  6684. #define libipw_is_data(fc) \
  6685. ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
  6686. #define libipw_is_assoc_request(fc) \
  6687. ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
  6688. #define libipw_is_reassoc_request(fc) \
  6689. ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
  6690. static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
  6691. struct ipw_rx_mem_buffer *rxb,
  6692. struct libipw_rx_stats *stats)
  6693. {
  6694. struct net_device *dev = priv->prom_net_dev;
  6695. struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
  6696. struct ipw_rx_frame *frame = &pkt->u.frame;
  6697. struct ipw_rt_hdr *ipw_rt;
  6698. /* First cache any information we need before we overwrite
  6699. * the information provided in the skb from the hardware */
  6700. struct ieee80211_hdr *hdr;
  6701. u16 channel = frame->received_channel;
  6702. u8 phy_flags = frame->antennaAndPhy;
  6703. s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
  6704. s8 noise = (s8) le16_to_cpu(frame->noise);
  6705. u8 rate = frame->rate;
  6706. unsigned short len = le16_to_cpu(pkt->u.frame.length);
  6707. struct sk_buff *skb;
  6708. int hdr_only = 0;
  6709. u16 filter = priv->prom_priv->filter;
  6710. /* If the filter is set to not include Rx frames then return */
  6711. if (filter & IPW_PROM_NO_RX)
  6712. return;
  6713. /* We received data from the HW, so stop the watchdog */
  6714. netif_trans_update(dev);
  6715. if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
  6716. dev->stats.rx_errors++;
  6717. IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
  6718. return;
  6719. }
  6720. /* We only process data packets if the interface is open */
  6721. if (unlikely(!netif_running(dev))) {
  6722. dev->stats.rx_dropped++;
  6723. IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
  6724. return;
  6725. }
  6726. /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
  6727. * that now */
  6728. if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
  6729. /* FIXME: Should alloc bigger skb instead */
  6730. dev->stats.rx_dropped++;
  6731. IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
  6732. return;
  6733. }
  6734. hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
  6735. if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
  6736. if (filter & IPW_PROM_NO_MGMT)
  6737. return;
  6738. if (filter & IPW_PROM_MGMT_HEADER_ONLY)
  6739. hdr_only = 1;
  6740. } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
  6741. if (filter & IPW_PROM_NO_CTL)
  6742. return;
  6743. if (filter & IPW_PROM_CTL_HEADER_ONLY)
  6744. hdr_only = 1;
  6745. } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
  6746. if (filter & IPW_PROM_NO_DATA)
  6747. return;
  6748. if (filter & IPW_PROM_DATA_HEADER_ONLY)
  6749. hdr_only = 1;
  6750. }
  6751. /* Copy the SKB since this is for the promiscuous side */
  6752. skb = skb_copy(rxb->skb, GFP_ATOMIC);
  6753. if (skb == NULL) {
IPW_ERROR("skb_copy failed for promiscuous copy.\n");
  6755. return;
  6756. }
  6757. /* copy the frame data to write after where the radiotap header goes */
  6758. ipw_rt = (void *)skb->data;
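/* When the filter requested headers only, shorten the copy to the 802.11
* header before prepending the radiotap header. */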
  6759. if (hdr_only)
  6760. len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
  6761. memcpy(ipw_rt->payload, hdr, len);
  6762. ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
  6763. ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* length of the radiotap header */
  6765. /* Set the size of the skb to the size of the frame */
  6766. skb_put(skb, sizeof(*ipw_rt) + len);
  6767. /* Big bitfield of all the fields we provide in radiotap */
  6768. ipw_rt->rt_hdr.it_present = cpu_to_le32(
  6769. (1 << IEEE80211_RADIOTAP_TSFT) |
  6770. (1 << IEEE80211_RADIOTAP_FLAGS) |
  6771. (1 << IEEE80211_RADIOTAP_RATE) |
  6772. (1 << IEEE80211_RADIOTAP_CHANNEL) |
  6773. (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
  6774. (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
  6775. (1 << IEEE80211_RADIOTAP_ANTENNA));
  6776. /* Zero the flags, we'll add to them as we go */
  6777. ipw_rt->rt_flags = 0;
  6778. ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
  6779. frame->parent_tsf[2] << 16 |
  6780. frame->parent_tsf[1] << 8 |
  6781. frame->parent_tsf[0]);
  6782. /* Convert to DBM */
  6783. ipw_rt->rt_dbmsignal = signal;
  6784. ipw_rt->rt_dbmnoise = noise;
  6785. /* Convert the channel data and set the flags */
  6786. ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
  6787. if (channel > 14) { /* 802.11a */
  6788. ipw_rt->rt_chbitmask =
  6789. cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
  6790. } else if (phy_flags & (1 << 5)) { /* 802.11b */
  6791. ipw_rt->rt_chbitmask =
  6792. cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
  6793. } else { /* 802.11g */
  6794. ipw_rt->rt_chbitmask =
  6795. cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
  6796. }
  6797. /* set the rate in multiples of 500k/s */
  6798. switch (rate) {
  6799. case IPW_TX_RATE_1MB:
  6800. ipw_rt->rt_rate = 2;
  6801. break;
  6802. case IPW_TX_RATE_2MB:
  6803. ipw_rt->rt_rate = 4;
  6804. break;
  6805. case IPW_TX_RATE_5MB:
  6806. ipw_rt->rt_rate = 10;
  6807. break;
  6808. case IPW_TX_RATE_6MB:
  6809. ipw_rt->rt_rate = 12;
  6810. break;
  6811. case IPW_TX_RATE_9MB:
  6812. ipw_rt->rt_rate = 18;
  6813. break;
  6814. case IPW_TX_RATE_11MB:
  6815. ipw_rt->rt_rate = 22;
  6816. break;
  6817. case IPW_TX_RATE_12MB:
  6818. ipw_rt->rt_rate = 24;
  6819. break;
  6820. case IPW_TX_RATE_18MB:
  6821. ipw_rt->rt_rate = 36;
  6822. break;
  6823. case IPW_TX_RATE_24MB:
  6824. ipw_rt->rt_rate = 48;
  6825. break;
  6826. case IPW_TX_RATE_36MB:
  6827. ipw_rt->rt_rate = 72;
  6828. break;
  6829. case IPW_TX_RATE_48MB:
  6830. ipw_rt->rt_rate = 96;
  6831. break;
  6832. case IPW_TX_RATE_54MB:
  6833. ipw_rt->rt_rate = 108;
  6834. break;
  6835. default:
  6836. ipw_rt->rt_rate = 0;
  6837. break;
  6838. }
  6839. /* antenna number */
  6840. ipw_rt->rt_antenna = (phy_flags & 3);
  6841. /* set the preamble flag if we have it */
  6842. if (phy_flags & (1 << 6))
  6843. ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
  6844. IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
  6845. if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
  6846. dev->stats.rx_errors++;
  6847. dev_kfree_skb_any(skb);
  6848. }
  6849. }
  6850. #endif
  6851. static int is_network_packet(struct ipw_priv *priv,
  6852. struct libipw_hdr_4addr *header)
  6853. {
  6854. /* Filter incoming packets to determine if they are targeted toward
  6855. * this network, discarding packets coming from ourselves */
  6856. switch (priv->ieee->iw_mode) {
  6857. case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
  6858. /* packets from our adapter are dropped (echo) */
  6859. if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
  6860. return 0;
  6861. /* {broad,multi}cast packets to our BSSID go through */
  6862. if (is_multicast_ether_addr(header->addr1))
  6863. return ether_addr_equal(header->addr3, priv->bssid);
  6864. /* packets to our adapter go through */
  6865. return ether_addr_equal(header->addr1,
  6866. priv->net_dev->dev_addr);
  6867. case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
  6868. /* packets from our adapter are dropped (echo) */
  6869. if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
  6870. return 0;
  6871. /* {broad,multi}cast packets to our BSS go through */
  6872. if (is_multicast_ether_addr(header->addr1))
  6873. return ether_addr_equal(header->addr2, priv->bssid);
  6874. /* packets to our adapter go through */
  6875. return ether_addr_equal(header->addr1,
  6876. priv->net_dev->dev_addr);
  6877. }
  6878. return 1;
  6879. }
  6880. #define IPW_PACKET_RETRY_TIME HZ
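/* A frame is treated as a duplicate if it repeats the previous sequence
* number (with the same or an out-of-order fragment number) within
* IPW_PACKET_RETRY_TIME of the last frame. In IBSS mode the last
* seq/frag/time are tracked per peer in a small hash table indexed by the
* low byte of the sender's MAC address; in infrastructure mode a single
* set of counters in ipw_priv suffices. */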
  6881. static int is_duplicate_packet(struct ipw_priv *priv,
  6882. struct libipw_hdr_4addr *header)
  6883. {
  6884. u16 sc = le16_to_cpu(header->seq_ctl);
  6885. u16 seq = WLAN_GET_SEQ_SEQ(sc);
  6886. u16 frag = WLAN_GET_SEQ_FRAG(sc);
  6887. u16 *last_seq, *last_frag;
  6888. unsigned long *last_time;
  6889. switch (priv->ieee->iw_mode) {
  6890. case IW_MODE_ADHOC:
  6891. {
  6892. struct list_head *p;
  6893. struct ipw_ibss_seq *entry = NULL;
  6894. u8 *mac = header->addr2;
  6895. int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
  6896. list_for_each(p, &priv->ibss_mac_hash[index]) {
  6897. entry =
  6898. list_entry(p, struct ipw_ibss_seq, list);
  6899. if (ether_addr_equal(entry->mac, mac))
  6900. break;
  6901. }
  6902. if (p == &priv->ibss_mac_hash[index]) {
  6903. entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
  6904. if (!entry) {
  6905. IPW_ERROR
  6906. ("Cannot malloc new mac entry\n");
  6907. return 0;
  6908. }
  6909. memcpy(entry->mac, mac, ETH_ALEN);
  6910. entry->seq_num = seq;
  6911. entry->frag_num = frag;
  6912. entry->packet_time = jiffies;
  6913. list_add(&entry->list,
  6914. &priv->ibss_mac_hash[index]);
  6915. return 0;
  6916. }
  6917. last_seq = &entry->seq_num;
  6918. last_frag = &entry->frag_num;
  6919. last_time = &entry->packet_time;
  6920. break;
  6921. }
  6922. case IW_MODE_INFRA:
  6923. last_seq = &priv->last_seq_num;
  6924. last_frag = &priv->last_frag_num;
  6925. last_time = &priv->last_packet_time;
  6926. break;
  6927. default:
  6928. return 0;
  6929. }
  6930. if ((*last_seq == seq) &&
  6931. time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
  6932. if (*last_frag == frag)
  6933. goto drop;
  6934. if (*last_frag + 1 != frag)
  6935. /* out-of-order fragment */
  6936. goto drop;
  6937. } else
  6938. *last_seq = seq;
  6939. *last_frag = frag;
  6940. *last_time = jiffies;
  6941. return 0;
  6942. drop:
/* This check is commented out because the card has been observed to
* receive duplicate packets without the FCTL_RETRY bit set when running
* in IBSS mode with fragmentation enabled.
BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
  6947. return 1;
  6948. }
  6949. static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
  6950. struct ipw_rx_mem_buffer *rxb,
  6951. struct libipw_rx_stats *stats)
  6952. {
  6953. struct sk_buff *skb = rxb->skb;
  6954. struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
  6955. struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
  6956. (skb->data + IPW_RX_FRAME_SIZE);
  6957. libipw_rx_mgt(priv->ieee, header, stats);
  6958. if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
  6959. ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
  6960. IEEE80211_STYPE_PROBE_RESP) ||
  6961. (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
  6962. IEEE80211_STYPE_BEACON))) {
  6963. if (ether_addr_equal(header->addr3, priv->bssid))
  6964. ipw_add_station(priv, header->addr2);
  6965. }
  6966. if (priv->config & CFG_NET_STATS) {
  6967. IPW_DEBUG_HC("sending stat packet\n");
  6968. /* Set the size of the skb to the size of the full
  6969. * ipw header and 802.11 frame */
  6970. skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
  6971. IPW_RX_FRAME_SIZE);
  6972. /* Advance past the ipw packet header to the 802.11 frame */
  6973. skb_pull(skb, IPW_RX_FRAME_SIZE);
  6974. /* Push the libipw_rx_stats before the 802.11 frame */
  6975. memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
  6976. skb->dev = priv->ieee->dev;
  6977. /* Point raw at the libipw_stats */
  6978. skb_reset_mac_header(skb);
  6979. skb->pkt_type = PACKET_OTHERHOST;
  6980. skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
  6981. memset(skb->cb, 0, sizeof(rxb->skb->cb));
  6982. netif_rx(skb);
  6983. rxb->skb = NULL;
  6984. }
  6985. }
  6986. /*
* Main entry function for receiving a packet with 802.11 headers. This
* should be called whenever the FW has notified us that there is a new
* skb in the receive queue.
  6990. */
  6991. static void ipw_rx(struct ipw_priv *priv)
  6992. {
  6993. struct ipw_rx_mem_buffer *rxb;
  6994. struct ipw_rx_packet *pkt;
  6995. struct libipw_hdr_4addr *header;
  6996. u32 r, i;
  6997. u8 network_packet;
  6998. u8 fill_rx = 0;
  6999. r = ipw_read32(priv, IPW_RX_READ_INDEX);
  7000. ipw_read32(priv, IPW_RX_WRITE_INDEX);
  7001. i = priv->rxq->read;
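/* If more than half of the Rx queue slots are free, replenish receive
* buffers while draining entries below rather than only at the end, so
* the firmware does not run out of Rx buffers. */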
  7002. if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
  7003. fill_rx = 1;
  7004. while (i != r) {
  7005. rxb = priv->rxq->queue[i];
  7006. if (unlikely(rxb == NULL)) {
  7007. printk(KERN_CRIT "Queue not allocated!\n");
  7008. break;
  7009. }
  7010. priv->rxq->queue[i] = NULL;
  7011. dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr,
  7012. IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
  7013. pkt = (struct ipw_rx_packet *)rxb->skb->data;
  7014. IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
  7015. pkt->header.message_type,
  7016. pkt->header.rx_seq_num, pkt->header.control_bits);
  7017. switch (pkt->header.message_type) {
  7018. case RX_FRAME_TYPE: /* 802.11 frame */ {
  7019. struct libipw_rx_stats stats = {
  7020. .rssi = pkt->u.frame.rssi_dbm -
  7021. IPW_RSSI_TO_DBM,
  7022. .signal =
  7023. pkt->u.frame.rssi_dbm -
  7024. IPW_RSSI_TO_DBM + 0x100,
  7025. .noise =
  7026. le16_to_cpu(pkt->u.frame.noise),
  7027. .rate = pkt->u.frame.rate,
  7028. .mac_time = jiffies,
  7029. .received_channel =
  7030. pkt->u.frame.received_channel,
  7031. .freq =
  7032. (pkt->u.frame.
  7033. control & (1 << 0)) ?
  7034. LIBIPW_24GHZ_BAND :
  7035. LIBIPW_52GHZ_BAND,
  7036. .len = le16_to_cpu(pkt->u.frame.length),
  7037. };
  7038. if (stats.rssi != 0)
  7039. stats.mask |= LIBIPW_STATMASK_RSSI;
  7040. if (stats.signal != 0)
  7041. stats.mask |= LIBIPW_STATMASK_SIGNAL;
  7042. if (stats.noise != 0)
  7043. stats.mask |= LIBIPW_STATMASK_NOISE;
  7044. if (stats.rate != 0)
  7045. stats.mask |= LIBIPW_STATMASK_RATE;
  7046. priv->rx_packets++;
  7047. #ifdef CONFIG_IPW2200_PROMISCUOUS
  7048. if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
  7049. ipw_handle_promiscuous_rx(priv, rxb, &stats);
  7050. #endif
  7051. #ifdef CONFIG_IPW2200_MONITOR
  7052. if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
  7053. #ifdef CONFIG_IPW2200_RADIOTAP
  7054. ipw_handle_data_packet_monitor(priv,
  7055. rxb,
  7056. &stats);
  7057. #else
  7058. ipw_handle_data_packet(priv, rxb,
  7059. &stats);
  7060. #endif
  7061. break;
  7062. }
  7063. #endif
  7064. header =
  7065. (struct libipw_hdr_4addr *)(rxb->skb->
  7066. data +
  7067. IPW_RX_FRAME_SIZE);
  7068. /* TODO: Check Ad-Hoc dest/source and make sure
  7069. * that we are actually parsing these packets
  7070. * correctly -- we should probably use the
  7071. * frame control of the packet and disregard
  7072. * the current iw_mode */
  7073. network_packet =
  7074. is_network_packet(priv, header);
  7075. if (network_packet && priv->assoc_network) {
  7076. priv->assoc_network->stats.rssi =
  7077. stats.rssi;
  7078. priv->exp_avg_rssi =
  7079. exponential_average(priv->exp_avg_rssi,
  7080. stats.rssi, DEPTH_RSSI);
  7081. }
  7082. IPW_DEBUG_RX("Frame: len=%u\n",
  7083. le16_to_cpu(pkt->u.frame.length));
  7084. if (le16_to_cpu(pkt->u.frame.length) <
  7085. libipw_get_hdrlen(le16_to_cpu(
  7086. header->frame_ctl))) {
  7087. IPW_DEBUG_DROP
  7088. ("Received packet is too small. "
  7089. "Dropping.\n");
  7090. priv->net_dev->stats.rx_errors++;
  7091. priv->wstats.discard.misc++;
  7092. break;
  7093. }
  7094. switch (WLAN_FC_GET_TYPE
  7095. (le16_to_cpu(header->frame_ctl))) {
  7096. case IEEE80211_FTYPE_MGMT:
  7097. ipw_handle_mgmt_packet(priv, rxb,
  7098. &stats);
  7099. break;
  7100. case IEEE80211_FTYPE_CTL:
  7101. break;
  7102. case IEEE80211_FTYPE_DATA:
  7103. if (unlikely(!network_packet ||
  7104. is_duplicate_packet(priv,
  7105. header)))
  7106. {
  7107. IPW_DEBUG_DROP("Dropping: "
  7108. "%pM, "
  7109. "%pM, "
  7110. "%pM\n",
  7111. header->addr1,
  7112. header->addr2,
  7113. header->addr3);
  7114. break;
  7115. }
  7116. ipw_handle_data_packet(priv, rxb,
  7117. &stats);
  7118. break;
  7119. }
  7120. break;
  7121. }
  7122. case RX_HOST_NOTIFICATION_TYPE:{
  7123. IPW_DEBUG_RX
  7124. ("Notification: subtype=%02X flags=%02X size=%d\n",
  7125. pkt->u.notification.subtype,
  7126. pkt->u.notification.flags,
  7127. le16_to_cpu(pkt->u.notification.size));
  7128. ipw_rx_notification(priv, &pkt->u.notification);
  7129. break;
  7130. }
  7131. default:
  7132. IPW_DEBUG_RX("Bad Rx packet of type %d\n",
  7133. pkt->header.message_type);
  7134. break;
  7135. }
  7136. /* For now we just don't re-use anything. We can tweak this
  7137. * later to try and re-use notification packets and SKBs that
  7138. * fail to Rx correctly */
  7139. if (rxb->skb != NULL) {
  7140. dev_kfree_skb_any(rxb->skb);
  7141. rxb->skb = NULL;
  7142. }
  7143. dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr,
  7144. IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
  7145. list_add_tail(&rxb->list, &priv->rxq->rx_used);
  7146. i = (i + 1) % RX_QUEUE_SIZE;
/* If there are a lot of unused frames, restock the Rx queue
* so the ucode won't assert */
  7149. if (fill_rx) {
  7150. priv->rxq->read = i;
  7151. ipw_rx_queue_replenish(priv);
  7152. }
  7153. }
  7154. /* Backtrack one entry */
  7155. priv->rxq->read = i;
  7156. ipw_rx_queue_restock(priv);
  7157. }
  7158. #define DEFAULT_RTS_THRESHOLD 2304U
  7159. #define MIN_RTS_THRESHOLD 1U
  7160. #define MAX_RTS_THRESHOLD 2304U
  7161. #define DEFAULT_BEACON_INTERVAL 100U
  7162. #define DEFAULT_SHORT_RETRY_LIMIT 7U
  7163. #define DEFAULT_LONG_RETRY_LIMIT 4U
  7164. /*
  7165. * ipw_sw_reset
  7166. * @option: options to control different reset behaviour
  7167. * 0 = reset everything except the 'disable' module_param
  7168. * 1 = reset everything and print out driver info (for probe only)
  7169. * 2 = reset everything
  7170. */
  7171. static int ipw_sw_reset(struct ipw_priv *priv, int option)
  7172. {
  7173. int band, modulation;
  7174. int old_mode = priv->ieee->iw_mode;
  7175. /* Initialize module parameter values here */
  7176. priv->config = 0;
  7177. /* We default to disabling the LED code as right now it causes
  7178. * too many systems to lock up... */
  7179. if (!led_support)
  7180. priv->config |= CFG_NO_LED;
  7181. if (associate)
  7182. priv->config |= CFG_ASSOCIATE;
  7183. else
  7184. IPW_DEBUG_INFO("Auto associate disabled.\n");
  7185. if (auto_create)
  7186. priv->config |= CFG_ADHOC_CREATE;
  7187. else
  7188. IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
  7189. priv->config &= ~CFG_STATIC_ESSID;
  7190. priv->essid_len = 0;
  7191. memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
  7192. if (disable && option) {
  7193. priv->status |= STATUS_RF_KILL_SW;
  7194. IPW_DEBUG_INFO("Radio disabled.\n");
  7195. }
  7196. if (default_channel != 0) {
  7197. priv->config |= CFG_STATIC_CHANNEL;
  7198. priv->channel = default_channel;
  7199. IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
  7200. /* TODO: Validate that provided channel is in range */
  7201. }
  7202. #ifdef CONFIG_IPW2200_QOS
  7203. ipw_qos_init(priv, qos_enable, qos_burst_enable,
  7204. burst_duration_CCK, burst_duration_OFDM);
  7205. #endif /* CONFIG_IPW2200_QOS */
  7206. switch (network_mode) {
  7207. case 1:
  7208. priv->ieee->iw_mode = IW_MODE_ADHOC;
  7209. priv->net_dev->type = ARPHRD_ETHER;
  7210. break;
  7211. #ifdef CONFIG_IPW2200_MONITOR
  7212. case 2:
  7213. priv->ieee->iw_mode = IW_MODE_MONITOR;
  7214. #ifdef CONFIG_IPW2200_RADIOTAP
  7215. priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
  7216. #else
  7217. priv->net_dev->type = ARPHRD_IEEE80211;
  7218. #endif
  7219. break;
  7220. #endif
  7221. default:
  7222. case 0:
  7223. priv->net_dev->type = ARPHRD_ETHER;
  7224. priv->ieee->iw_mode = IW_MODE_INFRA;
  7225. break;
  7226. }
  7227. if (hwcrypto) {
  7228. priv->ieee->host_encrypt = 0;
  7229. priv->ieee->host_encrypt_msdu = 0;
  7230. priv->ieee->host_decrypt = 0;
  7231. priv->ieee->host_mc_decrypt = 0;
  7232. }
  7233. IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
/* IPW2200/2915 is able to do hardware fragmentation. */
  7235. priv->ieee->host_open_frag = 0;
  7236. if ((priv->pci_dev->device == 0x4223) ||
  7237. (priv->pci_dev->device == 0x4224)) {
  7238. if (option == 1)
  7239. printk(KERN_INFO DRV_NAME
  7240. ": Detected Intel PRO/Wireless 2915ABG Network "
  7241. "Connection\n");
  7242. priv->ieee->abg_true = 1;
  7243. band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
  7244. modulation = LIBIPW_OFDM_MODULATION |
  7245. LIBIPW_CCK_MODULATION;
  7246. priv->adapter = IPW_2915ABG;
  7247. priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
  7248. } else {
  7249. if (option == 1)
  7250. printk(KERN_INFO DRV_NAME
  7251. ": Detected Intel PRO/Wireless 2200BG Network "
  7252. "Connection\n");
  7253. priv->ieee->abg_true = 0;
  7254. band = LIBIPW_24GHZ_BAND;
  7255. modulation = LIBIPW_OFDM_MODULATION |
  7256. LIBIPW_CCK_MODULATION;
  7257. priv->adapter = IPW_2200BG;
  7258. priv->ieee->mode = IEEE_G | IEEE_B;
  7259. }
  7260. priv->ieee->freq_band = band;
  7261. priv->ieee->modulation = modulation;
  7262. priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
  7263. priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
  7264. priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
  7265. priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
  7266. priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
  7267. priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
  7268. /* If power management is turned on, default to AC mode */
  7269. priv->power_mode = IPW_POWER_AC;
  7270. priv->tx_power = IPW_TX_POWER_DEFAULT;
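/* Report whether the wireless mode survived the reset: non-zero when
* iw_mode is unchanged, zero when the reset switched modes. */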
  7271. return old_mode == priv->ieee->iw_mode;
  7272. }
  7273. /*
  7274. * This file defines the Wireless Extension handlers. It does not
  7275. * define any methods of hardware manipulation and relies on the
  7276. * functions defined in ipw_main to provide the HW interaction.
  7277. *
  7278. * The exception to this is the use of the ipw_get_ordinal()
  7279. * function used to poll the hardware vs. making unnecessary calls.
  7280. *
  7281. */
  7282. static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
  7283. {
  7284. if (channel == 0) {
  7285. IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
  7286. priv->config &= ~CFG_STATIC_CHANNEL;
  7287. IPW_DEBUG_ASSOC("Attempting to associate with new "
  7288. "parameters.\n");
  7289. ipw_associate(priv);
  7290. return 0;
  7291. }
  7292. priv->config |= CFG_STATIC_CHANNEL;
  7293. if (priv->channel == channel) {
  7294. IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
  7295. channel);
  7296. return 0;
  7297. }
  7298. IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
  7299. priv->channel = channel;
  7300. #ifdef CONFIG_IPW2200_MONITOR
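/* In monitor mode there is no association to redo; just abort any scan
* in progress and wait briefly (up to ~10 ms) for it to stop before
* returning. */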
  7301. if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
  7302. int i;
  7303. if (priv->status & STATUS_SCANNING) {
  7304. IPW_DEBUG_SCAN("Scan abort triggered due to "
  7305. "channel change.\n");
  7306. ipw_abort_scan(priv);
  7307. }
  7308. for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
  7309. udelay(10);
  7310. if (priv->status & STATUS_SCANNING)
  7311. IPW_DEBUG_SCAN("Still scanning...\n");
  7312. else
IPW_DEBUG_SCAN("Took %d us to abort current scan\n",
(1000 - i) * 10);
  7315. return 0;
  7316. }
  7317. #endif /* CONFIG_IPW2200_MONITOR */
  7318. /* Network configuration changed -- force [re]association */
  7319. IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
  7320. if (!ipw_disassociate(priv))
  7321. ipw_associate(priv);
  7322. return 0;
  7323. }
  7324. static int ipw_wx_set_freq(struct net_device *dev,
  7325. struct iw_request_info *info,
  7326. union iwreq_data *wrqu, char *extra)
  7327. {
  7328. struct ipw_priv *priv = libipw_priv(dev);
  7329. const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
  7330. struct iw_freq *fwrq = &wrqu->freq;
  7331. int ret = 0, i;
  7332. u8 channel, flags;
  7333. int band;
  7334. if (fwrq->m == 0) {
  7335. IPW_DEBUG_WX("SET Freq/Channel -> any\n");
  7336. mutex_lock(&priv->mutex);
  7337. ret = ipw_set_channel(priv, 0);
  7338. mutex_unlock(&priv->mutex);
  7339. return ret;
  7340. }
  7341. /* if setting by freq convert to channel */
  7342. if (fwrq->e == 1) {
  7343. channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
  7344. if (channel == 0)
  7345. return -EINVAL;
  7346. } else
  7347. channel = fwrq->m;
  7348. if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
  7349. return -EINVAL;
  7350. if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
  7351. i = libipw_channel_to_index(priv->ieee, channel);
  7352. if (i == -1)
  7353. return -EINVAL;
  7354. flags = (band == LIBIPW_24GHZ_BAND) ?
  7355. geo->bg[i].flags : geo->a[i].flags;
  7356. if (flags & LIBIPW_CH_PASSIVE_ONLY) {
IPW_DEBUG_WX("Invalid Ad-Hoc channel (passive scan only)\n");
  7358. return -EINVAL;
  7359. }
  7360. }
  7361. IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
  7362. mutex_lock(&priv->mutex);
  7363. ret = ipw_set_channel(priv, channel);
  7364. mutex_unlock(&priv->mutex);
  7365. return ret;
  7366. }
  7367. static int ipw_wx_get_freq(struct net_device *dev,
  7368. struct iw_request_info *info,
  7369. union iwreq_data *wrqu, char *extra)
  7370. {
  7371. struct ipw_priv *priv = libipw_priv(dev);
  7372. wrqu->freq.e = 0;
  7373. /* If we are associated, trying to associate, or have a statically
  7374. * configured CHANNEL then return that; otherwise return ANY */
  7375. mutex_lock(&priv->mutex);
  7376. if (priv->config & CFG_STATIC_CHANNEL ||
  7377. priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
  7378. int i;
  7379. i = libipw_channel_to_index(priv->ieee, priv->channel);
  7380. BUG_ON(i == -1);
  7381. wrqu->freq.e = 1;
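/* geo frequencies are stored in MHz; Wireless Extensions encode a
* frequency as m x 10^e Hz, so with e == 1 the MHz value is scaled by
* 100000. */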
  7382. switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
  7383. case LIBIPW_52GHZ_BAND:
  7384. wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
  7385. break;
  7386. case LIBIPW_24GHZ_BAND:
  7387. wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
  7388. break;
  7389. default:
  7390. BUG();
  7391. }
  7392. } else
  7393. wrqu->freq.m = 0;
  7394. mutex_unlock(&priv->mutex);
  7395. IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
  7396. return 0;
  7397. }
  7398. static int ipw_wx_set_mode(struct net_device *dev,
  7399. struct iw_request_info *info,
  7400. union iwreq_data *wrqu, char *extra)
  7401. {
  7402. struct ipw_priv *priv = libipw_priv(dev);
  7403. int err = 0;
  7404. IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
  7405. switch (wrqu->mode) {
  7406. #ifdef CONFIG_IPW2200_MONITOR
  7407. case IW_MODE_MONITOR:
  7408. #endif
  7409. case IW_MODE_ADHOC:
  7410. case IW_MODE_INFRA:
  7411. break;
  7412. case IW_MODE_AUTO:
  7413. wrqu->mode = IW_MODE_INFRA;
  7414. break;
  7415. default:
  7416. return -EINVAL;
  7417. }
  7418. if (wrqu->mode == priv->ieee->iw_mode)
  7419. return 0;
  7420. mutex_lock(&priv->mutex);
  7421. ipw_sw_reset(priv, 0);
  7422. #ifdef CONFIG_IPW2200_MONITOR
  7423. if (priv->ieee->iw_mode == IW_MODE_MONITOR)
  7424. priv->net_dev->type = ARPHRD_ETHER;
  7425. if (wrqu->mode == IW_MODE_MONITOR)
  7426. #ifdef CONFIG_IPW2200_RADIOTAP
  7427. priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
  7428. #else
  7429. priv->net_dev->type = ARPHRD_IEEE80211;
  7430. #endif
  7431. #endif /* CONFIG_IPW2200_MONITOR */
  7432. /* Free the existing firmware and reset the fw_loaded
  7433. * flag so ipw_load() will bring in the new firmware */
  7434. free_firmware();
  7435. priv->ieee->iw_mode = wrqu->mode;
  7436. schedule_work(&priv->adapter_restart);
  7437. mutex_unlock(&priv->mutex);
  7438. return err;
  7439. }
  7440. static int ipw_wx_get_mode(struct net_device *dev,
  7441. struct iw_request_info *info,
  7442. union iwreq_data *wrqu, char *extra)
  7443. {
  7444. struct ipw_priv *priv = libipw_priv(dev);
  7445. mutex_lock(&priv->mutex);
  7446. wrqu->mode = priv->ieee->iw_mode;
  7447. IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
  7448. mutex_unlock(&priv->mutex);
  7449. return 0;
  7450. }
/* Values are in microseconds */
  7452. static const s32 timeout_duration[] = {
  7453. 350000,
  7454. 250000,
  7455. 75000,
  7456. 37000,
  7457. 25000,
  7458. };
  7459. static const s32 period_duration[] = {
  7460. 400000,
  7461. 700000,
  7462. 1000000,
  7463. 1000000,
  7464. 1000000
  7465. };
  7466. static int ipw_wx_get_range(struct net_device *dev,
  7467. struct iw_request_info *info,
  7468. union iwreq_data *wrqu, char *extra)
  7469. {
  7470. struct ipw_priv *priv = libipw_priv(dev);
  7471. struct iw_range *range = (struct iw_range *)extra;
  7472. const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
  7473. int i = 0, j;
  7474. wrqu->data.length = sizeof(*range);
  7475. memset(range, 0, sizeof(*range));
/* 54 Mbps == ~27 Mbps real throughput (802.11g) */
  7477. range->throughput = 27 * 1000 * 1000;
  7478. range->max_qual.qual = 100;
  7479. /* TODO: Find real max RSSI and stick here */
  7480. range->max_qual.level = 0;
  7481. range->max_qual.noise = 0;
  7482. range->max_qual.updated = 7; /* Updated all three */
  7483. range->avg_qual.qual = 70;
  7484. /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
  7485. range->avg_qual.level = 0; /* FIXME to real average level */
  7486. range->avg_qual.noise = 0;
  7487. range->avg_qual.updated = 7; /* Updated all three */
  7488. mutex_lock(&priv->mutex);
  7489. range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
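/* supported_rates entries are in 500 kbps units with the basic-rate flag
* in bit 7, hence the 0x7F mask and the 500000 scale factor. */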
  7490. for (i = 0; i < range->num_bitrates; i++)
  7491. range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
  7492. 500000;
  7493. range->max_rts = DEFAULT_RTS_THRESHOLD;
  7494. range->min_frag = MIN_FRAG_THRESHOLD;
  7495. range->max_frag = MAX_FRAG_THRESHOLD;
  7496. range->encoding_size[0] = 5;
  7497. range->encoding_size[1] = 13;
  7498. range->num_encoding_sizes = 2;
  7499. range->max_encoding_tokens = WEP_KEYS;
  7500. /* Set the Wireless Extension versions */
  7501. range->we_version_compiled = WIRELESS_EXT;
  7502. range->we_version_source = 18;
  7503. i = 0;
  7504. if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
  7505. for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
  7506. if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
  7507. (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
  7508. continue;
  7509. range->freq[i].i = geo->bg[j].channel;
  7510. range->freq[i].m = geo->bg[j].freq * 100000;
  7511. range->freq[i].e = 1;
  7512. i++;
  7513. }
  7514. }
  7515. if (priv->ieee->mode & IEEE_A) {
  7516. for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
  7517. if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
  7518. (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
  7519. continue;
  7520. range->freq[i].i = geo->a[j].channel;
  7521. range->freq[i].m = geo->a[j].freq * 100000;
  7522. range->freq[i].e = 1;
  7523. i++;
  7524. }
  7525. }
  7526. range->num_channels = i;
  7527. range->num_frequency = i;
  7528. mutex_unlock(&priv->mutex);
  7529. /* Event capability (kernel + driver) */
  7530. range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
  7531. IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
  7532. IW_EVENT_CAPA_MASK(SIOCGIWAP) |
  7533. IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
  7534. range->event_capa[1] = IW_EVENT_CAPA_K_1;
  7535. range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
  7536. IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
  7537. range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
  7538. IPW_DEBUG_WX("GET Range\n");
  7539. return 0;
  7540. }
  7541. static int ipw_wx_set_wap(struct net_device *dev,
  7542. struct iw_request_info *info,
  7543. union iwreq_data *wrqu, char *extra)
  7544. {
  7545. struct ipw_priv *priv = libipw_priv(dev);
  7546. if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
  7547. return -EINVAL;
  7548. mutex_lock(&priv->mutex);
  7549. if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
  7550. is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
  7551. /* we disable mandatory BSSID association */
  7552. IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
  7553. priv->config &= ~CFG_STATIC_BSSID;
  7554. IPW_DEBUG_ASSOC("Attempting to associate with new "
  7555. "parameters.\n");
  7556. ipw_associate(priv);
  7557. mutex_unlock(&priv->mutex);
  7558. return 0;
  7559. }
  7560. priv->config |= CFG_STATIC_BSSID;
  7561. if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
  7562. IPW_DEBUG_WX("BSSID set to current BSSID.\n");
  7563. mutex_unlock(&priv->mutex);
  7564. return 0;
  7565. }
  7566. IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
  7567. wrqu->ap_addr.sa_data);
  7568. memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
  7569. /* Network configuration changed -- force [re]association */
  7570. IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
  7571. if (!ipw_disassociate(priv))
  7572. ipw_associate(priv);
  7573. mutex_unlock(&priv->mutex);
  7574. return 0;
  7575. }
  7576. static int ipw_wx_get_wap(struct net_device *dev,
  7577. struct iw_request_info *info,
  7578. union iwreq_data *wrqu, char *extra)
  7579. {
  7580. struct ipw_priv *priv = libipw_priv(dev);
  7581. /* If we are associated, trying to associate, or have a statically
  7582. * configured BSSID then return that; otherwise return ANY */
  7583. mutex_lock(&priv->mutex);
  7584. if (priv->config & CFG_STATIC_BSSID ||
  7585. priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
  7586. wrqu->ap_addr.sa_family = ARPHRD_ETHER;
  7587. memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
  7588. } else
  7589. eth_zero_addr(wrqu->ap_addr.sa_data);
  7590. IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
  7591. wrqu->ap_addr.sa_data);
  7592. mutex_unlock(&priv->mutex);
  7593. return 0;
  7594. }
  7595. static int ipw_wx_set_essid(struct net_device *dev,
  7596. struct iw_request_info *info,
  7597. union iwreq_data *wrqu, char *extra)
  7598. {
  7599. struct ipw_priv *priv = libipw_priv(dev);
  7600. int length;
  7601. mutex_lock(&priv->mutex);
  7602. if (!wrqu->essid.flags)
  7603. {
  7604. IPW_DEBUG_WX("Setting ESSID to ANY\n");
  7605. ipw_disassociate(priv);
  7606. priv->config &= ~CFG_STATIC_ESSID;
  7607. ipw_associate(priv);
  7608. mutex_unlock(&priv->mutex);
  7609. return 0;
  7610. }
  7611. length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
  7612. priv->config |= CFG_STATIC_ESSID;
  7613. if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
  7614. && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
  7615. IPW_DEBUG_WX("ESSID set to current ESSID.\n");
  7616. mutex_unlock(&priv->mutex);
  7617. return 0;
  7618. }
  7619. IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length);
  7620. priv->essid_len = length;
  7621. memcpy(priv->essid, extra, priv->essid_len);
  7622. /* Network configuration changed -- force [re]association */
  7623. IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
  7624. if (!ipw_disassociate(priv))
  7625. ipw_associate(priv);
  7626. mutex_unlock(&priv->mutex);
  7627. return 0;
  7628. }
  7629. static int ipw_wx_get_essid(struct net_device *dev,
  7630. struct iw_request_info *info,
  7631. union iwreq_data *wrqu, char *extra)
  7632. {
  7633. struct ipw_priv *priv = libipw_priv(dev);
  7634. /* If we are associated, trying to associate, or have a statically
  7635. * configured ESSID then return that; otherwise return ANY */
  7636. mutex_lock(&priv->mutex);
  7637. if (priv->config & CFG_STATIC_ESSID ||
  7638. priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
  7639. IPW_DEBUG_WX("Getting essid: '%*pE'\n",
  7640. priv->essid_len, priv->essid);
  7641. memcpy(extra, priv->essid, priv->essid_len);
  7642. wrqu->essid.length = priv->essid_len;
  7643. wrqu->essid.flags = 1; /* active */
  7644. } else {
  7645. IPW_DEBUG_WX("Getting essid: ANY\n");
  7646. wrqu->essid.length = 0;
  7647. wrqu->essid.flags = 0; /* active */
  7648. }
  7649. mutex_unlock(&priv->mutex);
  7650. return 0;
  7651. }
  7652. static int ipw_wx_set_nick(struct net_device *dev,
  7653. struct iw_request_info *info,
  7654. union iwreq_data *wrqu, char *extra)
  7655. {
  7656. struct ipw_priv *priv = libipw_priv(dev);
  7657. IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
  7658. if (wrqu->data.length > IW_ESSID_MAX_SIZE)
  7659. return -E2BIG;
  7660. mutex_lock(&priv->mutex);
  7661. wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
  7662. memset(priv->nick, 0, sizeof(priv->nick));
  7663. memcpy(priv->nick, extra, wrqu->data.length);
  7664. IPW_DEBUG_TRACE("<<\n");
  7665. mutex_unlock(&priv->mutex);
  7666. return 0;
  7667. }
  7668. static int ipw_wx_get_nick(struct net_device *dev,
  7669. struct iw_request_info *info,
  7670. union iwreq_data *wrqu, char *extra)
  7671. {
  7672. struct ipw_priv *priv = libipw_priv(dev);
  7673. IPW_DEBUG_WX("Getting nick\n");
  7674. mutex_lock(&priv->mutex);
  7675. wrqu->data.length = strlen(priv->nick);
  7676. memcpy(extra, priv->nick, wrqu->data.length);
  7677. wrqu->data.flags = 1; /* active */
  7678. mutex_unlock(&priv->mutex);
  7679. return 0;
  7680. }
  7681. static int ipw_wx_set_sens(struct net_device *dev,
  7682. struct iw_request_info *info,
  7683. union iwreq_data *wrqu, char *extra)
  7684. {
  7685. struct ipw_priv *priv = libipw_priv(dev);
  7686. int err = 0;
  7687. IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
  7688. IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
  7689. mutex_lock(&priv->mutex);
  7690. if (wrqu->sens.fixed == 0)
  7691. {
  7692. priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
  7693. priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
  7694. goto out;
  7695. }
  7696. if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
  7697. (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
  7698. err = -EINVAL;
  7699. goto out;
  7700. }
  7701. priv->roaming_threshold = wrqu->sens.value;
  7702. priv->disassociate_threshold = 3*wrqu->sens.value;
  7703. out:
  7704. mutex_unlock(&priv->mutex);
  7705. return err;
  7706. }
  7707. static int ipw_wx_get_sens(struct net_device *dev,
  7708. struct iw_request_info *info,
  7709. union iwreq_data *wrqu, char *extra)
  7710. {
  7711. struct ipw_priv *priv = libipw_priv(dev);
  7712. mutex_lock(&priv->mutex);
  7713. wrqu->sens.fixed = 1;
  7714. wrqu->sens.value = priv->roaming_threshold;
  7715. mutex_unlock(&priv->mutex);
  7716. IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
  7717. wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
  7718. return 0;
  7719. }
  7720. static int ipw_wx_set_rate(struct net_device *dev,
  7721. struct iw_request_info *info,
  7722. union iwreq_data *wrqu, char *extra)
  7723. {
  7724. /* TODO: We should use semaphores or locks for access to priv */
  7725. struct ipw_priv *priv = libipw_priv(dev);
  7726. u32 target_rate = wrqu->bitrate.value;
  7727. u32 fixed, mask;
/* value = -1, fixed = 0 means auto only, so we should use all rates offered by the AP */
/* value = X, fixed = 1 means only rate X */
/* value = X, fixed = 0 means all rates lower than or equal to X */
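/* The checks below walk the supported rates in ascending order: with
* fixed == 0 every rate up to and including the target is added to the
* mask, with fixed == 1 only the exact match is added. A rate matching
* none of the entries falls through to -EINVAL. */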
  7731. if (target_rate == -1) {
  7732. fixed = 0;
  7733. mask = LIBIPW_DEFAULT_RATES_MASK;
  7734. /* Now we should reassociate */
  7735. goto apply;
  7736. }
  7737. mask = 0;
  7738. fixed = wrqu->bitrate.fixed;
  7739. if (target_rate == 1000000 || !fixed)
  7740. mask |= LIBIPW_CCK_RATE_1MB_MASK;
  7741. if (target_rate == 1000000)
  7742. goto apply;
  7743. if (target_rate == 2000000 || !fixed)
  7744. mask |= LIBIPW_CCK_RATE_2MB_MASK;
  7745. if (target_rate == 2000000)
  7746. goto apply;
  7747. if (target_rate == 5500000 || !fixed)
  7748. mask |= LIBIPW_CCK_RATE_5MB_MASK;
  7749. if (target_rate == 5500000)
  7750. goto apply;
  7751. if (target_rate == 6000000 || !fixed)
  7752. mask |= LIBIPW_OFDM_RATE_6MB_MASK;
  7753. if (target_rate == 6000000)
  7754. goto apply;
  7755. if (target_rate == 9000000 || !fixed)
  7756. mask |= LIBIPW_OFDM_RATE_9MB_MASK;
  7757. if (target_rate == 9000000)
  7758. goto apply;
  7759. if (target_rate == 11000000 || !fixed)
  7760. mask |= LIBIPW_CCK_RATE_11MB_MASK;
  7761. if (target_rate == 11000000)
  7762. goto apply;
  7763. if (target_rate == 12000000 || !fixed)
  7764. mask |= LIBIPW_OFDM_RATE_12MB_MASK;
  7765. if (target_rate == 12000000)
  7766. goto apply;
  7767. if (target_rate == 18000000 || !fixed)
  7768. mask |= LIBIPW_OFDM_RATE_18MB_MASK;
  7769. if (target_rate == 18000000)
  7770. goto apply;
  7771. if (target_rate == 24000000 || !fixed)
  7772. mask |= LIBIPW_OFDM_RATE_24MB_MASK;
  7773. if (target_rate == 24000000)
  7774. goto apply;
  7775. if (target_rate == 36000000 || !fixed)
  7776. mask |= LIBIPW_OFDM_RATE_36MB_MASK;
  7777. if (target_rate == 36000000)
  7778. goto apply;
  7779. if (target_rate == 48000000 || !fixed)
  7780. mask |= LIBIPW_OFDM_RATE_48MB_MASK;
  7781. if (target_rate == 48000000)
  7782. goto apply;
  7783. if (target_rate == 54000000 || !fixed)
  7784. mask |= LIBIPW_OFDM_RATE_54MB_MASK;
  7785. if (target_rate == 54000000)
  7786. goto apply;
  7787. IPW_DEBUG_WX("invalid rate specified, returning error\n");
  7788. return -EINVAL;
  7789. apply:
  7790. IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
  7791. mask, fixed ? "fixed" : "sub-rates");
  7792. mutex_lock(&priv->mutex);
  7793. if (mask == LIBIPW_DEFAULT_RATES_MASK) {
  7794. priv->config &= ~CFG_FIXED_RATE;
  7795. ipw_set_fixed_rate(priv, priv->ieee->mode);
  7796. } else
  7797. priv->config |= CFG_FIXED_RATE;
  7798. if (priv->rates_mask == mask) {
  7799. IPW_DEBUG_WX("Mask set to current mask.\n");
  7800. mutex_unlock(&priv->mutex);
  7801. return 0;
  7802. }
  7803. priv->rates_mask = mask;
  7804. /* Network configuration changed -- force [re]association */
  7805. IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
  7806. if (!ipw_disassociate(priv))
  7807. ipw_associate(priv);
  7808. mutex_unlock(&priv->mutex);
  7809. return 0;
  7810. }
  7811. static int ipw_wx_get_rate(struct net_device *dev,
  7812. struct iw_request_info *info,
  7813. union iwreq_data *wrqu, char *extra)
  7814. {
  7815. struct ipw_priv *priv = libipw_priv(dev);
  7816. mutex_lock(&priv->mutex);
  7817. wrqu->bitrate.value = priv->last_rate;
  7818. wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
  7819. mutex_unlock(&priv->mutex);
  7820. IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
  7821. return 0;
  7822. }
  7823. static int ipw_wx_set_rts(struct net_device *dev,
  7824. struct iw_request_info *info,
  7825. union iwreq_data *wrqu, char *extra)
  7826. {
  7827. struct ipw_priv *priv = libipw_priv(dev);
  7828. mutex_lock(&priv->mutex);
  7829. if (wrqu->rts.disabled || !wrqu->rts.fixed)
  7830. priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
  7831. else {
  7832. if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
  7833. wrqu->rts.value > MAX_RTS_THRESHOLD) {
  7834. mutex_unlock(&priv->mutex);
  7835. return -EINVAL;
  7836. }
  7837. priv->rts_threshold = wrqu->rts.value;
  7838. }
  7839. ipw_send_rts_threshold(priv, priv->rts_threshold);
  7840. mutex_unlock(&priv->mutex);
  7841. IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
  7842. return 0;
  7843. }
  7844. static int ipw_wx_get_rts(struct net_device *dev,
  7845. struct iw_request_info *info,
  7846. union iwreq_data *wrqu, char *extra)
  7847. {
  7848. struct ipw_priv *priv = libipw_priv(dev);
  7849. mutex_lock(&priv->mutex);
  7850. wrqu->rts.value = priv->rts_threshold;
  7851. wrqu->rts.fixed = 0; /* no auto select */
  7852. wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
  7853. mutex_unlock(&priv->mutex);
  7854. IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
  7855. return 0;
  7856. }
  7857. static int ipw_wx_set_txpow(struct net_device *dev,
  7858. struct iw_request_info *info,
  7859. union iwreq_data *wrqu, char *extra)
  7860. {
  7861. struct ipw_priv *priv = libipw_priv(dev);
  7862. int err = 0;
  7863. mutex_lock(&priv->mutex);
  7864. if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
  7865. err = -EINPROGRESS;
  7866. goto out;
  7867. }
  7868. if (!wrqu->power.fixed)
  7869. wrqu->power.value = IPW_TX_POWER_DEFAULT;
  7870. if (wrqu->power.flags != IW_TXPOW_DBM) {
  7871. err = -EINVAL;
  7872. goto out;
  7873. }
  7874. if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
  7875. (wrqu->power.value < IPW_TX_POWER_MIN)) {
  7876. err = -EINVAL;
  7877. goto out;
  7878. }
  7879. priv->tx_power = wrqu->power.value;
  7880. err = ipw_set_tx_power(priv);
  7881. out:
  7882. mutex_unlock(&priv->mutex);
  7883. return err;
  7884. }
  7885. static int ipw_wx_get_txpow(struct net_device *dev,
  7886. struct iw_request_info *info,
  7887. union iwreq_data *wrqu, char *extra)
  7888. {
  7889. struct ipw_priv *priv = libipw_priv(dev);
  7890. mutex_lock(&priv->mutex);
  7891. wrqu->power.value = priv->tx_power;
  7892. wrqu->power.fixed = 1;
  7893. wrqu->power.flags = IW_TXPOW_DBM;
  7894. wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
  7895. mutex_unlock(&priv->mutex);
  7896. IPW_DEBUG_WX("GET TX Power -> %s %d\n",
  7897. wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
  7898. return 0;
  7899. }
  7900. static int ipw_wx_set_frag(struct net_device *dev,
  7901. struct iw_request_info *info,
  7902. union iwreq_data *wrqu, char *extra)
  7903. {
  7904. struct ipw_priv *priv = libipw_priv(dev);
  7905. mutex_lock(&priv->mutex);
  7906. if (wrqu->frag.disabled || !wrqu->frag.fixed)
  7907. priv->ieee->fts = DEFAULT_FTS;
  7908. else {
  7909. if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
  7910. wrqu->frag.value > MAX_FRAG_THRESHOLD) {
  7911. mutex_unlock(&priv->mutex);
  7912. return -EINVAL;
  7913. }
  7914. priv->ieee->fts = wrqu->frag.value & ~0x1;
  7915. }
  7916. ipw_send_frag_threshold(priv, wrqu->frag.value);
  7917. mutex_unlock(&priv->mutex);
  7918. IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
  7919. return 0;
  7920. }
  7921. static int ipw_wx_get_frag(struct net_device *dev,
  7922. struct iw_request_info *info,
  7923. union iwreq_data *wrqu, char *extra)
  7924. {
  7925. struct ipw_priv *priv = libipw_priv(dev);
  7926. mutex_lock(&priv->mutex);
  7927. wrqu->frag.value = priv->ieee->fts;
  7928. wrqu->frag.fixed = 0; /* no auto select */
  7929. wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
  7930. mutex_unlock(&priv->mutex);
  7931. IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
  7932. return 0;
  7933. }
  7934. static int ipw_wx_set_retry(struct net_device *dev,
  7935. struct iw_request_info *info,
  7936. union iwreq_data *wrqu, char *extra)
  7937. {
  7938. struct ipw_priv *priv = libipw_priv(dev);
  7939. if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
  7940. return -EINVAL;
  7941. if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
  7942. return 0;
  7943. if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
  7944. return -EINVAL;
  7945. mutex_lock(&priv->mutex);
  7946. if (wrqu->retry.flags & IW_RETRY_SHORT)
  7947. priv->short_retry_limit = (u8) wrqu->retry.value;
  7948. else if (wrqu->retry.flags & IW_RETRY_LONG)
  7949. priv->long_retry_limit = (u8) wrqu->retry.value;
  7950. else {
  7951. priv->short_retry_limit = (u8) wrqu->retry.value;
  7952. priv->long_retry_limit = (u8) wrqu->retry.value;
  7953. }
  7954. ipw_send_retry_limit(priv, priv->short_retry_limit,
  7955. priv->long_retry_limit);
  7956. mutex_unlock(&priv->mutex);
  7957. IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
  7958. priv->short_retry_limit, priv->long_retry_limit);
  7959. return 0;
  7960. }
  7961. static int ipw_wx_get_retry(struct net_device *dev,
  7962. struct iw_request_info *info,
  7963. union iwreq_data *wrqu, char *extra)
  7964. {
  7965. struct ipw_priv *priv = libipw_priv(dev);
  7966. mutex_lock(&priv->mutex);
  7967. wrqu->retry.disabled = 0;
  7968. if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
  7969. mutex_unlock(&priv->mutex);
  7970. return -EINVAL;
  7971. }
  7972. if (wrqu->retry.flags & IW_RETRY_LONG) {
  7973. wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
  7974. wrqu->retry.value = priv->long_retry_limit;
  7975. } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
  7976. wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
  7977. wrqu->retry.value = priv->short_retry_limit;
  7978. } else {
  7979. wrqu->retry.flags = IW_RETRY_LIMIT;
  7980. wrqu->retry.value = priv->short_retry_limit;
  7981. }
  7982. mutex_unlock(&priv->mutex);
  7983. IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
  7984. return 0;
  7985. }
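/* SIOCSIWSCAN handler: choose which deferred scan work to schedule -- a
 * directed scan when an ESSID was supplied, a passive scan when explicitly
 * requested, otherwise a normal active broadcast scan. */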
  7986. static int ipw_wx_set_scan(struct net_device *dev,
  7987. struct iw_request_info *info,
  7988. union iwreq_data *wrqu, char *extra)
  7989. {
  7990. struct ipw_priv *priv = libipw_priv(dev);
  7991. struct iw_scan_req *req = (struct iw_scan_req *)extra;
  7992. struct delayed_work *work = NULL;
  7993. mutex_lock(&priv->mutex);
  7994. priv->user_requested_scan = 1;
  7995. if (wrqu->data.length == sizeof(struct iw_scan_req)) {
  7996. if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
  7997. int len = min((int)req->essid_len,
  7998. (int)sizeof(priv->direct_scan_ssid));
  7999. memcpy(priv->direct_scan_ssid, req->essid, len);
  8000. priv->direct_scan_ssid_len = len;
  8001. work = &priv->request_direct_scan;
  8002. } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
  8003. work = &priv->request_passive_scan;
  8004. }
  8005. } else {
  8006. /* Normal active broadcast scan */
  8007. work = &priv->request_scan;
  8008. }
  8009. mutex_unlock(&priv->mutex);
  8010. IPW_DEBUG_WX("Start scan\n");
  8011. schedule_delayed_work(work, 0);
  8012. return 0;
  8013. }
  8014. static int ipw_wx_get_scan(struct net_device *dev,
  8015. struct iw_request_info *info,
  8016. union iwreq_data *wrqu, char *extra)
  8017. {
  8018. struct ipw_priv *priv = libipw_priv(dev);
  8019. return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
  8020. }
  8021. static int ipw_wx_set_encode(struct net_device *dev,
  8022. struct iw_request_info *info,
  8023. union iwreq_data *wrqu, char *key)
  8024. {
  8025. struct ipw_priv *priv = libipw_priv(dev);
  8026. int ret;
  8027. u32 cap = priv->capability;
  8028. mutex_lock(&priv->mutex);
  8029. ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
8030. /* In IBSS mode, we need to notify the firmware to update
8031. * the beacon info after the capability has changed. */
  8032. if (cap != priv->capability &&
  8033. priv->ieee->iw_mode == IW_MODE_ADHOC &&
  8034. priv->status & STATUS_ASSOCIATED)
  8035. ipw_disassociate(priv);
  8036. mutex_unlock(&priv->mutex);
  8037. return ret;
  8038. }
  8039. static int ipw_wx_get_encode(struct net_device *dev,
  8040. struct iw_request_info *info,
  8041. union iwreq_data *wrqu, char *key)
  8042. {
  8043. struct ipw_priv *priv = libipw_priv(dev);
  8044. return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
  8045. }
  8046. static int ipw_wx_set_power(struct net_device *dev,
  8047. struct iw_request_info *info,
  8048. union iwreq_data *wrqu, char *extra)
  8049. {
  8050. struct ipw_priv *priv = libipw_priv(dev);
  8051. int err;
  8052. mutex_lock(&priv->mutex);
  8053. if (wrqu->power.disabled) {
  8054. priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
  8055. err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
  8056. if (err) {
  8057. IPW_DEBUG_WX("failed setting power mode.\n");
  8058. mutex_unlock(&priv->mutex);
  8059. return err;
  8060. }
  8061. IPW_DEBUG_WX("SET Power Management Mode -> off\n");
  8062. mutex_unlock(&priv->mutex);
  8063. return 0;
  8064. }
  8065. switch (wrqu->power.flags & IW_POWER_MODE) {
  8066. case IW_POWER_ON: /* If not specified */
  8067. case IW_POWER_MODE: /* If set all mask */
  8068. case IW_POWER_ALL_R: /* If explicitly state all */
  8069. break;
  8070. default: /* Otherwise we don't support it */
  8071. IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
  8072. wrqu->power.flags);
  8073. mutex_unlock(&priv->mutex);
  8074. return -EOPNOTSUPP;
  8075. }
  8076. /* If the user hasn't specified a power management mode yet, default
  8077. * to BATTERY */
  8078. if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
  8079. priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
  8080. else
  8081. priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
  8082. err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
  8083. if (err) {
  8084. IPW_DEBUG_WX("failed setting power mode.\n");
  8085. mutex_unlock(&priv->mutex);
  8086. return err;
  8087. }
  8088. IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
  8089. mutex_unlock(&priv->mutex);
  8090. return 0;
  8091. }
  8092. static int ipw_wx_get_power(struct net_device *dev,
  8093. struct iw_request_info *info,
  8094. union iwreq_data *wrqu, char *extra)
  8095. {
  8096. struct ipw_priv *priv = libipw_priv(dev);
  8097. mutex_lock(&priv->mutex);
  8098. if (!(priv->power_mode & IPW_POWER_ENABLED))
  8099. wrqu->power.disabled = 1;
  8100. else
  8101. wrqu->power.disabled = 0;
  8102. mutex_unlock(&priv->mutex);
  8103. IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
  8104. return 0;
  8105. }
  8106. static int ipw_wx_set_powermode(struct net_device *dev,
  8107. struct iw_request_info *info,
  8108. union iwreq_data *wrqu, char *extra)
  8109. {
  8110. struct ipw_priv *priv = libipw_priv(dev);
  8111. int mode = *(int *)extra;
  8112. int err;
  8113. mutex_lock(&priv->mutex);
  8114. if ((mode < 1) || (mode > IPW_POWER_LIMIT))
  8115. mode = IPW_POWER_AC;
  8116. if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
  8117. err = ipw_send_power_mode(priv, mode);
  8118. if (err) {
  8119. IPW_DEBUG_WX("failed setting power mode.\n");
  8120. mutex_unlock(&priv->mutex);
  8121. return err;
  8122. }
  8123. priv->power_mode = IPW_POWER_ENABLED | mode;
  8124. }
  8125. mutex_unlock(&priv->mutex);
  8126. return 0;
  8127. }
  8128. #define MAX_WX_STRING 80
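/* Private "get_power" handler: format the current power-save level into a
 * human-readable string of at most MAX_WX_STRING bytes in 'extra'. */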
  8129. static int ipw_wx_get_powermode(struct net_device *dev,
  8130. struct iw_request_info *info,
  8131. union iwreq_data *wrqu, char *extra)
  8132. {
  8133. struct ipw_priv *priv = libipw_priv(dev);
  8134. int level = IPW_POWER_LEVEL(priv->power_mode);
  8135. char *p = extra;
  8136. p += scnprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
  8137. switch (level) {
  8138. case IPW_POWER_AC:
  8139. p += scnprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
  8140. break;
  8141. case IPW_POWER_BATTERY:
  8142. p += scnprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
  8143. break;
  8144. default:
  8145. p += scnprintf(p, MAX_WX_STRING - (p - extra),
  8146. "(Timeout %dms, Period %dms)",
  8147. timeout_duration[level - 1] / 1000,
  8148. period_duration[level - 1] / 1000);
  8149. }
  8150. if (!(priv->power_mode & IPW_POWER_ENABLED))
  8151. p += scnprintf(p, MAX_WX_STRING - (p - extra), " OFF");
  8152. wrqu->data.length = p - extra + 1;
  8153. return 0;
  8154. }
  8155. static int ipw_wx_set_wireless_mode(struct net_device *dev,
  8156. struct iw_request_info *info,
  8157. union iwreq_data *wrqu, char *extra)
  8158. {
  8159. struct ipw_priv *priv = libipw_priv(dev);
  8160. int mode = *(int *)extra;
  8161. u8 band = 0, modulation = 0;
  8162. if (mode == 0 || mode & ~IEEE_MODE_MASK) {
  8163. IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
  8164. return -EINVAL;
  8165. }
  8166. mutex_lock(&priv->mutex);
  8167. if (priv->adapter == IPW_2915ABG) {
  8168. priv->ieee->abg_true = 1;
  8169. if (mode & IEEE_A) {
  8170. band |= LIBIPW_52GHZ_BAND;
  8171. modulation |= LIBIPW_OFDM_MODULATION;
  8172. } else
  8173. priv->ieee->abg_true = 0;
  8174. } else {
  8175. if (mode & IEEE_A) {
  8176. IPW_WARNING("Attempt to set 2200BG into "
  8177. "802.11a mode\n");
  8178. mutex_unlock(&priv->mutex);
  8179. return -EINVAL;
  8180. }
  8181. priv->ieee->abg_true = 0;
  8182. }
  8183. if (mode & IEEE_B) {
  8184. band |= LIBIPW_24GHZ_BAND;
  8185. modulation |= LIBIPW_CCK_MODULATION;
  8186. } else
  8187. priv->ieee->abg_true = 0;
  8188. if (mode & IEEE_G) {
  8189. band |= LIBIPW_24GHZ_BAND;
  8190. modulation |= LIBIPW_OFDM_MODULATION;
  8191. } else
  8192. priv->ieee->abg_true = 0;
  8193. priv->ieee->mode = mode;
  8194. priv->ieee->freq_band = band;
  8195. priv->ieee->modulation = modulation;
  8196. init_supported_rates(priv, &priv->rates);
  8197. /* Network configuration changed -- force [re]association */
  8198. IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
  8199. if (!ipw_disassociate(priv)) {
  8200. ipw_send_supported_rates(priv, &priv->rates);
  8201. ipw_associate(priv);
  8202. }
  8203. /* Update the band LEDs */
  8204. ipw_led_band_on(priv);
  8205. IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
  8206. mode & IEEE_A ? 'a' : '.',
  8207. mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
  8208. mutex_unlock(&priv->mutex);
  8209. return 0;
  8210. }
  8211. static int ipw_wx_get_wireless_mode(struct net_device *dev,
  8212. struct iw_request_info *info,
  8213. union iwreq_data *wrqu, char *extra)
  8214. {
  8215. struct ipw_priv *priv = libipw_priv(dev);
  8216. mutex_lock(&priv->mutex);
  8217. switch (priv->ieee->mode) {
  8218. case IEEE_A:
  8219. strncpy(extra, "802.11a (1)", MAX_WX_STRING);
  8220. break;
  8221. case IEEE_B:
  8222. strncpy(extra, "802.11b (2)", MAX_WX_STRING);
  8223. break;
  8224. case IEEE_A | IEEE_B:
  8225. strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
  8226. break;
  8227. case IEEE_G:
  8228. strncpy(extra, "802.11g (4)", MAX_WX_STRING);
  8229. break;
  8230. case IEEE_A | IEEE_G:
  8231. strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
  8232. break;
  8233. case IEEE_B | IEEE_G:
  8234. strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
  8235. break;
  8236. case IEEE_A | IEEE_B | IEEE_G:
  8237. strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
  8238. break;
  8239. default:
  8240. strncpy(extra, "unknown", MAX_WX_STRING);
  8241. break;
  8242. }
  8243. extra[MAX_WX_STRING - 1] = '\0';
  8244. IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
  8245. wrqu->data.length = strlen(extra) + 1;
  8246. mutex_unlock(&priv->mutex);
  8247. return 0;
  8248. }
  8249. static int ipw_wx_set_preamble(struct net_device *dev,
  8250. struct iw_request_info *info,
  8251. union iwreq_data *wrqu, char *extra)
  8252. {
  8253. struct ipw_priv *priv = libipw_priv(dev);
  8254. int mode = *(int *)extra;
  8255. mutex_lock(&priv->mutex);
  8256. /* Switching from SHORT -> LONG requires a disassociation */
  8257. if (mode == 1) {
  8258. if (!(priv->config & CFG_PREAMBLE_LONG)) {
  8259. priv->config |= CFG_PREAMBLE_LONG;
  8260. /* Network configuration changed -- force [re]association */
  8261. IPW_DEBUG_ASSOC
  8262. ("[re]association triggered due to preamble change.\n");
  8263. if (!ipw_disassociate(priv))
  8264. ipw_associate(priv);
  8265. }
  8266. goto done;
  8267. }
  8268. if (mode == 0) {
  8269. priv->config &= ~CFG_PREAMBLE_LONG;
  8270. goto done;
  8271. }
  8272. mutex_unlock(&priv->mutex);
  8273. return -EINVAL;
  8274. done:
  8275. mutex_unlock(&priv->mutex);
  8276. return 0;
  8277. }
  8278. static int ipw_wx_get_preamble(struct net_device *dev,
  8279. struct iw_request_info *info,
  8280. union iwreq_data *wrqu, char *extra)
  8281. {
  8282. struct ipw_priv *priv = libipw_priv(dev);
  8283. mutex_lock(&priv->mutex);
  8284. if (priv->config & CFG_PREAMBLE_LONG)
  8285. snprintf(wrqu->name, IFNAMSIZ, "long (1)");
  8286. else
  8287. snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
  8288. mutex_unlock(&priv->mutex);
  8289. return 0;
  8290. }
  8291. #ifdef CONFIG_IPW2200_MONITOR
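/* Private "monitor" handler: parms[0] enables/disables monitor mode and
 * parms[1] selects the channel.  Switching the net_device type requires an
 * adapter restart, which is scheduled here. */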
  8292. static int ipw_wx_set_monitor(struct net_device *dev,
  8293. struct iw_request_info *info,
  8294. union iwreq_data *wrqu, char *extra)
  8295. {
  8296. struct ipw_priv *priv = libipw_priv(dev);
  8297. int *parms = (int *)extra;
  8298. int enable = (parms[0] > 0);
  8299. mutex_lock(&priv->mutex);
  8300. IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
  8301. if (enable) {
  8302. if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
  8303. #ifdef CONFIG_IPW2200_RADIOTAP
  8304. priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
  8305. #else
  8306. priv->net_dev->type = ARPHRD_IEEE80211;
  8307. #endif
  8308. schedule_work(&priv->adapter_restart);
  8309. }
  8310. ipw_set_channel(priv, parms[1]);
  8311. } else {
  8312. if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
  8313. mutex_unlock(&priv->mutex);
  8314. return 0;
  8315. }
  8316. priv->net_dev->type = ARPHRD_ETHER;
  8317. schedule_work(&priv->adapter_restart);
  8318. }
  8319. mutex_unlock(&priv->mutex);
  8320. return 0;
  8321. }
  8322. #endif /* CONFIG_IPW2200_MONITOR */
  8323. static int ipw_wx_reset(struct net_device *dev,
  8324. struct iw_request_info *info,
  8325. union iwreq_data *wrqu, char *extra)
  8326. {
  8327. struct ipw_priv *priv = libipw_priv(dev);
  8328. IPW_DEBUG_WX("RESET\n");
  8329. schedule_work(&priv->adapter_restart);
  8330. return 0;
  8331. }
  8332. static int ipw_wx_sw_reset(struct net_device *dev,
  8333. struct iw_request_info *info,
  8334. union iwreq_data *wrqu, char *extra)
  8335. {
  8336. struct ipw_priv *priv = libipw_priv(dev);
  8337. union iwreq_data wrqu_sec = {
  8338. .encoding = {
  8339. .flags = IW_ENCODE_DISABLED,
  8340. },
  8341. };
  8342. int ret;
  8343. IPW_DEBUG_WX("SW_RESET\n");
  8344. mutex_lock(&priv->mutex);
  8345. ret = ipw_sw_reset(priv, 2);
  8346. if (!ret) {
  8347. free_firmware();
  8348. ipw_adapter_restart(priv);
  8349. }
  8350. /* The SW reset bit might have been toggled on by the 'disable'
  8351. * module parameter, so take appropriate action */
  8352. ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
  8353. mutex_unlock(&priv->mutex);
  8354. libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
  8355. mutex_lock(&priv->mutex);
  8356. if (!(priv->status & STATUS_RF_KILL_MASK)) {
  8357. /* Configuration likely changed -- force [re]association */
  8358. IPW_DEBUG_ASSOC("[re]association triggered due to sw "
  8359. "reset.\n");
  8360. if (!ipw_disassociate(priv))
  8361. ipw_associate(priv);
  8362. }
  8363. mutex_unlock(&priv->mutex);
  8364. return 0;
  8365. }
  8366. /* Rebase the WE IOCTLs to zero for the handler array */
  8367. static iw_handler ipw_wx_handlers[] = {
  8368. IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
  8369. IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
  8370. IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
  8371. IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
  8372. IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
  8373. IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
  8374. IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
  8375. IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
  8376. IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
  8377. IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
  8378. IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
  8379. IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
  8380. IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
  8381. IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
  8382. IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
  8383. IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
  8384. IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
  8385. IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
  8386. IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
  8387. IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
  8388. IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
  8389. IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
  8390. IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
  8391. IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
  8392. IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
  8393. IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
  8394. IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
  8395. IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
  8396. IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
  8397. IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
  8398. IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
  8399. IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
  8400. IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
  8401. IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
  8402. IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
  8403. IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
  8404. IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
  8405. IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
  8406. IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
  8407. IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
  8408. IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
  8409. };
  8410. enum {
  8411. IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
  8412. IPW_PRIV_GET_POWER,
  8413. IPW_PRIV_SET_MODE,
  8414. IPW_PRIV_GET_MODE,
  8415. IPW_PRIV_SET_PREAMBLE,
  8416. IPW_PRIV_GET_PREAMBLE,
  8417. IPW_PRIV_RESET,
  8418. IPW_PRIV_SW_RESET,
  8419. #ifdef CONFIG_IPW2200_MONITOR
  8420. IPW_PRIV_SET_MONITOR,
  8421. #endif
  8422. };
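/* iwpriv descriptions of the private ioctls above.  The low bits of
 * set_args/get_args encode the argument count, the remaining bits the
 * argument type and the fixed-size flag. */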
  8423. static struct iw_priv_args ipw_priv_args[] = {
  8424. {
  8425. .cmd = IPW_PRIV_SET_POWER,
  8426. .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
  8427. .name = "set_power"},
  8428. {
  8429. .cmd = IPW_PRIV_GET_POWER,
  8430. .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
  8431. .name = "get_power"},
  8432. {
  8433. .cmd = IPW_PRIV_SET_MODE,
  8434. .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
  8435. .name = "set_mode"},
  8436. {
  8437. .cmd = IPW_PRIV_GET_MODE,
  8438. .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
  8439. .name = "get_mode"},
  8440. {
  8441. .cmd = IPW_PRIV_SET_PREAMBLE,
  8442. .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
  8443. .name = "set_preamble"},
  8444. {
  8445. .cmd = IPW_PRIV_GET_PREAMBLE,
  8446. .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
  8447. .name = "get_preamble"},
  8448. {
  8449. IPW_PRIV_RESET,
  8450. IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
  8451. {
  8452. IPW_PRIV_SW_RESET,
  8453. IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
  8454. #ifdef CONFIG_IPW2200_MONITOR
  8455. {
  8456. IPW_PRIV_SET_MONITOR,
  8457. IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
  8458. #endif /* CONFIG_IPW2200_MONITOR */
  8459. };
  8460. static iw_handler ipw_priv_handler[] = {
  8461. ipw_wx_set_powermode,
  8462. ipw_wx_get_powermode,
  8463. ipw_wx_set_wireless_mode,
  8464. ipw_wx_get_wireless_mode,
  8465. ipw_wx_set_preamble,
  8466. ipw_wx_get_preamble,
  8467. ipw_wx_reset,
  8468. ipw_wx_sw_reset,
  8469. #ifdef CONFIG_IPW2200_MONITOR
  8470. ipw_wx_set_monitor,
  8471. #endif
  8472. };
  8473. static const struct iw_handler_def ipw_wx_handler_def = {
  8474. .standard = ipw_wx_handlers,
  8475. .num_standard = ARRAY_SIZE(ipw_wx_handlers),
  8476. .num_private = ARRAY_SIZE(ipw_priv_handler),
  8477. .num_private_args = ARRAY_SIZE(ipw_priv_args),
  8478. .private = ipw_priv_handler,
  8479. .private_args = ipw_priv_args,
  8480. .get_wireless_stats = ipw_get_wireless_stats,
  8481. };
  8482. /*
  8483. * Get wireless statistics.
  8484. * Called by /proc/net/wireless
  8485. * Also called by SIOCGIWSTATS
  8486. */
  8487. static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
  8488. {
  8489. struct ipw_priv *priv = libipw_priv(dev);
  8490. struct iw_statistics *wstats;
  8491. wstats = &priv->wstats;
  8492. /* if hw is disabled, then ipw_get_ordinal() can't be called.
  8493. * netdev->get_wireless_stats seems to be called before fw is
  8494. * initialized. STATUS_ASSOCIATED will only be set if the hw is up
8495. * and associated; if not associated, the values are all meaningless
  8496. * anyway, so set them all to NULL and INVALID */
  8497. if (!(priv->status & STATUS_ASSOCIATED)) {
  8498. wstats->miss.beacon = 0;
  8499. wstats->discard.retries = 0;
  8500. wstats->qual.qual = 0;
  8501. wstats->qual.level = 0;
  8502. wstats->qual.noise = 0;
  8503. wstats->qual.updated = 7;
  8504. wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
  8505. IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
  8506. return wstats;
  8507. }
  8508. wstats->qual.qual = priv->quality;
  8509. wstats->qual.level = priv->exp_avg_rssi;
  8510. wstats->qual.noise = priv->exp_avg_noise;
  8511. wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
  8512. IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
  8513. wstats->miss.beacon = average_value(&priv->average_missed_beacons);
  8514. wstats->discard.retries = priv->last_tx_failures;
  8515. wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
  8516. /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
  8517. goto fail_get_ordinal;
  8518. wstats->discard.retries += tx_retry; */
  8519. return wstats;
  8520. }
  8521. /* net device stuff */
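/* Fill in the firmware system-configuration defaults sent by ipw_config().
 * Antenna diversity follows the 'antenna' module parameter, clamped to a
 * valid value. */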
  8522. static void init_sys_config(struct ipw_sys_config *sys_config)
  8523. {
  8524. memset(sys_config, 0, sizeof(struct ipw_sys_config));
  8525. sys_config->bt_coexistence = 0;
  8526. sys_config->answer_broadcast_ssid_probe = 0;
  8527. sys_config->accept_all_data_frames = 0;
  8528. sys_config->accept_non_directed_frames = 1;
  8529. sys_config->exclude_unicast_unencrypted = 0;
  8530. sys_config->disable_unicast_decryption = 1;
  8531. sys_config->exclude_multicast_unencrypted = 0;
  8532. sys_config->disable_multicast_decryption = 1;
  8533. if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
  8534. antenna = CFG_SYS_ANTENNA_BOTH;
  8535. sys_config->antenna_diversity = antenna;
  8536. sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
  8537. sys_config->dot11g_auto_detection = 0;
  8538. sys_config->enable_cts_to_self = 0;
  8539. sys_config->bt_coexist_collision_thr = 0;
  8540. sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
  8541. sys_config->silence_threshold = 0x1e;
  8542. }
  8543. static int ipw_net_open(struct net_device *dev)
  8544. {
  8545. IPW_DEBUG_INFO("dev->open\n");
  8546. netif_start_queue(dev);
  8547. return 0;
  8548. }
  8549. static int ipw_net_stop(struct net_device *dev)
  8550. {
  8551. IPW_DEBUG_INFO("dev->close\n");
  8552. netif_stop_queue(dev);
  8553. return 0;
  8554. }
  8555. /*
8556. TODO:
8557. modify to send one TFD per fragment instead of using chunking; otherwise
8558. we need to heavily modify libipw_skb_to_txb.
  8559. */
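/* Map a libipw txb onto a firmware TFD: pick the destination station, copy
 * the 802.11 header, apply the per-frame security and QoS flags, DMA-map up
 * to NUM_TFD_CHUNKS - 2 fragments (coalescing any remainder into a single
 * skb), then advance the queue write index to kick the DMA engine. */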
  8560. static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
  8561. int pri)
  8562. {
  8563. struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
  8564. txb->fragments[0]->data;
  8565. int i = 0;
  8566. struct tfd_frame *tfd;
  8567. #ifdef CONFIG_IPW2200_QOS
  8568. int tx_id = ipw_get_tx_queue_number(priv, pri);
  8569. struct clx2_tx_queue *txq = &priv->txq[tx_id];
  8570. #else
  8571. struct clx2_tx_queue *txq = &priv->txq[0];
  8572. #endif
  8573. struct clx2_queue *q = &txq->q;
  8574. u8 id, hdr_len, unicast;
  8575. int fc;
  8576. if (!(priv->status & STATUS_ASSOCIATED))
  8577. goto drop;
  8578. hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
  8579. switch (priv->ieee->iw_mode) {
  8580. case IW_MODE_ADHOC:
  8581. unicast = !is_multicast_ether_addr(hdr->addr1);
  8582. id = ipw_find_station(priv, hdr->addr1);
  8583. if (id == IPW_INVALID_STATION) {
  8584. id = ipw_add_station(priv, hdr->addr1);
  8585. if (id == IPW_INVALID_STATION) {
  8586. IPW_WARNING("Attempt to send data to "
  8587. "invalid cell: %pM\n",
  8588. hdr->addr1);
  8589. goto drop;
  8590. }
  8591. }
  8592. break;
  8593. case IW_MODE_INFRA:
  8594. default:
  8595. unicast = !is_multicast_ether_addr(hdr->addr3);
  8596. id = 0;
  8597. break;
  8598. }
  8599. tfd = &txq->bd[q->first_empty];
  8600. txq->txb[q->first_empty] = txb;
  8601. memset(tfd, 0, sizeof(*tfd));
  8602. tfd->u.data.station_number = id;
  8603. tfd->control_flags.message_type = TX_FRAME_TYPE;
  8604. tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
  8605. tfd->u.data.cmd_id = DINO_CMD_TX;
  8606. tfd->u.data.len = cpu_to_le16(txb->payload_size);
  8607. if (priv->assoc_request.ieee_mode == IPW_B_MODE)
  8608. tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
  8609. else
  8610. tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
  8611. if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
  8612. tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
  8613. fc = le16_to_cpu(hdr->frame_ctl);
  8614. hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
  8615. memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
  8616. if (likely(unicast))
  8617. tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
  8618. if (txb->encrypted && !priv->ieee->host_encrypt) {
  8619. switch (priv->ieee->sec.level) {
  8620. case SEC_LEVEL_3:
  8621. tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
  8622. cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  8623. /* XXX: ACK flag must be set for CCMP even if it
  8624. * is a multicast/broadcast packet, because CCMP
  8625. * group communication encrypted by GTK is
  8626. * actually done by the AP. */
  8627. if (!unicast)
  8628. tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
  8629. tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
  8630. tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
  8631. tfd->u.data.key_index = 0;
  8632. tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
  8633. break;
  8634. case SEC_LEVEL_2:
  8635. tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
  8636. cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  8637. tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
  8638. tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
  8639. tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
  8640. break;
  8641. case SEC_LEVEL_1:
  8642. tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
  8643. cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  8644. tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
  8645. if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
  8646. 40)
  8647. tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
  8648. else
  8649. tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
  8650. break;
  8651. case SEC_LEVEL_0:
  8652. break;
  8653. default:
  8654. printk(KERN_ERR "Unknown security level %d\n",
  8655. priv->ieee->sec.level);
  8656. break;
  8657. }
  8658. } else
  8659. /* No hardware encryption */
  8660. tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
  8661. #ifdef CONFIG_IPW2200_QOS
  8662. if (fc & IEEE80211_STYPE_QOS_DATA)
  8663. ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
  8664. #endif /* CONFIG_IPW2200_QOS */
  8665. /* payload */
  8666. tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
  8667. txb->nr_frags));
  8668. IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
  8669. txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
  8670. for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
  8671. IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
  8672. i, le32_to_cpu(tfd->u.data.num_chunks),
  8673. txb->fragments[i]->len - hdr_len);
  8674. IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
  8675. i, tfd->u.data.num_chunks,
  8676. txb->fragments[i]->len - hdr_len);
  8677. printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
  8678. txb->fragments[i]->len - hdr_len);
  8679. tfd->u.data.chunk_ptr[i] =
  8680. cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
  8681. txb->fragments[i]->data + hdr_len,
  8682. txb->fragments[i]->len - hdr_len,
  8683. DMA_TO_DEVICE));
  8684. tfd->u.data.chunk_len[i] =
  8685. cpu_to_le16(txb->fragments[i]->len - hdr_len);
  8686. }
  8687. if (i != txb->nr_frags) {
  8688. struct sk_buff *skb;
  8689. u16 remaining_bytes = 0;
  8690. int j;
  8691. for (j = i; j < txb->nr_frags; j++)
  8692. remaining_bytes += txb->fragments[j]->len - hdr_len;
  8693. printk(KERN_INFO "Trying to reallocate for %d bytes\n",
  8694. remaining_bytes);
  8695. skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
  8696. if (skb != NULL) {
  8697. tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
  8698. for (j = i; j < txb->nr_frags; j++) {
  8699. int size = txb->fragments[j]->len - hdr_len;
  8700. printk(KERN_INFO "Adding frag %d %d...\n",
  8701. j, size);
  8702. skb_put_data(skb,
  8703. txb->fragments[j]->data + hdr_len,
  8704. size);
  8705. }
  8706. dev_kfree_skb_any(txb->fragments[i]);
  8707. txb->fragments[i] = skb;
  8708. tfd->u.data.chunk_ptr[i] =
  8709. cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
  8710. skb->data,
  8711. remaining_bytes,
  8712. DMA_TO_DEVICE));
  8713. le32_add_cpu(&tfd->u.data.num_chunks, 1);
  8714. }
  8715. }
  8716. /* kick DMA */
  8717. q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
  8718. ipw_write32(priv, q->reg_w, q->first_empty);
  8719. if (ipw_tx_queue_space(q) < q->high_mark)
  8720. netif_stop_queue(priv->net_dev);
  8721. return NETDEV_TX_OK;
  8722. drop:
  8723. IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
  8724. libipw_txb_free(txb);
  8725. return NETDEV_TX_OK;
  8726. }
  8727. static int ipw_net_is_queue_full(struct net_device *dev, int pri)
  8728. {
  8729. struct ipw_priv *priv = libipw_priv(dev);
  8730. #ifdef CONFIG_IPW2200_QOS
  8731. int tx_id = ipw_get_tx_queue_number(priv, pri);
  8732. struct clx2_tx_queue *txq = &priv->txq[tx_id];
  8733. #else
  8734. struct clx2_tx_queue *txq = &priv->txq[0];
  8735. #endif /* CONFIG_IPW2200_QOS */
  8736. if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
  8737. return 1;
  8738. return 0;
  8739. }
  8740. #ifdef CONFIG_IPW2200_PROMISCUOUS
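/* Mirror transmitted frames to the promiscuous (rtap) interface: apply the
 * configured filter, prepend a minimal radiotap header carrying only the
 * channel field, and feed each fragment to libipw_rx() on the prom device. */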
  8741. static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
  8742. struct libipw_txb *txb)
  8743. {
  8744. struct libipw_rx_stats dummystats;
  8745. struct ieee80211_hdr *hdr;
  8746. u8 n;
  8747. u16 filter = priv->prom_priv->filter;
  8748. int hdr_only = 0;
  8749. if (filter & IPW_PROM_NO_TX)
  8750. return;
  8751. memset(&dummystats, 0, sizeof(dummystats));
  8752. /* Filtering of fragment chains is done against the first fragment */
  8753. hdr = (void *)txb->fragments[0]->data;
  8754. if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
  8755. if (filter & IPW_PROM_NO_MGMT)
  8756. return;
  8757. if (filter & IPW_PROM_MGMT_HEADER_ONLY)
  8758. hdr_only = 1;
  8759. } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
  8760. if (filter & IPW_PROM_NO_CTL)
  8761. return;
  8762. if (filter & IPW_PROM_CTL_HEADER_ONLY)
  8763. hdr_only = 1;
  8764. } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
  8765. if (filter & IPW_PROM_NO_DATA)
  8766. return;
  8767. if (filter & IPW_PROM_DATA_HEADER_ONLY)
  8768. hdr_only = 1;
  8769. }
  8770. for(n=0; n<txb->nr_frags; ++n) {
  8771. struct sk_buff *src = txb->fragments[n];
  8772. struct sk_buff *dst;
  8773. struct ieee80211_radiotap_header *rt_hdr;
  8774. int len;
  8775. if (hdr_only) {
  8776. hdr = (void *)src->data;
  8777. len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
  8778. } else
  8779. len = src->len;
  8780. dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
  8781. if (!dst)
  8782. continue;
  8783. rt_hdr = skb_put(dst, sizeof(*rt_hdr));
  8784. rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
  8785. rt_hdr->it_pad = 0;
  8786. rt_hdr->it_present = 0; /* after all, it's just an idea */
  8787. rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
  8788. *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
  8789. ieee80211chan2mhz(priv->channel));
  8790. if (priv->channel > 14) /* 802.11a */
  8791. *(__le16*)skb_put(dst, sizeof(u16)) =
  8792. cpu_to_le16(IEEE80211_CHAN_OFDM |
  8793. IEEE80211_CHAN_5GHZ);
  8794. else if (priv->ieee->mode == IEEE_B) /* 802.11b */
  8795. *(__le16*)skb_put(dst, sizeof(u16)) =
  8796. cpu_to_le16(IEEE80211_CHAN_CCK |
  8797. IEEE80211_CHAN_2GHZ);
  8798. else /* 802.11g */
  8799. *(__le16*)skb_put(dst, sizeof(u16)) =
  8800. cpu_to_le16(IEEE80211_CHAN_OFDM |
  8801. IEEE80211_CHAN_2GHZ);
  8802. rt_hdr->it_len = cpu_to_le16(dst->len);
  8803. skb_copy_from_linear_data(src, skb_put(dst, len), len);
  8804. if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
  8805. dev_kfree_skb_any(dst);
  8806. }
  8807. }
  8808. #endif
  8809. static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
  8810. struct net_device *dev, int pri)
  8811. {
  8812. struct ipw_priv *priv = libipw_priv(dev);
  8813. unsigned long flags;
  8814. netdev_tx_t ret;
  8815. IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
  8816. spin_lock_irqsave(&priv->lock, flags);
  8817. #ifdef CONFIG_IPW2200_PROMISCUOUS
  8818. if (rtap_iface && netif_running(priv->prom_net_dev))
  8819. ipw_handle_promiscuous_tx(priv, txb);
  8820. #endif
  8821. ret = ipw_tx_skb(priv, txb, pri);
  8822. if (ret == NETDEV_TX_OK)
  8823. __ipw_led_activity_on(priv);
  8824. spin_unlock_irqrestore(&priv->lock, flags);
  8825. return ret;
  8826. }
  8827. static void ipw_net_set_multicast_list(struct net_device *dev)
  8828. {
  8829. }
  8830. static int ipw_net_set_mac_address(struct net_device *dev, void *p)
  8831. {
  8832. struct ipw_priv *priv = libipw_priv(dev);
  8833. struct sockaddr *addr = p;
  8834. if (!is_valid_ether_addr(addr->sa_data))
  8835. return -EADDRNOTAVAIL;
  8836. mutex_lock(&priv->mutex);
  8837. priv->config |= CFG_CUSTOM_MAC;
  8838. memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
  8839. printk(KERN_INFO "%s: Setting MAC to %pM\n",
  8840. priv->net_dev->name, priv->mac_addr);
  8841. schedule_work(&priv->adapter_restart);
  8842. mutex_unlock(&priv->mutex);
  8843. return 0;
  8844. }
  8845. static void ipw_ethtool_get_drvinfo(struct net_device *dev,
  8846. struct ethtool_drvinfo *info)
  8847. {
  8848. struct ipw_priv *p = libipw_priv(dev);
  8849. char vers[64];
  8850. char date[32];
  8851. u32 len;
  8852. strscpy(info->driver, DRV_NAME, sizeof(info->driver));
  8853. strscpy(info->version, DRV_VERSION, sizeof(info->version));
  8854. len = sizeof(vers);
  8855. ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
  8856. len = sizeof(date);
  8857. ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
  8858. snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
  8859. vers, date);
  8860. strscpy(info->bus_info, pci_name(p->pci_dev),
  8861. sizeof(info->bus_info));
  8862. }
  8863. static u32 ipw_ethtool_get_link(struct net_device *dev)
  8864. {
  8865. struct ipw_priv *priv = libipw_priv(dev);
  8866. return (priv->status & STATUS_ASSOCIATED) != 0;
  8867. }
  8868. static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
  8869. {
  8870. return IPW_EEPROM_IMAGE_SIZE;
  8871. }
  8872. static int ipw_ethtool_get_eeprom(struct net_device *dev,
  8873. struct ethtool_eeprom *eeprom, u8 * bytes)
  8874. {
  8875. struct ipw_priv *p = libipw_priv(dev);
  8876. if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
  8877. return -EINVAL;
  8878. mutex_lock(&p->mutex);
  8879. memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
  8880. mutex_unlock(&p->mutex);
  8881. return 0;
  8882. }
  8883. static int ipw_ethtool_set_eeprom(struct net_device *dev,
  8884. struct ethtool_eeprom *eeprom, u8 * bytes)
  8885. {
  8886. struct ipw_priv *p = libipw_priv(dev);
  8887. int i;
  8888. if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
  8889. return -EINVAL;
  8890. mutex_lock(&p->mutex);
  8891. memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
  8892. for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
  8893. ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
  8894. mutex_unlock(&p->mutex);
  8895. return 0;
  8896. }
  8897. static const struct ethtool_ops ipw_ethtool_ops = {
  8898. .get_link = ipw_ethtool_get_link,
  8899. .get_drvinfo = ipw_ethtool_get_drvinfo,
  8900. .get_eeprom_len = ipw_ethtool_get_eeprom_len,
  8901. .get_eeprom = ipw_ethtool_get_eeprom,
  8902. .set_eeprom = ipw_ethtool_set_eeprom,
  8903. };
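/* Top-half interrupt handler: read and acknowledge the pending INTA bits,
 * disable further interrupts, and defer the real work to the irq tasklet. */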
  8904. static irqreturn_t ipw_isr(int irq, void *data)
  8905. {
  8906. struct ipw_priv *priv = data;
  8907. u32 inta, inta_mask;
  8908. if (!priv)
  8909. return IRQ_NONE;
  8910. spin_lock(&priv->irq_lock);
  8911. if (!(priv->status & STATUS_INT_ENABLED)) {
  8912. /* IRQ is disabled */
  8913. goto none;
  8914. }
  8915. inta = ipw_read32(priv, IPW_INTA_RW);
  8916. inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
  8917. if (inta == 0xFFFFFFFF) {
  8918. /* Hardware disappeared */
  8919. IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
  8920. goto none;
  8921. }
  8922. if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
  8923. /* Shared interrupt */
  8924. goto none;
  8925. }
  8926. /* tell the device to stop sending interrupts */
  8927. __ipw_disable_interrupts(priv);
  8928. /* ack current interrupts */
  8929. inta &= (IPW_INTA_MASK_ALL & inta_mask);
  8930. ipw_write32(priv, IPW_INTA_RW, inta);
  8931. /* Cache INTA value for our tasklet */
  8932. priv->isr_inta = inta;
  8933. tasklet_schedule(&priv->irq_tasklet);
  8934. spin_unlock(&priv->irq_lock);
  8935. return IRQ_HANDLED;
  8936. none:
  8937. spin_unlock(&priv->irq_lock);
  8938. return IRQ_NONE;
  8939. }
  8940. static void ipw_rf_kill(void *adapter)
  8941. {
  8942. struct ipw_priv *priv = adapter;
  8943. unsigned long flags;
  8944. spin_lock_irqsave(&priv->lock, flags);
  8945. if (rf_kill_active(priv)) {
  8946. IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
  8947. schedule_delayed_work(&priv->rf_kill, 2 * HZ);
  8948. goto exit_unlock;
  8949. }
  8950. /* RF Kill is now disabled, so bring the device back up */
  8951. if (!(priv->status & STATUS_RF_KILL_MASK)) {
  8952. IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
  8953. "device\n");
8954. /* we cannot do an adapter restart while inside an irq lock */
  8955. schedule_work(&priv->adapter_restart);
  8956. } else
  8957. IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
  8958. "enabled\n");
  8959. exit_unlock:
  8960. spin_unlock_irqrestore(&priv->lock, flags);
  8961. }
  8962. static void ipw_bg_rf_kill(struct work_struct *work)
  8963. {
  8964. struct ipw_priv *priv =
  8965. container_of(work, struct ipw_priv, rf_kill.work);
  8966. mutex_lock(&priv->mutex);
  8967. ipw_rf_kill(priv);
  8968. mutex_unlock(&priv->mutex);
  8969. }
  8970. static void ipw_link_up(struct ipw_priv *priv)
  8971. {
  8972. priv->last_seq_num = -1;
  8973. priv->last_frag_num = -1;
  8974. priv->last_packet_time = 0;
  8975. netif_carrier_on(priv->net_dev);
  8976. cancel_delayed_work(&priv->request_scan);
  8977. cancel_delayed_work(&priv->request_direct_scan);
  8978. cancel_delayed_work(&priv->request_passive_scan);
  8979. cancel_delayed_work(&priv->scan_event);
  8980. ipw_reset_stats(priv);
  8981. /* Ensure the rate is updated immediately */
  8982. priv->last_rate = ipw_get_current_rate(priv);
  8983. ipw_gather_stats(priv);
  8984. ipw_led_link_up(priv);
  8985. notify_wx_assoc_event(priv);
  8986. if (priv->config & CFG_BACKGROUND_SCAN)
  8987. schedule_delayed_work(&priv->request_scan, HZ);
  8988. }
  8989. static void ipw_bg_link_up(struct work_struct *work)
  8990. {
  8991. struct ipw_priv *priv =
  8992. container_of(work, struct ipw_priv, link_up);
  8993. mutex_lock(&priv->mutex);
  8994. ipw_link_up(priv);
  8995. mutex_unlock(&priv->mutex);
  8996. }
  8997. static void ipw_link_down(struct ipw_priv *priv)
  8998. {
  8999. ipw_led_link_down(priv);
  9000. netif_carrier_off(priv->net_dev);
  9001. notify_wx_assoc_event(priv);
  9002. /* Cancel any queued work ... */
  9003. cancel_delayed_work(&priv->request_scan);
  9004. cancel_delayed_work(&priv->request_direct_scan);
  9005. cancel_delayed_work(&priv->request_passive_scan);
  9006. cancel_delayed_work(&priv->adhoc_check);
  9007. cancel_delayed_work(&priv->gather_stats);
  9008. ipw_reset_stats(priv);
  9009. if (!(priv->status & STATUS_EXIT_PENDING)) {
  9010. /* Queue up another scan... */
  9011. schedule_delayed_work(&priv->request_scan, 0);
  9012. } else
  9013. cancel_delayed_work(&priv->scan_event);
  9014. }
  9015. static void ipw_bg_link_down(struct work_struct *work)
  9016. {
  9017. struct ipw_priv *priv =
  9018. container_of(work, struct ipw_priv, link_down);
  9019. mutex_lock(&priv->mutex);
  9020. ipw_link_down(priv);
  9021. mutex_unlock(&priv->mutex);
  9022. }
  9023. static void ipw_setup_deferred_work(struct ipw_priv *priv)
  9024. {
  9025. init_waitqueue_head(&priv->wait_command_queue);
  9026. init_waitqueue_head(&priv->wait_state);
  9027. INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
  9028. INIT_WORK(&priv->associate, ipw_bg_associate);
  9029. INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
  9030. INIT_WORK(&priv->system_config, ipw_system_config);
  9031. INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
  9032. INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
  9033. INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
  9034. INIT_WORK(&priv->up, ipw_bg_up);
  9035. INIT_WORK(&priv->down, ipw_bg_down);
  9036. INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
  9037. INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
  9038. INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
  9039. INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
  9040. INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
  9041. INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
  9042. INIT_WORK(&priv->roam, ipw_bg_roam);
  9043. INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
  9044. INIT_WORK(&priv->link_up, ipw_bg_link_up);
  9045. INIT_WORK(&priv->link_down, ipw_bg_link_down);
  9046. INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
  9047. INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
  9048. INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
  9049. INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
  9050. #ifdef CONFIG_IPW2200_QOS
  9051. INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
  9052. #endif /* CONFIG_IPW2200_QOS */
  9053. tasklet_setup(&priv->irq_tasklet, ipw_irq_tasklet);
  9054. }
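/* Security shim called from the libipw/WEXT layer: copy key material and
 * security flags into priv->ieee->sec and flag the security state as needing
 * a firmware update (STATUS_SECURITY_UPDATED). */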
  9055. static void shim__set_security(struct net_device *dev,
  9056. struct libipw_security *sec)
  9057. {
  9058. struct ipw_priv *priv = libipw_priv(dev);
  9059. int i;
  9060. for (i = 0; i < 4; i++) {
  9061. if (sec->flags & (1 << i)) {
  9062. priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
  9063. priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
  9064. if (sec->key_sizes[i] == 0)
  9065. priv->ieee->sec.flags &= ~(1 << i);
  9066. else {
  9067. memcpy(priv->ieee->sec.keys[i], sec->keys[i],
  9068. sec->key_sizes[i]);
  9069. priv->ieee->sec.flags |= (1 << i);
  9070. }
  9071. priv->status |= STATUS_SECURITY_UPDATED;
  9072. } else if (sec->level != SEC_LEVEL_1)
  9073. priv->ieee->sec.flags &= ~(1 << i);
  9074. }
  9075. if (sec->flags & SEC_ACTIVE_KEY) {
  9076. priv->ieee->sec.active_key = sec->active_key;
  9077. priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
  9078. priv->status |= STATUS_SECURITY_UPDATED;
  9079. } else
  9080. priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
  9081. if ((sec->flags & SEC_AUTH_MODE) &&
  9082. (priv->ieee->sec.auth_mode != sec->auth_mode)) {
  9083. priv->ieee->sec.auth_mode = sec->auth_mode;
  9084. priv->ieee->sec.flags |= SEC_AUTH_MODE;
  9085. if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
  9086. priv->capability |= CAP_SHARED_KEY;
  9087. else
  9088. priv->capability &= ~CAP_SHARED_KEY;
  9089. priv->status |= STATUS_SECURITY_UPDATED;
  9090. }
  9091. if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
  9092. priv->ieee->sec.flags |= SEC_ENABLED;
  9093. priv->ieee->sec.enabled = sec->enabled;
  9094. priv->status |= STATUS_SECURITY_UPDATED;
  9095. if (sec->enabled)
  9096. priv->capability |= CAP_PRIVACY_ON;
  9097. else
  9098. priv->capability &= ~CAP_PRIVACY_ON;
  9099. }
  9100. if (sec->flags & SEC_ENCRYPT)
  9101. priv->ieee->sec.encrypt = sec->encrypt;
  9102. if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
  9103. priv->ieee->sec.level = sec->level;
  9104. priv->ieee->sec.flags |= SEC_LEVEL;
  9105. priv->status |= STATUS_SECURITY_UPDATED;
  9106. }
  9107. if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
  9108. ipw_set_hwcrypto_keys(priv);
9109. /* To match the current functionality of ipw2100 (which works well with
9110. * various supplicants), we don't force a disassociate if the
9111. * privacy capability changes ... */
  9112. #if 0
  9113. if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
  9114. (((priv->assoc_request.capability &
  9115. cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
  9116. (!(priv->assoc_request.capability &
  9117. cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
  9118. IPW_DEBUG_ASSOC("Disassociating due to capability "
  9119. "change.\n");
  9120. ipw_disassociate(priv);
  9121. }
  9122. #endif
  9123. }
  9124. static int init_supported_rates(struct ipw_priv *priv,
  9125. struct ipw_supported_rates *rates)
  9126. {
  9127. /* TODO: Mask out rates based on priv->rates_mask */
  9128. memset(rates, 0, sizeof(*rates));
  9129. /* configure supported rates */
  9130. switch (priv->ieee->freq_band) {
  9131. case LIBIPW_52GHZ_BAND:
  9132. rates->ieee_mode = IPW_A_MODE;
  9133. rates->purpose = IPW_RATE_CAPABILITIES;
  9134. ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
  9135. LIBIPW_OFDM_DEFAULT_RATES_MASK);
  9136. break;
9137. default: /* Mixed or 2.4 GHz */
  9138. rates->ieee_mode = IPW_G_MODE;
  9139. rates->purpose = IPW_RATE_CAPABILITIES;
  9140. ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
  9141. LIBIPW_CCK_DEFAULT_RATES_MASK);
  9142. if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
  9143. ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
  9144. LIBIPW_OFDM_DEFAULT_RATES_MASK);
  9145. }
  9146. break;
  9147. }
  9148. return 0;
  9149. }
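/* Bring the firmware up to the RUN state: program TX power, adapter address,
 * system configuration, supported rates, RTS threshold and the random seed,
 * then send the host-complete command and initialise the LEDs. */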
  9150. static int ipw_config(struct ipw_priv *priv)
  9151. {
9152. /* This is only called from ipw_up, which resets/reloads the firmware,
9153. so we don't need to first disable the card before we configure
9154. it */
  9155. if (ipw_set_tx_power(priv))
  9156. goto error;
  9157. /* initialize adapter address */
  9158. if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
  9159. goto error;
  9160. /* set basic system config settings */
  9161. init_sys_config(&priv->sys_config);
  9162. /* Support Bluetooth if we have BT h/w on board, and user wants to.
  9163. * Does not support BT priority yet (don't abort or defer our Tx) */
  9164. if (bt_coexist) {
  9165. unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
  9166. if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
  9167. priv->sys_config.bt_coexistence
  9168. |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
  9169. if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
  9170. priv->sys_config.bt_coexistence
  9171. |= CFG_BT_COEXISTENCE_OOB;
  9172. }
  9173. #ifdef CONFIG_IPW2200_PROMISCUOUS
  9174. if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
  9175. priv->sys_config.accept_all_data_frames = 1;
  9176. priv->sys_config.accept_non_directed_frames = 1;
  9177. priv->sys_config.accept_all_mgmt_bcpr = 1;
  9178. priv->sys_config.accept_all_mgmt_frames = 1;
  9179. }
  9180. #endif
  9181. if (priv->ieee->iw_mode == IW_MODE_ADHOC)
  9182. priv->sys_config.answer_broadcast_ssid_probe = 1;
  9183. else
  9184. priv->sys_config.answer_broadcast_ssid_probe = 0;
  9185. if (ipw_send_system_config(priv))
  9186. goto error;
  9187. init_supported_rates(priv, &priv->rates);
  9188. if (ipw_send_supported_rates(priv, &priv->rates))
  9189. goto error;
  9190. /* Set request-to-send threshold */
  9191. if (priv->rts_threshold) {
  9192. if (ipw_send_rts_threshold(priv, priv->rts_threshold))
  9193. goto error;
  9194. }
  9195. #ifdef CONFIG_IPW2200_QOS
  9196. IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
  9197. ipw_qos_activate(priv, NULL);
  9198. #endif /* CONFIG_IPW2200_QOS */
  9199. if (ipw_set_random_seed(priv))
  9200. goto error;
  9201. /* final state transition to the RUN state */
  9202. if (ipw_send_host_complete(priv))
  9203. goto error;
  9204. priv->status |= STATUS_INIT;
  9205. ipw_led_init(priv);
  9206. ipw_led_radio_on(priv);
  9207. priv->notif_missed_beacons = 0;
  9208. /* Set hardware WEP key if it is configured. */
  9209. if ((priv->capability & CAP_PRIVACY_ON) &&
  9210. (priv->ieee->sec.level == SEC_LEVEL_1) &&
  9211. !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
  9212. ipw_set_hwcrypto_keys(priv);
  9213. return 0;
  9214. error:
  9215. return -EIO;
  9216. }
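
/*
 * Added commentary (not part of the original sources): ipw_config()
 * collapses every failed host command into a single -EIO at the error
 * label above.  Its caller, ipw_up() below, treats any non-zero return
 * as a reason to take the hardware all the way back down and retry,
 * up to MAX_HW_RESTARTS times.
 */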
/*
 * NOTE:
 *
 * These tables have been tested in conjunction with the
 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
 *
 * Altering these values, using them on other hardware, or in geographies
 * not intended for resale of the above mentioned Intel adapters has
 * not been tested.
 *
 * Remember to update the table in README.ipw2200 when changing this
 * table.
 *
 */
static const struct libipw_geo ipw_geos[] = {
	{			/* Restricted */
	 "---",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 },

	{			/* Custom US/Canada */
	 "ZZF",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 8,
	 .a = {{5180, 36}, {5200, 40}, {5220, 44}, {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Rest of World */
	 "ZZD",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}},
	 },

	{			/* Custom USA & Europe & High */
	 "ZZA",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36}, {5200, 40}, {5220, 44}, {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149}, {5765, 153}, {5785, 157},
	       {5805, 161}, {5825, 165}},
	 },

	{			/* Custom NA & Europe */
	 "ZZB",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36}, {5200, 40}, {5220, 44}, {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Custom Japan */
	 "ZZC",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 4,
	 .a = {{5170, 34}, {5190, 38},
	       {5210, 42}, {5230, 46}},
	 },

	{			/* Custom */
	 "ZZM",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 },

	{			/* Europe */
	 "ZZE",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}},
	 .a_channels = 19,
	 .a = {{5180, 36}, {5200, 40}, {5220, 44}, {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Custom Japan */
	 "ZZJ",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
	 .a_channels = 4,
	 .a = {{5170, 34}, {5190, 38},
	       {5210, 42}, {5230, 46}},
	 },

	{			/* Rest of World */
	 "ZZR",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
			     LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* High Band */
	 "ZZH",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
	 .a_channels = 4,
	 .a = {{5745, 149}, {5765, 153},
	       {5785, 157}, {5805, 161}},
	 },

	{			/* Custom Europe */
	 "ZZG",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12}, {2472, 13}},
	 .a_channels = 4,
	 .a = {{5180, 36}, {5200, 40},
	       {5220, 44}, {5240, 48}},
	 },

	{			/* Europe */
	 "ZZK",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
	 .a_channels = 24,
	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Europe */
	 "ZZL",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
	 }
};
static void ipw_set_geo(struct ipw_priv *priv)
{
	int j;

	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
		if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
			    ipw_geos[j].name, 3))
			break;
	}

	if (j == ARRAY_SIZE(ipw_geos)) {
		IPW_WARNING("SKU [%c%c%c] not recognized.\n",
			    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
			    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
			    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
		j = 0;
	}

	libipw_set_geo(priv->ieee, &ipw_geos[j]);
}
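
/*
 * Illustrative note (not part of the original sources): the EEPROM
 * stores a three-byte SKU/country code at EEPROM_COUNTRY_CODE, matched
 * byte-for-byte against the .name fields of ipw_geos[] above.  For
 * example, an EEPROM programmed with "ZZE" selects the Europe table,
 * while an unrecognized code logs the "SKU [...] not recognized"
 * warning and falls back to entry 0 ("---": 2.4 GHz channels 1-11,
 * no 802.11a channels).
 */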
#define MAX_HW_RESTARTS 5
static int ipw_up(struct ipw_priv *priv)
{
	int rc, i;

	/* Age scan list entries found before suspend */
	if (priv->suspend_time) {
		libipw_networks_age(priv->ieee, priv->suspend_time);
		priv->suspend_time = 0;
	}

	if (priv->status & STATUS_EXIT_PENDING)
		return -EIO;

	if (cmdlog && !priv->cmdlog) {
		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
				       GFP_KERNEL);
		if (priv->cmdlog == NULL) {
			IPW_ERROR("Error allocating %d command log entries.\n",
				  cmdlog);
			return -ENOMEM;
		} else {
			priv->cmdlog_len = cmdlog;
		}
	}

	for (i = 0; i < MAX_HW_RESTARTS; i++) {
		/* Load the microcode, firmware, and eeprom.
		 * Also start the clocks. */
		rc = ipw_load(priv);
		if (rc) {
			IPW_ERROR("Unable to load firmware: %d\n", rc);
			return rc;
		}

		ipw_init_ordinals(priv);
		if (!(priv->config & CFG_CUSTOM_MAC))
			eeprom_parse_mac(priv, priv->mac_addr);
		eth_hw_addr_set(priv->net_dev, priv->mac_addr);

		ipw_set_geo(priv);

		if (priv->status & STATUS_RF_KILL_SW) {
			IPW_WARNING("Radio disabled by module parameter.\n");
			return 0;
		} else if (rf_kill_active(priv)) {
			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
				    "Kill switch must be turned off for "
				    "wireless networking to work.\n");
			schedule_delayed_work(&priv->rf_kill, 2 * HZ);
			return 0;
		}

		rc = ipw_config(priv);
		if (!rc) {
			IPW_DEBUG_INFO("Configured device on count %i\n", i);

			/* If configured to try and auto-associate, kick
			 * off a scan. */
			schedule_delayed_work(&priv->request_scan, 0);

			return 0;
		}

		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
			       i, MAX_HW_RESTARTS);

		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		ipw_down(priv);
	}

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
static void ipw_bg_up(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, up);
	mutex_lock(&priv->mutex);
	ipw_up(priv);
	mutex_unlock(&priv->mutex);
}

static void ipw_deinit(struct ipw_priv *priv)
{
	int i;

	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
		ipw_abort_scan(priv);
	}

	if (priv->status & STATUS_ASSOCIATED) {
		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
		ipw_disassociate(priv);
	}

	ipw_led_shutdown(priv);

	/* Wait up to 1s for status to change to not scanning and not
	 * associated (disassociation can take a while for a full 802.11
	 * exchange) */
	for (i = 1000; i && (priv->status &
			     (STATUS_DISASSOCIATING |
			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
		udelay(10);

	if (priv->status & (STATUS_DISASSOCIATING |
			    STATUS_ASSOCIATED | STATUS_SCANNING))
		IPW_DEBUG_INFO("Still associated or scanning...\n");
	else
		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);

	/* Attempt to disable the card */
	ipw_send_card_disable(priv, 0);

	priv->status &= ~STATUS_INIT;
}

static void ipw_down(struct ipw_priv *priv)
{
	int exit_pending = priv->status & STATUS_EXIT_PENDING;

	priv->status |= STATUS_EXIT_PENDING;

	if (ipw_is_init(priv))
		ipw_deinit(priv);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		priv->status &= ~STATUS_EXIT_PENDING;

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
	netif_carrier_off(priv->net_dev);

	ipw_stop_nic(priv);

	ipw_led_radio_off(priv);
}

static void ipw_bg_down(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, down);
	mutex_lock(&priv->mutex);
	ipw_down(priv);
	mutex_unlock(&priv->mutex);
}
static int ipw_wdev_init(struct net_device *dev)
{
	int i, rc = 0;
	struct ipw_priv *priv = libipw_priv(dev);
	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
	struct wireless_dev *wdev = &priv->ieee->wdev;

	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);

	/* fill-out priv->ieee->bg_band */
	if (geo->bg_channels) {
		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;

		bg_band->band = NL80211_BAND_2GHZ;
		bg_band->n_channels = geo->bg_channels;
		bg_band->channels = kcalloc(geo->bg_channels,
					    sizeof(struct ieee80211_channel),
					    GFP_KERNEL);
		if (!bg_band->channels) {
			rc = -ENOMEM;
			goto out;
		}
		/* translate geo->bg to bg_band.channels */
		for (i = 0; i < geo->bg_channels; i++) {
			bg_band->channels[i].band = NL80211_BAND_2GHZ;
			bg_band->channels[i].center_freq = geo->bg[i].freq;
			bg_band->channels[i].hw_value = geo->bg[i].channel;
			bg_band->channels[i].max_power = geo->bg[i].max_power;
			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
				bg_band->channels[i].flags |=
					IEEE80211_CHAN_NO_IR;
			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
				bg_band->channels[i].flags |=
					IEEE80211_CHAN_NO_IR;
			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
				bg_band->channels[i].flags |=
					IEEE80211_CHAN_RADAR;
			/* No equivalent for LIBIPW_CH_80211H_RULES,
			   LIBIPW_CH_UNIFORM_SPREADING, or
			   LIBIPW_CH_B_ONLY... */
		}
		/* point at bitrate info */
		bg_band->bitrates = ipw2200_bg_rates;
		bg_band->n_bitrates = ipw2200_num_bg_rates;

		wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
	}

	/* fill-out priv->ieee->a_band */
	if (geo->a_channels) {
		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;

		a_band->band = NL80211_BAND_5GHZ;
		a_band->n_channels = geo->a_channels;
		a_band->channels = kcalloc(geo->a_channels,
					   sizeof(struct ieee80211_channel),
					   GFP_KERNEL);
		if (!a_band->channels) {
			rc = -ENOMEM;
			goto out;
		}
		/* translate geo->a to a_band.channels */
		for (i = 0; i < geo->a_channels; i++) {
			a_band->channels[i].band = NL80211_BAND_5GHZ;
			a_band->channels[i].center_freq = geo->a[i].freq;
			a_band->channels[i].hw_value = geo->a[i].channel;
			a_band->channels[i].max_power = geo->a[i].max_power;
			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
				a_band->channels[i].flags |=
					IEEE80211_CHAN_NO_IR;
			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
				a_band->channels[i].flags |=
					IEEE80211_CHAN_NO_IR;
			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
				a_band->channels[i].flags |=
					IEEE80211_CHAN_RADAR;
			/* No equivalent for LIBIPW_CH_80211H_RULES,
			   LIBIPW_CH_UNIFORM_SPREADING, or
			   LIBIPW_CH_B_ONLY... */
		}
		/* point at bitrate info */
		a_band->bitrates = ipw2200_a_rates;
		a_band->n_bitrates = ipw2200_num_a_rates;

		wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
	}

	wdev->wiphy->cipher_suites = ipw_cipher_suites;
	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);

	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);

	/* With that information in place, we can now register the wiphy... */
	rc = wiphy_register(wdev->wiphy);
	if (rc)
		goto out;

	return 0;

out:
	kfree(priv->ieee->a_band.channels);
	kfree(priv->ieee->bg_band.channels);
	return rc;
}
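
/*
 * Added commentary (not part of the original sources): the loops above
 * map libipw geo channel flags onto cfg80211 channel flags.  Both
 * LIBIPW_CH_PASSIVE_ONLY and LIBIPW_CH_NO_IBSS become
 * IEEE80211_CHAN_NO_IR, LIBIPW_CH_RADAR_DETECT becomes
 * IEEE80211_CHAN_RADAR, and the remaining libipw flags
 * (LIBIPW_CH_80211H_RULES, LIBIPW_CH_UNIFORM_SPREADING,
 * LIBIPW_CH_B_ONLY) have no cfg80211 equivalent and are dropped.
 */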
/* PCI driver stuff */
static const struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VDEVICE(INTEL, 0x104f), 0},
	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, card_ids);
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_cfg.attr,
	&dev_attr_error.attr,
	&dev_attr_event_log.attr,
	&dev_attr_cmd_log.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,
	&dev_attr_rtc.attr,
	&dev_attr_scan_age.attr,
	&dev_attr_led.attr,
	&dev_attr_speed_scan.attr,
	&dev_attr_net_stats.attr,
	&dev_attr_channels.attr,
#ifdef CONFIG_IPW2200_PROMISCUOUS
	&dev_attr_rtap_iface.attr,
	&dev_attr_rtap_filter.attr,
#endif
	NULL
};

static const struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
};
#ifdef CONFIG_IPW2200_PROMISCUOUS
static int ipw_prom_open(struct net_device *dev)
{
	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
	struct ipw_priv *priv = prom_priv->priv;

	IPW_DEBUG_INFO("prom dev->open\n");
	netif_carrier_off(dev);

	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;

		ipw_send_system_config(priv);
	}

	return 0;
}

static int ipw_prom_stop(struct net_device *dev)
{
	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
	struct ipw_priv *priv = prom_priv->priv;

	IPW_DEBUG_INFO("prom dev->stop\n");

	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
		priv->sys_config.accept_all_data_frames = 0;
		priv->sys_config.accept_non_directed_frames = 0;
		priv->sys_config.accept_all_mgmt_bcpr = 0;
		priv->sys_config.accept_all_mgmt_frames = 0;

		ipw_send_system_config(priv);
	}

	return 0;
}

static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	IPW_DEBUG_INFO("prom dev->xmit\n");
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops ipw_prom_netdev_ops = {
	.ndo_open = ipw_prom_open,
	.ndo_stop = ipw_prom_stop,
	.ndo_start_xmit = ipw_prom_hard_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static int ipw_prom_alloc(struct ipw_priv *priv)
{
	int rc = 0;

	if (priv->prom_net_dev)
		return -EPERM;

	priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
	if (priv->prom_net_dev == NULL)
		return -ENOMEM;

	priv->prom_priv = libipw_priv(priv->prom_net_dev);
	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
	priv->prom_priv->priv = priv;

	strcpy(priv->prom_net_dev->name, "rtap%d");
	eth_hw_addr_set(priv->prom_net_dev, priv->mac_addr);

	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;

	priv->prom_net_dev->min_mtu = 68;
	priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN;

	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);

	rc = register_netdev(priv->prom_net_dev);
	if (rc) {
		free_libipw(priv->prom_net_dev, 1);
		priv->prom_net_dev = NULL;
		return rc;
	}

	return 0;
}

static void ipw_prom_free(struct ipw_priv *priv)
{
	if (!priv->prom_net_dev)
		return;

	unregister_netdev(priv->prom_net_dev);
	free_libipw(priv->prom_net_dev, 1);

	priv->prom_net_dev = NULL;
}
#endif
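
/*
 * Added commentary (not part of the original sources): when the driver
 * is built with CONFIG_IPW2200_PROMISCUOUS and loaded with rtap_iface=1,
 * ipw_prom_alloc() registers a second, radiotap-typed "rtap%d" net
 * device.  Opening it while the main interface is not in monitor mode
 * sets the accept_all_* bits in sys_config (see ipw_prom_open() above)
 * so the firmware forwards all received frames; closing it clears those
 * bits again in ipw_prom_stop().
 */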
static const struct net_device_ops ipw_netdev_ops = {
	.ndo_open = ipw_net_open,
	.ndo_stop = ipw_net_stop,
	.ndo_set_rx_mode = ipw_net_set_multicast_list,
	.ndo_set_mac_address = ipw_net_set_mac_address,
	.ndo_start_xmit = libipw_xmit,
	.ndo_validate_addr = eth_validate_addr,
};
static int ipw_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int i;

	net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = libipw_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);

	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
	ipw_debug_level = debug;
	spin_lock_init(&priv->irq_lock);
	spin_lock_init(&priv->lock);
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	mutex_init(&priv->mutex);
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_libipw;
	}

	pci_set_master(pdev);

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (!err)
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;

	base = pci_ioremap_bar(pdev, 0);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	ipw_setup_deferred_work(priv);

	ipw_sw_reset(priv, 1);

	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_iounmap;
	}

	SET_NETDEV_DEV(net_dev, &pdev->dev);

	mutex_lock(&priv->mutex);

	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;
	priv->ieee->is_queue_full = ipw_net_is_queue_full;

#ifdef CONFIG_IPW2200_QOS
	priv->ieee->is_qos_active = ipw_is_qos_active;
	priv->ieee->handle_probe_response = ipw_handle_beacon;
	priv->ieee->handle_beacon = ipw_handle_probe_response;
	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
#endif				/* CONFIG_IPW2200_QOS */

	priv->ieee->perfect_rssi = -20;
	priv->ieee->worst_rssi = -85;

	net_dev->netdev_ops = &ipw_netdev_ops;
	priv->wireless_data.spy_data = &priv->ieee->spy_data;
	net_dev->wireless_data = &priv->wireless_data;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;

	net_dev->min_mtu = 68;
	net_dev->max_mtu = LIBIPW_DATA_LEN;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		mutex_unlock(&priv->mutex);
		goto out_release_irq;
	}

	if (ipw_up(priv)) {
		mutex_unlock(&priv->mutex);
		err = -EIO;
		goto out_remove_sysfs;
	}

	mutex_unlock(&priv->mutex);

	err = ipw_wdev_init(net_dev);
	if (err) {
		IPW_ERROR("failed to register wireless device\n");
		goto out_remove_sysfs;
	}

	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_unregister_wiphy;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (rtap_iface) {
		err = ipw_prom_alloc(priv);
		if (err) {
			IPW_ERROR("Failed to register promiscuous network "
				  "device (error %d).\n", err);
			unregister_netdev(priv->net_dev);
			goto out_unregister_wiphy;
		}
	}
#endif

	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
	       "channels, %d 802.11a channels)\n",
	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
	       priv->ieee->geo.a_channels);

	return 0;

      out_unregister_wiphy:
	wiphy_unregister(priv->ieee->wdev.wiphy);
	kfree(priv->ieee->a_band.channels);
	kfree(priv->ieee->bg_band.channels);
      out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
      out_release_irq:
	free_irq(pdev->irq, priv);
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
      out_free_libipw:
	free_libipw(priv->net_dev, 0);
      out:
	return err;
}
static void ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}

	/* make sure all works are inactive */
	cancel_delayed_work_sync(&priv->adhoc_check);
	cancel_work_sync(&priv->associate);
	cancel_work_sync(&priv->disassociate);
	cancel_work_sync(&priv->system_config);
	cancel_work_sync(&priv->rx_replenish);
	cancel_work_sync(&priv->adapter_restart);
	cancel_delayed_work_sync(&priv->rf_kill);
	cancel_work_sync(&priv->up);
	cancel_work_sync(&priv->down);
	cancel_delayed_work_sync(&priv->request_scan);
	cancel_delayed_work_sync(&priv->request_direct_scan);
	cancel_delayed_work_sync(&priv->request_passive_scan);
	cancel_delayed_work_sync(&priv->scan_event);
	cancel_delayed_work_sync(&priv->gather_stats);
	cancel_work_sync(&priv->abort_scan);
	cancel_work_sync(&priv->roam);
	cancel_delayed_work_sync(&priv->scan_check);
	cancel_work_sync(&priv->link_up);
	cancel_work_sync(&priv->link_down);
	cancel_delayed_work_sync(&priv->led_link_on);
	cancel_delayed_work_sync(&priv->led_link_off);
	cancel_delayed_work_sync(&priv->led_act_off);
	cancel_work_sync(&priv->merge_networks);

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* wiphy_unregister needs to be here, before free_libipw */
	wiphy_unregister(priv->ieee->wdev.wiphy);
	kfree(priv->ieee->a_band.channels);
	kfree(priv->ieee->bg_band.channels);
	free_libipw(priv->net_dev, 0);
	free_firmware();
}
static int __maybe_unused ipw_pci_suspend(struct device *dev_d)
{
	struct ipw_priv *priv = dev_get_drvdata(dev_d);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	priv->suspend_at = ktime_get_boottime_seconds();

	return 0;
}

static int __maybe_unused ipw_pci_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;

	/* Bring the device back up */
	schedule_work(&priv->up);

	return 0;
}
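
/*
 * Added commentary (not part of the original sources): suspend records
 * a boottime timestamp in priv->suspend_at, and resume converts it into
 * priv->suspend_time before scheduling the up work.  ipw_up() then
 * passes that interval to libipw_networks_age() so scan results
 * collected before the suspend are aged rather than reported as fresh.
 */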
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}

static SIMPLE_DEV_PM_OPS(ipw_pci_pm_ops, ipw_pci_suspend, ipw_pci_resume);

/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = ipw_pci_remove,
	.driver.pm = &ipw_pci_pm_ops,
	.shutdown = ipw_pci_shutdown,
};
static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_register_driver(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}

static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param_named(led, led_support, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param_named(channel, default_channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif

#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif				/* CONFIG_IPW2200_QOS */

#ifdef CONFIG_IPW2200_MONITOR
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");

module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");

module_exit(ipw_exit);
module_init(ipw_init);
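
/*
 * Added usage note (not part of the original sources): the module
 * parameters above are read-only after load (permission 0444), so they
 * are typically set on the modprobe command line, e.g.
 *
 *	modprobe ipw2200 led=1 associate=0 bt_coexist=1
 *
 * The values shown are arbitrary examples; only the parameter names
 * come from the declarations above.
 */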