001-igbe_update.patch 375 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
1422114231142411425114261142711428114291143011431114321143311434114351143611437114381143911440114411144211443114441144511446114471144811449114501145111452114531145411455114561145711458114591146011461114621146311464114651146611467114681146911470114711147211473114741147511476114771147811479114801148111482114831148411485114861148711488114891149011491114921149311494114951149611497114981149911500115011150211503115041150511506115071150811509115101151111512115131151411515115161151711518115191152011521115221152311524115251152611527115281152911530115311153211533115341153511536115371153811539115401154111542115431154411545115461154711548115491155011551115521155311554115551155611557115581155911560115611156211563115641156511566115671156811569115701157111572115731157411575115761157711578115791158011581115821158311584115851158611587115881158911590115911159211593115941159511596115971159811599116001160111602116031160411605116061160711608116091161011611116121161311614116151161611617116181161911620116211162211623116241162511626116271162811629116301163111632116331163411635116361163711638116391164011641116421164311644116451164611647116481164911650116511165211653116541165511656116571165811659116601166111662116631166411665116661166711668116691167011671116721167311674116751167611677116781167911680116811168211683116841168511686116871168811689116901169111692116931169411695116961169711698116991170011701117021170311704117051170611707117081170911710117111171211713117141171511716117171171811719117201172111722117231172411725117261172711728117291173011731117321173311734117351173611737117381173911740117411174211743117441174511746117471174811749117501175111752117531175411755
  1. --- a/Embedded/src/GbE/gcu.h
  2. +++ b/Embedded/src/GbE/gcu.h
  3. @@ -2,7 +2,7 @@
  4. GPL LICENSE SUMMARY
  5. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  6. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  7. This program is free software; you can redistribute it and/or modify
  8. it under the terms of version 2 of the GNU General Public License as
  9. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  10. Contact Information:
  11. Intel Corporation
  12. - version: Embedded.L.1.0.34
  13. + version: Embedded.Release.Patch.L.1.0.7-5
  14. Contact Information:
  15. --- a/Embedded/src/GbE/gcu_if.c
  16. +++ b/Embedded/src/GbE/gcu_if.c
  17. @@ -2,7 +2,7 @@
  18. GPL LICENSE SUMMARY
  19. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  20. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  21. This program is free software; you can redistribute it and/or modify
  22. it under the terms of version 2 of the GNU General Public License as
  23. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  24. Contact Information:
  25. Intel Corporation
  26. - version: Embedded.L.1.0.34
  27. + version: Embedded.Release.Patch.L.1.0.7-5
  28. Contact Information:
  29. @@ -330,10 +330,17 @@ gcu_write_verify(uint32_t phy_num, uint3
  30. */
  31. void gcu_iegbe_resume(struct pci_dev *pdev)
  32. {
  33. +#if ( ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) ) && \
  34. + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) )
  35. + struct net_device *netdev = pci_get_drvdata(pdev);
  36. + struct gcu_adapter *adapter = netdev_priv(netdev);
  37. +#endif
  38. +
  39. GCU_DBG("%s\n", __func__);
  40. pci_restore_state(pdev);
  41. - pci_enable_device(pdev);
  42. + if(!pci_enable_device(pdev))
  43. + GCU_DBG("pci_enable_device failed!\n",);
  44. return;
  45. }
  46. @@ -348,6 +355,12 @@ EXPORT_SYMBOL(gcu_iegbe_resume);
  47. */
  48. int gcu_iegbe_suspend(struct pci_dev *pdev, uint32_t state)
  49. {
  50. +#if ( ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) ) && \
  51. + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) )
  52. + struct net_device *netdev = pci_get_drvdata(pdev);
  53. + struct gcu_adapter *adapter = netdev_priv(netdev);
  54. +#endif
  55. +
  56. GCU_DBG("%s\n", __func__);
  57. pci_save_state(pdev);
  58. --- a/Embedded/src/GbE/gcu_if.h
  59. +++ b/Embedded/src/GbE/gcu_if.h
  60. @@ -2,7 +2,7 @@
  61. GPL LICENSE SUMMARY
  62. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  63. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  64. This program is free software; you can redistribute it and/or modify
  65. it under the terms of version 2 of the GNU General Public License as
  66. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  67. Contact Information:
  68. Intel Corporation
  69. - version: Embedded.L.1.0.34
  70. + version: Embedded.Release.Patch.L.1.0.7-5
  71. Contact Information:
  72. --- a/Embedded/src/GbE/gcu_main.c
  73. +++ b/Embedded/src/GbE/gcu_main.c
  74. @@ -2,7 +2,7 @@
  75. GPL LICENSE SUMMARY
  76. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  77. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  78. This program is free software; you can redistribute it and/or modify
  79. it under the terms of version 2 of the GNU General Public License as
  80. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  81. Contact Information:
  82. Intel Corporation
  83. - version: Embedded.L.1.0.34
  84. + version: Embedded.Release.Patch.L.1.0.7-5
  85. Contact Information:
  86. @@ -94,6 +94,7 @@ static struct pci_driver gcu_driver = {
  87. static struct gcu_adapter *global_adapter = 0;
  88. static spinlock_t global_adapter_spinlock = SPIN_LOCK_UNLOCKED;
  89. +static unsigned long g_intflags = 0;
  90. MODULE_AUTHOR("Intel(R) Corporation");
  91. MODULE_DESCRIPTION("Global Configuration Unit Driver");
  92. @@ -124,7 +125,7 @@ gcu_init_module(void)
  93. printk(KERN_INFO "%s\n", gcu_copyright);
  94. - ret = pci_module_init(&gcu_driver);
  95. + ret = pci_register_driver(&gcu_driver);
  96. if(ret >= 0) {
  97. register_reboot_notifier(&gcu_notifier_reboot);
  98. }
  99. @@ -199,8 +200,6 @@ gcu_probe(struct pci_dev *pdev,
  100. return -ENOMEM;
  101. }
  102. - SET_MODULE_OWNER(adapter);
  103. -
  104. pci_set_drvdata(pdev, adapter);
  105. adapter->pdev = pdev;
  106. @@ -238,7 +237,6 @@ gcu_probe(struct pci_dev *pdev,
  107. return 0;
  108. }
  109. -
  110. /**
  111. * gcu_probe_err - gcu_probe error handler
  112. * @err: gcu_err_type
  113. @@ -295,7 +293,7 @@ gcu_notify_reboot(struct notifier_block
  114. case SYS_DOWN:
  115. case SYS_HALT:
  116. case SYS_POWER_OFF:
  117. - while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
  118. + while((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
  119. if(pci_dev_driver(pdev) == &gcu_driver){
  120. gcu_suspend(pdev, 0x3);
  121. }
  122. @@ -318,6 +316,11 @@ static int
  123. gcu_suspend(struct pci_dev *pdev, uint32_t state)
  124. {
  125. /*struct gcu_adapter *adapter = pci_get_drvdata(pdev); */
  126. +#if ( ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) ) && \
  127. + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) )
  128. + struct net_device *netdev = pci_get_drvdata(pdev);
  129. + struct gcu_adapter *adapter = netdev_priv(netdev);
  130. +#endif
  131. GCU_DBG("%s\n", __func__);
  132. @@ -338,7 +341,6 @@ gcu_suspend(struct pci_dev *pdev, uint32
  133. return state;
  134. }
  135. -
  136. /**
  137. * alloc_gcu_adapter
  138. *
  139. @@ -412,7 +414,7 @@ gcu_get_adapter(void)
  140. return NULL;
  141. }
  142. - spin_lock(&global_adapter_spinlock);
  143. + spin_lock_irqsave(&global_adapter_spinlock, g_intflags);
  144. return global_adapter;
  145. }
  146. @@ -437,7 +439,7 @@ gcu_release_adapter(const struct gcu_ada
  147. *adapter = 0;
  148. }
  149. - spin_unlock(&global_adapter_spinlock);
  150. + spin_unlock_irqrestore(&global_adapter_spinlock, g_intflags);
  151. return;
  152. }
  153. --- a/Embedded/src/GbE/gcu_reg.h
  154. +++ b/Embedded/src/GbE/gcu_reg.h
  155. @@ -2,7 +2,7 @@
  156. GPL LICENSE SUMMARY
  157. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  158. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  159. This program is free software; you can redistribute it and/or modify
  160. it under the terms of version 2 of the GNU General Public License as
  161. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  162. Contact Information:
  163. Intel Corporation
  164. - version: Embedded.L.1.0.34
  165. + version: Embedded.Release.Patch.L.1.0.7-5
  166. Contact Information:
  167. --- a/Embedded/src/GbE/iegbe.7
  168. +++ b/Embedded/src/GbE/iegbe.7
  169. @@ -1,7 +1,7 @@
  170. .\" GPL LICENSE SUMMARY
  171. .\"
  172. -.\" Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  173. +.\" Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  174. .\"
  175. .\" This program is free software; you can redistribute it and/or modify
  176. .\" it under the terms of version 2 of the GNU General Public License as
  177. @@ -21,7 +21,7 @@
  178. .\" Contact Information:
  179. .\" Intel Corporation
  180. .\"
  181. -.\" version: Embedded.L.1.0.34
  182. +.\" version: Embedded.Release.Patch.L.1.0.7-5
  183. .\" LICENSE
  184. .\"
  185. --- a/Embedded/src/GbE/iegbe_ethtool.c
  186. +++ b/Embedded/src/GbE/iegbe_ethtool.c
  187. @@ -2,7 +2,7 @@
  188. GPL LICENSE SUMMARY
  189. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  190. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  191. This program is free software; you can redistribute it and/or modify
  192. it under the terms of version 2 of the GNU General Public License as
  193. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  194. Contact Information:
  195. Intel Corporation
  196. - version: Embedded.L.1.0.34
  197. + version: Embedded.Release.Patch.L.1.0.7-5
  198. Contact Information:
  199. @@ -132,22 +132,6 @@ static const struct iegbe_stats iegbe_gs
  200. { "cpp_master", E1000_STAT(icr_cpp_master) },
  201. { "stat", E1000_STAT(icr_stat) },
  202. #endif
  203. -#ifdef IEGBE_GBE_WORKAROUND
  204. - { "txqec", E1000_STAT(stats.txqec) },
  205. - { "tx_next_to_clean", E1000_STAT(stats.tx_next_to_clean) },
  206. - { "tx_next_to_use", E1000_STAT(stats.tx_next_to_use) },
  207. - { "num_tx_queues", E1000_STAT(stats.num_tx_queues) },
  208. -
  209. - { "num_rx_buf_alloc", E1000_STAT(stats.num_rx_buf_alloc) },
  210. - { "rx_next_to_clean", E1000_STAT(stats.rx_next_to_clean) },
  211. - { "rx_next_to_use", E1000_STAT(stats.rx_next_to_use) },
  212. - { "cc_gt_num_rx", E1000_STAT(stats.cc_gt_num_rx) },
  213. - { "tx_hnet", E1000_STAT(stats.tx_hnet) },
  214. - { "tx_hnentu", E1000_STAT(stats.tx_hnentu) },
  215. - { "RUC", E1000_STAT(stats.ruc) },
  216. - { "RFC", E1000_STAT(stats.rfc) },
  217. -
  218. -#endif
  219. };
  220. #define E1000_STATS_LEN \
  221. sizeof(iegbe_gstrings_stats) / sizeof(struct iegbe_stats)
  222. @@ -158,7 +142,7 @@ static const char iegbe_gstrings_test[][
  223. "Interrupt test (offline)", "Loopback test (offline)",
  224. "Link test (on/offline)"
  225. };
  226. -#define E1000_TEST_LEN (sizeof(iegbe_gstrings_test) / (ETH_GSTRING_LEN))
  227. +#define E1000_TEST_LEN (sizeof(iegbe_gstrings_test) / ETH_GSTRING_LEN)
  228. #endif /* ETHTOOL_TEST */
  229. #define E1000_REGS_LEN 0x20
  230. @@ -176,9 +160,7 @@ iegbe_get_settings(struct net_device *ne
  231. SUPPORTED_10baseT_Full |
  232. SUPPORTED_100baseT_Half |
  233. SUPPORTED_100baseT_Full |
  234. -#ifndef IEGBE_10_100_ONLY
  235. SUPPORTED_1000baseT_Full|
  236. -#endif
  237. SUPPORTED_Autoneg |
  238. SUPPORTED_TP);
  239. @@ -259,21 +241,13 @@ iegbe_set_settings(struct net_device *ne
  240. ADVERTISED_10baseT_Full |
  241. ADVERTISED_100baseT_Half |
  242. ADVERTISED_100baseT_Full |
  243. -#ifndef IEGBE_10_100_ONLY
  244. ADVERTISED_1000baseT_Full|
  245. -#endif
  246. -
  247. ADVERTISED_Autoneg |
  248. ADVERTISED_TP;
  249. ecmd->advertising = hw->autoneg_advertised;
  250. }
  251. - } else {
  252. - uint16_t duplex;
  253. -
  254. - // ethtool uses DUPLEX_FULL/DUPLEX_HALF
  255. - // the driver needs FULL_DUPLEX/HALF_DUPLEX
  256. - duplex = (ecmd->duplex == DUPLEX_FULL) ? FULL_DUPLEX : HALF_DUPLEX;
  257. - if(iegbe_set_spd_dplx(adapter, ecmd->speed + duplex))
  258. + } else
  259. + if(iegbe_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)){
  260. return -EINVAL;
  261. }
  262. /* reset the link */
  263. @@ -728,8 +702,8 @@ iegbe_set_ringparam(struct net_device *n
  264. struct iegbe_rx_ring *rxdr, *rx_old, *rx_new;
  265. int i, err, tx_ring_size, rx_ring_size;
  266. - tx_ring_size = sizeof(struct iegbe_tx_ring) * adapter->num_queues;
  267. - rx_ring_size = sizeof(struct iegbe_rx_ring) * adapter->num_queues;
  268. + tx_ring_size = sizeof(struct iegbe_tx_ring) * adapter->num_tx_queues;
  269. + rx_ring_size = sizeof(struct iegbe_rx_ring) * adapter->num_rx_queues;
  270. if (netif_running(adapter->netdev)){
  271. iegbe_down(adapter);
  272. @@ -768,10 +742,10 @@ iegbe_set_ringparam(struct net_device *n
  273. E1000_MAX_TXD : E1000_MAX_82544_TXD));
  274. E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
  275. - for (i = 0; i < adapter->num_queues; i++) {
  276. - txdr[i].count = txdr->count;
  277. - rxdr[i].count = rxdr->count;
  278. - }
  279. + for (i = 0; i < adapter->num_tx_queues; i++)
  280. + txdr[i].count = txdr->count;
  281. + for (i = 0; i < adapter->num_rx_queues; i++)
  282. + rxdr[i].count = rxdr->count;
  283. if(netif_running(adapter->netdev)) {
  284. /* Try to get new resources before deleting old */
  285. @@ -950,8 +924,7 @@ iegbe_eeprom_test(struct iegbe_adapter *
  286. static irqreturn_t
  287. iegbe_test_intr(int irq,
  288. - void *data,
  289. - struct pt_regs *regs)
  290. + void *data)
  291. {
  292. struct net_device *netdev = (struct net_device *) data;
  293. struct iegbe_adapter *adapter = netdev_priv(netdev);
  294. @@ -973,7 +946,7 @@ iegbe_intr_test(struct iegbe_adapter *ad
  295. /* Hook up test interrupt handler just for this test */
  296. if(!request_irq(irq, &iegbe_test_intr, 0, netdev->name, netdev)) {
  297. shared_int = FALSE;
  298. - } else if(request_irq(irq, &iegbe_test_intr, SA_SHIRQ,
  299. + } else if(request_irq(irq, &iegbe_test_intr, IRQF_SHARED,
  300. netdev->name, netdev)){
  301. *data = 1;
  302. return -1;
  303. @@ -1393,7 +1366,7 @@ iegbe_set_phy_loopback(struct iegbe_adap
  304. * attempt this 10 times.
  305. */
  306. while(iegbe_nonintegrated_phy_loopback(adapter) &&
  307. - count++ < 0xa) { };
  308. + count++ < 0xa);
  309. if(count < 0xb) {
  310. return 0;
  311. }
  312. --- a/Embedded/src/GbE/iegbe.h
  313. +++ b/Embedded/src/GbE/iegbe.h
  314. @@ -1,7 +1,7 @@
  315. /*******************************************************************************
  316. GPL LICENSE SUMMARY
  317. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  318. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  319. This program is free software; you can redistribute it and/or modify
  320. it under the terms of version 2 of the GNU General Public License as
  321. @@ -21,7 +21,7 @@ GPL LICENSE SUMMARY
  322. Contact Information:
  323. Intel Corporation
  324. - version: Embedded.L.1.0.34
  325. + version: Embedded.Release.Patch.L.1.0.7-5
  326. Contact Information:
  327. @@ -127,9 +127,12 @@ struct iegbe_adapter;
  328. #define E1000_MIN_RXD 80
  329. #define E1000_MAX_82544_RXD 4096
  330. +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
  331. /* Supported Rx Buffer Sizes */
  332. #define E1000_RXBUFFER_128 128 /* Used for packet split */
  333. #define E1000_RXBUFFER_256 256 /* Used for packet split */
  334. +#define E1000_RXBUFFER_512 512
  335. +#define E1000_RXBUFFER_1024 1024
  336. #define E1000_RXBUFFER_2048 2048
  337. #define E1000_RXBUFFER_4096 4096
  338. #define E1000_RXBUFFER_8192 8192
  339. @@ -164,11 +167,9 @@ struct iegbe_adapter;
  340. #define E1000_MASTER_SLAVE iegbe_ms_hw_default
  341. #endif
  342. -#ifdef NETIF_F_HW_VLAN_TX
  343. -#define E1000_MNG_VLAN_NONE -1
  344. -#endif
  345. +#define E1000_MNG_VLAN_NONE (-1)
  346. /* Number of packet split data buffers (not including the header buffer) */
  347. -#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1
  348. +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
  349. /* only works for sizes that are powers of 2 */
  350. #define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
  351. @@ -206,6 +207,7 @@ struct iegbe_tx_ring {
  352. spinlock_t tx_lock;
  353. uint16_t tdh;
  354. uint16_t tdt;
  355. + boolean_t last_tx_tso;
  356. uint64_t pkt;
  357. };
  358. @@ -228,6 +230,9 @@ struct iegbe_rx_ring {
  359. struct iegbe_ps_page *ps_page;
  360. struct iegbe_ps_page_dma *ps_page_dma;
  361. + /* cpu for rx queue */
  362. + int cpu;
  363. +
  364. uint16_t rdh;
  365. uint16_t rdt;
  366. uint64_t pkt;
  367. @@ -252,10 +257,8 @@ struct iegbe_adapter {
  368. struct timer_list tx_fifo_stall_timer;
  369. struct timer_list watchdog_timer;
  370. struct timer_list phy_info_timer;
  371. -#ifdef NETIF_F_HW_VLAN_TX
  372. struct vlan_group *vlgrp;
  373. uint16_t mng_vlan_id;
  374. -#endif
  375. uint32_t bd_number;
  376. uint32_t rx_buffer_len;
  377. uint32_t part_num;
  378. @@ -265,8 +268,18 @@ struct iegbe_adapter {
  379. uint16_t link_speed;
  380. uint16_t link_duplex;
  381. spinlock_t stats_lock;
  382. - atomic_t irq_sem;
  383. - struct work_struct tx_timeout_task;
  384. + spinlock_t tx_queue_lock;
  385. + unsigned int total_tx_bytes;
  386. + unsigned int total_tx_packets;
  387. + unsigned int total_rx_bytes;
  388. + unsigned int total_rx_packets;
  389. + /* Interrupt Throttle Rate */
  390. + uint32_t itr;
  391. + uint32_t itr_setting;
  392. + uint16_t tx_itr;
  393. + uint16_t rx_itr;
  394. +
  395. + struct work_struct reset_task;
  396. uint8_t fc_autoneg;
  397. #ifdef ETHTOOL_PHYS_ID
  398. @@ -276,9 +289,8 @@ struct iegbe_adapter {
  399. /* TX */
  400. struct iegbe_tx_ring *tx_ring; /* One per active queue */
  401. -#ifdef CONFIG_E1000_MQ
  402. - struct iegbe_tx_ring **cpu_tx_ring; /* per-cpu */
  403. -#endif
  404. + unsigned int restart_queue;
  405. + unsigned long tx_queue_len;
  406. uint32_t txd_cmd;
  407. uint32_t tx_int_delay;
  408. uint32_t tx_abs_int_delay;
  409. @@ -286,46 +298,33 @@ struct iegbe_adapter {
  410. uint64_t gotcl_old;
  411. uint64_t tpt_old;
  412. uint64_t colc_old;
  413. + uint32_t tx_timeout_count;
  414. uint32_t tx_fifo_head;
  415. uint32_t tx_head_addr;
  416. uint32_t tx_fifo_size;
  417. + uint8_t tx_timeout_factor;
  418. atomic_t tx_fifo_stall;
  419. boolean_t pcix_82544;
  420. boolean_t detect_tx_hung;
  421. /* RX */
  422. -#ifdef CONFIG_E1000_NAPI
  423. - boolean_t (*clean_rx) (struct iegbe_adapter *adapter,
  424. + bool (*clean_rx)(struct iegbe_adapter *adapter,
  425. struct iegbe_rx_ring *rx_ring,
  426. int *work_done, int work_to_do);
  427. -#else
  428. - boolean_t (*clean_rx) (struct iegbe_adapter *adapter,
  429. - struct iegbe_rx_ring *rx_ring);
  430. -#endif
  431. -
  432. -#ifdef IEGBE_GBE_WORKAROUND
  433. void (*alloc_rx_buf) (struct iegbe_adapter *adapter,
  434. - struct iegbe_rx_ring *rx_ring,
  435. - int cleaned_count);
  436. -#else
  437. - void (*alloc_rx_buf) (struct iegbe_adapter *adapter,
  438. - struct iegbe_rx_ring *rx_ring);
  439. -#endif
  440. -
  441. + struct iegbe_rx_ring *rx_ring,
  442. + int cleaned_count);
  443. struct iegbe_rx_ring *rx_ring; /* One per active queue */
  444. -#ifdef CONFIG_E1000_NAPI
  445. + struct napi_struct napi;
  446. struct net_device *polling_netdev; /* One per active queue */
  447. -#endif
  448. -#ifdef CONFIG_E1000_MQ
  449. - struct net_device **cpu_netdev; /* per-cpu */
  450. - struct call_async_data_struct rx_sched_call_data;
  451. - int cpu_for_queue[4];
  452. -#endif
  453. - int num_queues;
  454. +
  455. + int num_tx_queues;
  456. + int num_rx_queues;
  457. uint64_t hw_csum_err;
  458. uint64_t hw_csum_good;
  459. uint64_t rx_hdr_split;
  460. + uint32_t alloc_rx_buff_failed;
  461. uint32_t rx_int_delay;
  462. uint32_t rx_abs_int_delay;
  463. boolean_t rx_csum;
  464. @@ -334,8 +333,6 @@ struct iegbe_adapter {
  465. uint64_t gorcl_old;
  466. uint16_t rx_ps_bsize0;
  467. - /* Interrupt Throttle Rate */
  468. - uint32_t itr;
  469. /* OS defined structs */
  470. struct net_device *netdev;
  471. @@ -378,7 +375,21 @@ struct iegbe_adapter {
  472. #ifdef CONFIG_PCI_MSI
  473. boolean_t have_msi;
  474. #endif
  475. -#define IEGBE_INTD_DISABLE 0x0400
  476. + /* to not mess up cache alignment, always add to the bottom */
  477. + boolean_t tso_force;
  478. + boolean_t smart_power_down; /* phy smart power down */
  479. + boolean_t quad_port_a;
  480. + unsigned long flags;
  481. + uint32_t eeprom_wol;
  482. + int bars;
  483. + int need_ioport;
  484. };
  485. +
  486. +enum iegbe_state_t {
  487. + __E1000_TESTING,
  488. + __E1000_RESETTING,
  489. + __E1000_DOWN
  490. +};
  491. +#define IEGBE_INTD_DISABLE 0x0400
  492. #endif /* _IEGBE_H_ */
  493. --- a/Embedded/src/GbE/iegbe_hw.c
  494. +++ b/Embedded/src/GbE/iegbe_hw.c
  495. @@ -2,7 +2,7 @@
  496. GPL LICENSE SUMMARY
  497. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  498. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  499. This program is free software; you can redistribute it and/or modify
  500. it under the terms of version 2 of the GNU General Public License as
  501. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  502. Contact Information:
  503. Intel Corporation
  504. - version: Embedded.L.1.0.34
  505. + version: Embedded.Release.Patch.L.1.0.7-5
  506. Contact Information:
  507. @@ -2115,7 +2115,7 @@ iegbe_config_mac_to_phy(struct iegbe_hw
  508. ret_val = iegbe_oem_set_trans_gasket(hw);
  509. if(ret_val){
  510. - return ret_val;
  511. + return ret_val;
  512. }
  513. ret_val = iegbe_oem_phy_is_full_duplex(
  514. hw, (int *) &is_FullDuplex);
  515. @@ -2164,7 +2164,7 @@ iegbe_config_mac_to_phy(struct iegbe_hw
  516. }
  517. /* Write the configured values back to the Device Control Reg. */
  518. E1000_WRITE_REG(hw, CTRL, ctrl);
  519. - return E1000_SUCCESS;
  520. + return ret_val;
  521. }
  522. /*****************************************************************************
  523. @@ -2684,7 +2684,7 @@ iegbe_check_for_link(struct iegbe_hw *hw
  524. if(hw->autoneg_failed == 0) {
  525. hw->autoneg_failed = 1;
  526. - return 0;
  527. + return E1000_SUCCESS;
  528. }
  529. DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\r\n");
  530. @@ -5875,7 +5875,7 @@ iegbe_get_cable_length(struct iegbe_hw *
  531. max_agc = cur_agc;
  532. }
  533. }
  534. -
  535. +
  536. /* This is to fix a Klockwork defect, that the array index might
  537. * be out of bounds. 113 is table size */
  538. if (cur_agc < 0x71){
  539. --- a/Embedded/src/GbE/iegbe_hw.h
  540. +++ b/Embedded/src/GbE/iegbe_hw.h
  541. @@ -2,7 +2,7 @@
  542. GPL LICENSE SUMMARY
  543. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  544. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  545. This program is free software; you can redistribute it and/or modify
  546. it under the terms of version 2 of the GNU General Public License as
  547. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  548. Contact Information:
  549. Intel Corporation
  550. - version: Embedded.L.1.0.34
  551. + version: Embedded.Release.Patch.L.1.0.7-5
  552. Contact Information:
  553. @@ -299,7 +299,7 @@ void iegbe_set_media_type(struct iegbe_h
  554. /* Link Configuration */
  555. int32_t iegbe_setup_link(struct iegbe_hw *hw);
  556. int32_t iegbe_phy_setup_autoneg(struct iegbe_hw *hw);
  557. -void iegbe_config_collision_dist(struct iegbe_hw *hw);
  558. +void iegbe_config_collision_dist(struct iegbe_hw *hw);
  559. int32_t iegbe_config_fc_after_link_up(struct iegbe_hw *hw);
  560. int32_t iegbe_check_for_link(struct iegbe_hw *hw);
  561. int32_t iegbe_get_speed_and_duplex(struct iegbe_hw *hw, uint16_t * speed, uint16_t * duplex);
  562. @@ -588,14 +588,6 @@ uint8_t iegbe_arc_subsystem_valid(struct
  563. * o LSC = Link Status Change
  564. */
  565. -#ifdef IEGBE_GBE_WORKAROUND
  566. -#define IMS_ENABLE_MASK ( \
  567. - E1000_IMS_RXT0 | \
  568. - E1000_IMS_TXQE | \
  569. - E1000_IMS_RXDMT0 | \
  570. - E1000_IMS_RXSEQ | \
  571. - E1000_IMS_LSC)
  572. -#else
  573. #define IMS_ENABLE_MASK ( \
  574. E1000_IMS_RXT0 | \
  575. E1000_IMS_TXDW | \
  576. @@ -606,8 +598,7 @@ uint8_t iegbe_arc_subsystem_valid(struct
  577. E1000_ICR_PB | \
  578. E1000_ICR_CPP_TARGET | \
  579. E1000_ICR_CPP_MASTER | \
  580. - E1000_IMS_LSC)
  581. -#endif
  582. + E1000_ICR_LSC)
  583. /* Number of high/low register pairs in the RAR. The RAR (Receive Address
  584. * Registers) holds the directed and multicast addresses that we monitor. We
  585. @@ -923,10 +914,15 @@ struct iegbe_ffvt_entry {
  586. #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
  587. #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
  588. #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
  589. -// Register conflict, does not exist for ICP_xxxx hardware
  590. -// #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
  591. #define E1000_CTRL_AUX 0x000E0 /* Aux Control -RW */
  592. +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
  593. #define E1000_RCTL 0x00100 /* RX Control - RW */
  594. +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
  595. +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
  596. +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
  597. +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
  598. +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
  599. +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
  600. #define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
  601. #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
  602. #define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
  603. @@ -1282,8 +1278,6 @@ struct iegbe_ffvt_entry {
  604. #define E1000_82542_FFMT E1000_FFMT
  605. #define E1000_82542_FFVT E1000_FFVT
  606. #define E1000_82542_HOST_IF E1000_HOST_IF
  607. -// Register conflict with ICP_xxxx hardware, no IAM
  608. -// #define E1000_82542_IAM E1000_IAM
  609. #define E1000_82542_EEMNGCTL E1000_EEMNGCTL
  610. #define E1000_82542_PSRCTL E1000_PSRCTL
  611. #define E1000_82542_RAID E1000_RAID
  612. @@ -1329,6 +1323,7 @@ struct iegbe_hw_stats {
  613. uint64_t algnerrc;
  614. uint64_t symerrs;
  615. uint64_t rxerrc;
  616. + uint64_t txerrc;
  617. uint64_t mpc;
  618. uint64_t scc;
  619. uint64_t ecol;
  620. @@ -1363,6 +1358,7 @@ struct iegbe_hw_stats {
  621. uint64_t ruc;
  622. uint64_t rfc;
  623. uint64_t roc;
  624. + uint64_t rlerrc;
  625. uint64_t rjc;
  626. uint64_t mgprc;
  627. uint64_t mgpdc;
  628. @@ -1392,19 +1388,6 @@ struct iegbe_hw_stats {
  629. uint64_t ictxqmtc;
  630. uint64_t icrxdmtc;
  631. uint64_t icrxoc;
  632. -#ifdef IEGBE_GBE_WORKAROUND
  633. - u64 txqec;
  634. - u64 tx_next_to_clean;
  635. - u64 tx_next_to_use;
  636. - u64 cc_gt_num_rx;
  637. - u64 tx_hnet;
  638. - u64 tx_hnentu;
  639. - u64 num_tx_queues;
  640. -
  641. - u64 num_rx_buf_alloc;
  642. - u64 rx_next_to_clean;
  643. - u64 rx_next_to_use;
  644. -#endif
  645. };
  646. /* Structure containing variables used by the shared code (iegbe_hw.c) */
  647. @@ -1484,6 +1467,7 @@ struct iegbe_hw {
  648. boolean_t ifs_params_forced;
  649. boolean_t in_ifs_mode;
  650. boolean_t mng_reg_access_disabled;
  651. + boolean_t rx_needs_kicking;
  652. boolean_t icp_xxxx_is_link_up;
  653. };
  654. @@ -2358,17 +2342,23 @@ struct iegbe_host_command_info {
  655. #define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
  656. #define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
  657. #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
  658. +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
  659. +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
  660. /* PBA constants */
  661. +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
  662. #define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
  663. #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
  664. +#define E1000_PBA_20K 0x0014
  665. #define E1000_PBA_22K 0x0016
  666. #define E1000_PBA_24K 0x0018
  667. #define E1000_PBA_30K 0x001E
  668. #define E1000_PBA_32K 0x0020
  669. +#define E1000_PBA_34K 0x0022
  670. #define E1000_PBA_38K 0x0026
  671. #define E1000_PBA_40K 0x0028
  672. #define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
  673. +#define E1000_PBS_16K E1000_PBA_16K
  674. /* Flow Control Constants */
  675. #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
  676. @@ -2899,7 +2889,7 @@ struct iegbe_host_command_info {
  677. #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
  678. #define M88E1011_I_REV_4 0x04
  679. #define M88E1111_I_PHY_ID 0x01410CC2
  680. -#define M88E1141_E_PHY_ID 0x01410CD4
  681. +#define M88E1141_E_PHY_ID 0x01410CD0
  682. #define L1LXT971A_PHY_ID 0x001378E0
  683. /* Miscellaneous PHY bit definitions. */
  684. --- a/Embedded/src/GbE/iegbe_main.c
  685. +++ b/Embedded/src/GbE/iegbe_main.c
  686. @@ -2,7 +2,7 @@
  687. GPL LICENSE SUMMARY
  688. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  689. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  690. This program is free software; you can redistribute it and/or modify
  691. it under the terms of version 2 of the GNU General Public License as
  692. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  693. Contact Information:
  694. Intel Corporation
  695. - version: Embedded.L.1.0.34
  696. + version: Embedded.Release.Patch.L.1.0.7-5
  697. Contact Information:
  698. @@ -42,103 +42,15 @@ GPL LICENSE SUMMARY
  699. #include "iegbe.h"
  700. #include "gcu_if.h"
  701. -
  702. -/* Change Log
  703. - * 6.0.58 4/20/05
  704. - * o iegbe_set_spd_dplx tests for compatible speed/duplex specification
  705. - * for fiber adapters
  706. - * 6.0.57 4/19/05
  707. - * o Added code to fix register test failure for devices >= 82571
  708. - *
  709. - * 6.0.52 3/15/05
  710. - * o Added stats_lock around iegbe_read_phy_reg commands to avoid concurrent
  711. - * calls, one from mii_ioctl and other from within update_stats while
  712. - * processing MIIREG ioctl.
  713. - *
  714. - * 6.1.2 4/13/05
  715. - * o Fixed ethtool diagnostics
  716. - * o Enabled flow control to take default eeprom settings
  717. - * o Added stats_lock around iegbe_read_phy_reg commands to avoid concurrent
  718. - * calls, one from mii_ioctl and other from within update_stats while processing
  719. - * MIIREG ioctl.
  720. - * 6.0.55 3/23/05
  721. - * o Support for MODULE_VERSION
  722. - * o Fix APM setting for 82544 based adapters
  723. - * 6.0.54 3/26/05
  724. - * o Added a timer to expire packets that were deferred for cleanup
  725. - * 6.0.52 3/15/05
  726. - * o Added stats_lock around iegbe_read_phy_reg commands to avoid concurrent
  727. - * calls, one from mii_ioctl and other from within update_stats while
  728. - * processing MIIREG ioctl.
  729. - * 6.0.47 3/2/05
  730. - * o Added enhanced functionality to the loopback diags to wrap the
  731. - * descriptor rings
  732. - * o Added manageability vlan filtering workaround.
  733. - *
  734. - * 6.0.44+ 2/15/05
  735. - * o Added code to handle raw packet based DHCP packets
  736. - * o Added code to fix the errata 10 buffer overflow issue
  737. - * o Sync up with WR01-05
  738. - * o applied Anton's patch to resolve tx hang in hardware
  739. - * o iegbe timeouts with early writeback patch
  740. - * o Removed Queensport IDs
  741. - * o fixed driver panic if MAC receives a bad large packets when packet
  742. - * split is enabled
  743. - * o Applied Andrew Mortons patch - iegbe stops working after resume
  744. - * 5.2.29 12/24/03
  745. - * o Bug fix: Endianess issue causing ethtool diags to fail on ppc.
  746. - * o Bug fix: Use pdev->irq instead of netdev->irq for MSI support.
  747. - * o Report driver message on user override of InterruptThrottleRate module
  748. - * parameter.
  749. - * o Bug fix: Change I/O address storage from uint32_t to unsigned long.
  750. - * o Feature: Added ethtool RINGPARAM support.
  751. - * o Feature: Added netpoll support.
  752. - * o Bug fix: Race between Tx queue and Tx clean fixed with a spin lock.
  753. - * o Bug fix: Allow 1000/Full setting for autoneg param for fiber connections.
  754. - * Jon D Mason [jonmason@us.ibm.com].
  755. - *
  756. - * 5.2.22 10/15/03
  757. - * o Bug fix: SERDES devices might be connected to a back-plane switch that
  758. - * doesn't support auto-neg, so add the capability to force 1000/Full.
  759. - * Also, since forcing 1000/Full, sample RxSynchronize bit to detect link
  760. - * state.
  761. - * o Bug fix: Flow control settings for hi/lo watermark didn't consider
  762. - * changes in the RX FIFO size, which could occur with Jumbo Frames or with
  763. - * the reduced FIFO in 82547.
  764. - * o Bug fix: Better propagation of error codes.
  765. - * [Janice Girouard (janiceg -a-t- us.ibm.com)]
  766. - * o Bug fix: hang under heavy Tx stress when running out of Tx descriptors;
  767. - * wasn't clearing context descriptor when backing out of send because of
  768. - * no-resource condition.
  769. - * o Bug fix: check netif_running in dev->poll so we don't have to hang in
  770. - * dev->close until all polls are finished. [Rober Olsson
  771. - * (robert.olsson@data.slu.se)].
  772. - * o Revert TxDescriptor ring size back to 256 since change to 1024 wasn't
  773. - * accepted into the kernel.
  774. - *
  775. - * 5.2.16 8/8/03
  776. - */
  777. -
  778. -#ifdef IEGBE_GBE_WORKAROUND
  779. -#define IEGBE_GBE_WORKAROUND_NUM_RX_DESCRIPTORS 1
  780. -#endif
  781. +#include <linux/ipv6.h>
  782. +#include <net/ip6_checksum.h>
  783. char iegbe_driver_name[] = "iegbe";
  784. char iegbe_driver_string[] = "Gigabit Ethernet Controller Driver";
  785. -#ifndef CONFIG_E1000_NAPI
  786. -#define DRIVERNAPI
  787. -#else
  788. -#define DRIVERNAPI "-NAPI"
  789. -#endif
  790. -#define DRV_VERSION "0.8.0"DRIVERNAPI
  791. +#define DRV_VERSION "1.0.0-K28-NAPI"
  792. char iegbe_driver_version[] = DRV_VERSION;
  793. -char iegbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
  794. +char iegbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
  795. -#define E1000_FIFO_HDR 0x10
  796. -#define E1000_82547_PAD_LEN 0x3E0
  797. -#define MINIMUM_DHCP_PACKET_SIZE 282
  798. -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
  799. -#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
  800. /* iegbe_pci_tbl - PCI Device ID Table
  801. *
  802. @@ -148,95 +60,48 @@ char iegbe_copyright[] = "Copyright (c)
  803. * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  804. */
  805. static struct pci_device_id iegbe_pci_tbl[] = {
  806. -/* INTEL_E1000_ETHERNET_DEVICE(0x1000), */
  807. -/* INTEL_E1000_ETHERNET_DEVICE(0x1001), */
  808. -/* INTEL_E1000_ETHERNET_DEVICE(0x1004), */
  809. -/* INTEL_E1000_ETHERNET_DEVICE(0x1008), */
  810. -/* INTEL_E1000_ETHERNET_DEVICE(0x1009), */
  811. -/* INTEL_E1000_ETHERNET_DEVICE(0x100C), */
  812. -/* INTEL_E1000_ETHERNET_DEVICE(0x100D), */
  813. -/* INTEL_E1000_ETHERNET_DEVICE(0x100E), */
  814. -/* INTEL_E1000_ETHERNET_DEVICE(0x100F), */
  815. -/* INTEL_E1000_ETHERNET_DEVICE(0x1010), */
  816. -/* INTEL_E1000_ETHERNET_DEVICE(0x1011), */
  817. -/* INTEL_E1000_ETHERNET_DEVICE(0x1012), */
  818. -/* INTEL_E1000_ETHERNET_DEVICE(0x1013), */
  819. -/* INTEL_E1000_ETHERNET_DEVICE(0x1014), */
  820. -/* INTEL_E1000_ETHERNET_DEVICE(0x1015), */
  821. -/* INTEL_E1000_ETHERNET_DEVICE(0x1016), */
  822. -/* INTEL_E1000_ETHERNET_DEVICE(0x1017), */
  823. -/* INTEL_E1000_ETHERNET_DEVICE(0x1018), */
  824. -/* INTEL_E1000_ETHERNET_DEVICE(0x1019), */
  825. -/* INTEL_E1000_ETHERNET_DEVICE(0x101A), */
  826. -/* INTEL_E1000_ETHERNET_DEVICE(0x101D), */
  827. -/* INTEL_E1000_ETHERNET_DEVICE(0x101E), */
  828. -/* INTEL_E1000_ETHERNET_DEVICE(0x1026), */
  829. -/* INTEL_E1000_ETHERNET_DEVICE(0x1027), */
  830. -/* INTEL_E1000_ETHERNET_DEVICE(0x1028), */
  831. -/* INTEL_E1000_ETHERNET_DEVICE(0x105E), */
  832. -/* INTEL_E1000_ETHERNET_DEVICE(0x105F), */
  833. -/* INTEL_E1000_ETHERNET_DEVICE(0x1060), */
  834. -/* INTEL_E1000_ETHERNET_DEVICE(0x1075), */
  835. -/* INTEL_E1000_ETHERNET_DEVICE(0x1076), */
  836. -/* INTEL_E1000_ETHERNET_DEVICE(0x1077), */
  837. -/* INTEL_E1000_ETHERNET_DEVICE(0x1078), */
  838. -/* INTEL_E1000_ETHERNET_DEVICE(0x1079), */
  839. -/* INTEL_E1000_ETHERNET_DEVICE(0x107A), */
  840. -/* INTEL_E1000_ETHERNET_DEVICE(0x107B), */
  841. -/* INTEL_E1000_ETHERNET_DEVICE(0x107C), */
  842. -/* INTEL_E1000_ETHERNET_DEVICE(0x107D), */
  843. -/* INTEL_E1000_ETHERNET_DEVICE(0x107E), */
  844. -/* INTEL_E1000_ETHERNET_DEVICE(0x107F), */
  845. -/* INTEL_E1000_ETHERNET_DEVICE(0x108A), */
  846. -/* INTEL_E1000_ETHERNET_DEVICE(0x108B), */
  847. -/* INTEL_E1000_ETHERNET_DEVICE(0x108C), */
  848. -/* INTEL_E1000_ETHERNET_DEVICE(0x109A), */
  849. - INTEL_E1000_ETHERNET_DEVICE(0x5040),
  850. - INTEL_E1000_ETHERNET_DEVICE(0x5041),
  851. - INTEL_E1000_ETHERNET_DEVICE(0x5042),
  852. - INTEL_E1000_ETHERNET_DEVICE(0x5043),
  853. - INTEL_E1000_ETHERNET_DEVICE(0x5044),
  854. - INTEL_E1000_ETHERNET_DEVICE(0x5045),
  855. - INTEL_E1000_ETHERNET_DEVICE(0x5046),
  856. - INTEL_E1000_ETHERNET_DEVICE(0x5047),
  857. - INTEL_E1000_ETHERNET_DEVICE(0x5048),
  858. - INTEL_E1000_ETHERNET_DEVICE(0x5049),
  859. - INTEL_E1000_ETHERNET_DEVICE(0x504A),
  860. - INTEL_E1000_ETHERNET_DEVICE(0x504B),
  861. - /* required last entry */
  862. + INTEL_E1000_ETHERNET_DEVICE(0x5040),
  863. + INTEL_E1000_ETHERNET_DEVICE(0x5041),
  864. + INTEL_E1000_ETHERNET_DEVICE(0x5042),
  865. + INTEL_E1000_ETHERNET_DEVICE(0x5043),
  866. + INTEL_E1000_ETHERNET_DEVICE(0x5044),
  867. + INTEL_E1000_ETHERNET_DEVICE(0x5045),
  868. + INTEL_E1000_ETHERNET_DEVICE(0x5046),
  869. + INTEL_E1000_ETHERNET_DEVICE(0x5047),
  870. + INTEL_E1000_ETHERNET_DEVICE(0x5048),
  871. + INTEL_E1000_ETHERNET_DEVICE(0x5049),
  872. + INTEL_E1000_ETHERNET_DEVICE(0x504A),
  873. + INTEL_E1000_ETHERNET_DEVICE(0x504B),
  874. + /* required last entry */
  875. {0,}
  876. };
  877. MODULE_DEVICE_TABLE(pci, iegbe_pci_tbl);
  878. -DEFINE_SPINLOCK(print_lock);
  879. int iegbe_up(struct iegbe_adapter *adapter);
  880. void iegbe_down(struct iegbe_adapter *adapter);
  881. +void iegbe_reinit_locked(struct iegbe_adapter *adapter);
  882. void iegbe_reset(struct iegbe_adapter *adapter);
  883. int iegbe_set_spd_dplx(struct iegbe_adapter *adapter, uint16_t spddplx);
  884. int iegbe_setup_all_tx_resources(struct iegbe_adapter *adapter);
  885. int iegbe_setup_all_rx_resources(struct iegbe_adapter *adapter);
  886. void iegbe_free_all_tx_resources(struct iegbe_adapter *adapter);
  887. void iegbe_free_all_rx_resources(struct iegbe_adapter *adapter);
  888. -int iegbe_setup_tx_resources(struct iegbe_adapter *adapter,
  889. +static int iegbe_setup_tx_resources(struct iegbe_adapter *adapter,
  890. struct iegbe_tx_ring *txdr);
  891. -int iegbe_setup_rx_resources(struct iegbe_adapter *adapter,
  892. +static int iegbe_setup_rx_resources(struct iegbe_adapter *adapter,
  893. struct iegbe_rx_ring *rxdr);
  894. -void iegbe_free_tx_resources(struct iegbe_adapter *adapter,
  895. +static void iegbe_free_tx_resources(struct iegbe_adapter *adapter,
  896. struct iegbe_tx_ring *tx_ring);
  897. -void iegbe_free_rx_resources(struct iegbe_adapter *adapter,
  898. +static void iegbe_free_rx_resources(struct iegbe_adapter *adapter,
  899. struct iegbe_rx_ring *rx_ring);
  900. void iegbe_update_stats(struct iegbe_adapter *adapter);
  901. -
  902. static int iegbe_init_module(void);
  903. static void iegbe_exit_module(void);
  904. static int iegbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  905. static void __devexit iegbe_remove(struct pci_dev *pdev);
  906. static int iegbe_alloc_queues(struct iegbe_adapter *adapter);
  907. -#ifdef CONFIG_E1000_MQ
  908. -static void iegbe_setup_queue_mapping(struct iegbe_adapter *adapter);
  909. -#endif
  910. static int iegbe_sw_init(struct iegbe_adapter *adapter);
  911. static int iegbe_open(struct net_device *netdev);
  912. static int iegbe_close(struct net_device *netdev);
  913. @@ -249,7 +114,8 @@ static void iegbe_clean_tx_ring(struct i
  914. struct iegbe_tx_ring *tx_ring);
  915. static void iegbe_clean_rx_ring(struct iegbe_adapter *adapter,
  916. struct iegbe_rx_ring *rx_ring);
  917. -static void iegbe_set_multi(struct net_device *netdev);
  918. +
  919. +static void iegbe_set_rx_mode(struct net_device *netdev);
  920. static void iegbe_update_phy_info(unsigned long data);
  921. static void iegbe_watchdog(unsigned long data);
  922. static void iegbe_82547_tx_fifo_stall(unsigned long data);
  923. @@ -257,66 +123,46 @@ static int iegbe_xmit_frame(struct sk_bu
  924. static struct net_device_stats * iegbe_get_stats(struct net_device *netdev);
  925. static int iegbe_change_mtu(struct net_device *netdev, int new_mtu);
  926. static int iegbe_set_mac(struct net_device *netdev, void *p);
  927. -static irqreturn_t iegbe_intr(int irq, void *data, struct pt_regs *regs);
  928. +static irqreturn_t iegbe_intr(int irq, void *data);
  929. -void iegbe_tasklet(unsigned long);
  930. +static irqreturn_t iegbe_intr_msi(int irq, void *data);
  931. -#ifndef IEGBE_GBE_WORKAROUND
  932. -static boolean_t iegbe_clean_tx_irq(struct iegbe_adapter *adapter,
  933. +static bool iegbe_clean_tx_irq(struct iegbe_adapter *adapter,
  934. struct iegbe_tx_ring *tx_ring);
  935. -#endif
  936. -
  937. -#ifdef CONFIG_E1000_NAPI
  938. -static int iegbe_clean(struct net_device *poll_dev, int *budget);
  939. -static boolean_t iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
  940. +static int iegbe_clean(struct napi_struct *napi, int budget);
  941. +static bool iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
  942. struct iegbe_rx_ring *rx_ring,
  943. int *work_done, int work_to_do);
  944. -static boolean_t iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
  945. +static bool iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
  946. struct iegbe_rx_ring *rx_ring,
  947. int *work_done, int work_to_do);
  948. -#else
  949. -static boolean_t iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
  950. - struct iegbe_rx_ring *rx_ring);
  951. -static boolean_t iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
  952. - struct iegbe_rx_ring *rx_ring);
  953. -#endif
  954. -#ifdef IEGBE_GBE_WORKAROUND
  955. +
  956. static void iegbe_alloc_rx_buffers(struct iegbe_adapter *adapter,
  957. struct iegbe_rx_ring *rx_ring,
  958. int cleaned_count);
  959. static void iegbe_alloc_rx_buffers_ps(struct iegbe_adapter *adapter,
  960. struct iegbe_rx_ring *rx_ring,
  961. int cleaned_count);
  962. -#else
  963. -static void iegbe_alloc_rx_buffers(struct iegbe_adapter *adapter,
  964. - struct iegbe_rx_ring *rx_ring);
  965. -static void iegbe_alloc_rx_buffers_ps(struct iegbe_adapter *adapter,
  966. - struct iegbe_rx_ring *rx_ring);
  967. -#endif
  968. +
  969. static int iegbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
  970. -#ifdef SIOCGMIIPHY
  971. static int iegbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
  972. - int cmd);
  973. -#endif
  974. + int cmd);
  975. void set_ethtool_ops(struct net_device *netdev);
  976. extern int ethtool_ioctl(struct ifreq *ifr);
  977. static void iegbe_enter_82542_rst(struct iegbe_adapter *adapter);
  978. static void iegbe_leave_82542_rst(struct iegbe_adapter *adapter);
  979. static void iegbe_tx_timeout(struct net_device *dev);
  980. -static void iegbe_tx_timeout_task(struct net_device *dev);
  981. +static void iegbe_reset_task(struct work_struct *work);
  982. static void iegbe_smartspeed(struct iegbe_adapter *adapter);
  983. static inline int iegbe_82547_fifo_workaround(struct iegbe_adapter *adapter,
  984. - struct sk_buff *skb);
  985. + struct sk_buff *skb);
  986. -#ifdef NETIF_F_HW_VLAN_TX
  987. -static void iegbe_vlan_rx_register(struct net_device *netdev,
  988. - struct vlan_group *grp);
  989. +static void iegbe_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
  990. static void iegbe_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
  991. static void iegbe_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
  992. static void iegbe_restore_vlan(struct iegbe_adapter *adapter);
  993. -#endif
  994. static int iegbe_notify_reboot(struct notifier_block *,
  995. unsigned long event,
  996. @@ -331,15 +177,17 @@ static int iegbe_resume(struct pci_dev *
  997. static void iegbe_netpoll (struct net_device *netdev);
  998. #endif
  999. -#ifdef CONFIG_E1000_MQ
  1000. -/* for multiple Rx queues */
  1001. +#define COPYBREAK_DEFAULT 256
  1002. +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
  1003. +module_param(copybreak, uint, 0644);
  1004. +MODULE_PARM_DESC(copybreak,
  1005. + "Maximum size of packet that is copied to a new buffer on receive");
  1006. void iegbe_rx_schedule(void *data);
  1007. -#endif
  1008. struct notifier_block iegbe_notifier_reboot = {
  1009. - .notifier_call = iegbe_notify_reboot,
  1010. - .next = NULL,
  1011. - .priority = 0
  1012. + .notifier_call = iegbe_notify_reboot,
  1013. + .next = NULL,
  1014. + .priority = 0
  1015. };
  1016. /* Exported from other modules */
  1017. @@ -347,14 +195,14 @@ struct notifier_block iegbe_notifier_reb
  1018. extern void iegbe_check_options(struct iegbe_adapter *adapter);
  1019. static struct pci_driver iegbe_driver = {
  1020. - .name = iegbe_driver_name,
  1021. - .id_table = iegbe_pci_tbl,
  1022. - .probe = iegbe_probe,
  1023. - .remove = __devexit_p(iegbe_remove),
  1024. - /* Power Managment Hooks */
  1025. + .name = iegbe_driver_name,
  1026. + .id_table = iegbe_pci_tbl,
  1027. + .probe = iegbe_probe,
  1028. + .remove = __devexit_p(iegbe_remove),
  1029. + /* Power Managment Hooks */
  1030. #ifdef CONFIG_PM
  1031. - .suspend = iegbe_suspend,
  1032. - .resume = iegbe_resume
  1033. + .suspend = iegbe_suspend,
  1034. + .resume = iegbe_resume
  1035. #endif
  1036. };
  1037. @@ -364,46 +212,17 @@ MODULE_LICENSE("GPL");
  1038. MODULE_VERSION(DRV_VERSION);
  1039. static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
  1040. -module_param(debug, int, 0);
  1041. +module_param(debug, int, 0x0);
  1042. MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  1043. -static uint8_t gcu_suspend = 0;
  1044. -static uint8_t gcu_resume = 0;
  1045. +static uint8_t gcu_suspend = 0x0;
  1046. +static uint8_t gcu_resume = 0x0;
  1047. struct pci_dev *gcu = NULL;
  1048. -unsigned long tasklet_data;
  1049. -DECLARE_TASKLET(iegbe_reset_tasklet, iegbe_tasklet, (unsigned long) &tasklet_data);
  1050. /**
  1051. * iegbe_iegbe_tasklet -*
  1052. **/
  1053. -void iegbe_tasklet(unsigned long data)
  1054. -{
  1055. - char* err_msg = "TEST";
  1056. - uint32_t *icr = (uint32_t*) data;
  1057. - uint32_t gbe = *icr & 0x000000FF;
  1058. - if( *icr & E1000_ICR_RX_DESC_FIFO_PAR) { /* 21 */
  1059. - err_msg = "DMA Transmit Descriptor 2-bit ECC Error!";
  1060. - }
  1061. - if( *icr & E1000_ICR_TX_DESC_FIFO_PAR) { /* 20 */
  1062. - err_msg = "DMA Receive Descriptor 2-bit ECC Error!";
  1063. - }
  1064. - if( *icr & E1000_ICR_PB) { /* 23 */
  1065. - err_msg = "DMA Packet Buffer 2-bit ECC Error!";
  1066. - }
  1067. - if( *icr & E1000_ICR_CPP_TARGET) { /* 27 */
  1068. - err_msg = "Statistic Register ECC Error!";
  1069. - }
  1070. - if( *icr & E1000_ICR_CPP_MASTER) {
  1071. - err_msg = "CPP Error!";
  1072. - }
  1073. - spin_lock(&print_lock);
  1074. - printk("IEGBE%d: System Reset due to: %s\n", gbe, err_msg);
  1075. - dump_stack();
  1076. - spin_unlock(&print_lock);
  1077. - panic(err_msg);
  1078. - return;
  1079. -}
  1080. /**
  1081. * iegbe_init_module - Driver Registration Routine
  1082. *
  1083. @@ -411,21 +230,24 @@ void iegbe_tasklet(unsigned long data)
  1084. * loaded. All it does is register with the PCI subsystem.
  1085. **/
  1086. -static int __init
  1087. -iegbe_init_module(void)
  1088. +static int __init iegbe_init_module(void)
  1089. {
  1090. - int ret;
  1091. + int ret;
  1092. printk(KERN_INFO "%s - version %s\n",
  1093. - iegbe_driver_string, iegbe_driver_version);
  1094. + iegbe_driver_string, iegbe_driver_version);
  1095. - printk(KERN_INFO "%s\n", iegbe_copyright);
  1096. + printk(KERN_INFO "%s\n", iegbe_copyright);
  1097. - ret = pci_module_init(&iegbe_driver);
  1098. - if(ret >= 0) {
  1099. - register_reboot_notifier(&iegbe_notifier_reboot);
  1100. - }
  1101. - return ret;
  1102. + ret = pci_register_driver(&iegbe_driver);
  1103. + if (copybreak != COPYBREAK_DEFAULT) {
  1104. + if (copybreak == 0)
  1105. + printk(KERN_INFO "iegbe: copybreak disabled\n");
  1106. + else
  1107. + printk(KERN_INFO "iegbe: copybreak enabled for "
  1108. + "packets <= %u bytes\n", copybreak);
  1109. + }
  1110. + return ret;
  1111. }
  1112. module_init(iegbe_init_module);
  1113. @@ -437,29 +259,51 @@ module_init(iegbe_init_module);
  1114. * from memory.
  1115. **/
  1116. -static void __exit
  1117. -iegbe_exit_module(void)
  1118. +static void __exit iegbe_exit_module(void)
  1119. {
  1120. -
  1121. - unregister_reboot_notifier(&iegbe_notifier_reboot);
  1122. - pci_unregister_driver(&iegbe_driver);
  1123. + pci_unregister_driver(&iegbe_driver);
  1124. }
  1125. module_exit(iegbe_exit_module);
  1126. +static int iegbe_request_irq(struct iegbe_adapter *adapter)
  1127. +{
  1128. + struct net_device *netdev = adapter->netdev;
  1129. + irq_handler_t handler = iegbe_intr;
  1130. + int irq_flags = IRQF_SHARED;
  1131. + int err;
  1132. + adapter->have_msi = !pci_enable_msi(adapter->pdev);
  1133. + if (adapter->have_msi) {
  1134. + handler = iegbe_intr_msi;
  1135. + irq_flags = 0;
  1136. + }
  1137. + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
  1138. + netdev);
  1139. + if (err) {
  1140. + if (adapter->have_msi)
  1141. + pci_disable_msi(adapter->pdev);
  1142. + DPRINTK(PROBE, ERR,
  1143. + "Unable to allocate interrupt Error: %d\n", err);
  1144. + }
  1145. + return err;
  1146. +}
  1147. +static void iegbe_free_irq(struct iegbe_adapter *adapter)
  1148. +{
  1149. + struct net_device *netdev = adapter->netdev;
  1150. + free_irq(adapter->pdev->irq, netdev);
  1151. + if (adapter->have_msi)
  1152. + pci_disable_msi(adapter->pdev);
  1153. +}
  1154. /**
  1155. * iegbe_irq_disable - Mask off interrupt generation on the NIC
  1156. * @adapter: board private structure
  1157. **/
  1158. -static inline void
  1159. -iegbe_irq_disable(struct iegbe_adapter *adapter)
  1160. +static void iegbe_irq_disable(struct iegbe_adapter *adapter)
  1161. {
  1162. -
  1163. - atomic_inc(&adapter->irq_sem);
  1164. - E1000_WRITE_REG(&adapter->hw, IMC, ~0);
  1165. - E1000_WRITE_FLUSH(&adapter->hw);
  1166. - synchronize_irq(adapter->pdev->irq);
  1167. + E1000_WRITE_REG(&adapter->hw, IMC, ~0);
  1168. + E1000_WRITE_FLUSH(&adapter->hw);
  1169. + synchronize_irq(adapter->pdev->irq);
  1170. }
  1171. /**
  1172. @@ -470,244 +314,414 @@ iegbe_irq_disable(struct iegbe_adapter *
  1173. static inline void
  1174. iegbe_irq_enable(struct iegbe_adapter *adapter)
  1175. {
  1176. -
  1177. - if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
  1178. - E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
  1179. - E1000_WRITE_FLUSH(&adapter->hw);
  1180. - }
  1181. + E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
  1182. + E1000_WRITE_FLUSH(&adapter->hw);
  1183. }
  1184. -#ifdef NETIF_F_HW_VLAN_TX
  1185. -void
  1186. -iegbe_update_mng_vlan(struct iegbe_adapter *adapter)
  1187. -{
  1188. - struct net_device *netdev = adapter->netdev;
  1189. - uint16_t vid = adapter->hw.mng_cookie.vlan_id;
  1190. - uint16_t old_vid = adapter->mng_vlan_id;
  1191. - if(adapter->vlgrp) {
  1192. - if(!adapter->vlgrp->vlan_devices[vid]) {
  1193. - if(adapter->hw.mng_cookie.status &
  1194. - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
  1195. - iegbe_vlan_rx_add_vid(netdev, vid);
  1196. - adapter->mng_vlan_id = vid;
  1197. - } else {
  1198. - adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
  1199. - }
  1200. - if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
  1201. - (vid != old_vid) &&
  1202. - !adapter->vlgrp->vlan_devices[old_vid]) {
  1203. - iegbe_vlan_rx_kill_vid(netdev, old_vid);
  1204. - }
  1205. - }
  1206. -}
  1207. +static void iegbe_update_mng_vlan(struct iegbe_adapter *adapter)
  1208. +{
  1209. + struct iegbe_hw *hw = &adapter->hw;
  1210. + struct net_device *netdev = adapter->netdev;
  1211. + u16 vid = hw->mng_cookie.vlan_id;
  1212. + u16 old_vid = adapter->mng_vlan_id;
  1213. + if (adapter->vlgrp) {
  1214. + if (!vlan_group_get_device(adapter->vlgrp, vid)) {
  1215. + if (hw->mng_cookie.status &
  1216. + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
  1217. + iegbe_vlan_rx_add_vid(netdev, vid);
  1218. + adapter->mng_vlan_id = vid;
  1219. + } else
  1220. + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
  1221. +
  1222. + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
  1223. + (vid != old_vid) &&
  1224. + !vlan_group_get_device(adapter->vlgrp, old_vid))
  1225. + iegbe_vlan_rx_kill_vid(netdev, old_vid);
  1226. + } else
  1227. + adapter->mng_vlan_id = vid;
  1228. + }
  1229. }
  1230. -#endif
  1231. -int
  1232. -iegbe_up(struct iegbe_adapter *adapter)
  1233. +/**
  1234. + * iegbe_configure - configure the hardware for RX and TX
  1235. + * @adapter = private board structure
  1236. + **/
  1237. +static void iegbe_configure(struct iegbe_adapter *adapter)
  1238. {
  1239. struct net_device *netdev = adapter->netdev;
  1240. - int i, err;
  1241. - uint16_t pci_cmd;
  1242. -
  1243. - /* hardware has been reset, we need to reload some things */
  1244. -
  1245. - /* Reset the PHY if it was previously powered down */
  1246. - if(adapter->hw.media_type == iegbe_media_type_copper
  1247. - || (adapter->hw.media_type == iegbe_media_type_oem
  1248. - && iegbe_oem_phy_is_copper(&adapter->hw))) {
  1249. - uint16_t mii_reg;
  1250. - iegbe_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
  1251. - if(mii_reg & MII_CR_POWER_DOWN){
  1252. - iegbe_phy_reset(&adapter->hw);
  1253. - }
  1254. - }
  1255. + int i;
  1256. - iegbe_set_multi(netdev);
  1257. + iegbe_set_rx_mode(netdev);
  1258. -#ifdef NETIF_F_HW_VLAN_TX
  1259. iegbe_restore_vlan(adapter);
  1260. -#endif
  1261. iegbe_configure_tx(adapter);
  1262. iegbe_setup_rctl(adapter);
  1263. iegbe_configure_rx(adapter);
  1264. + /* call E1000_DESC_UNUSED which always leaves
  1265. + * at least 1 descriptor unused to make sure
  1266. + * next_to_use != next_to_clean */
  1267. + for (i = 0; i < adapter->num_rx_queues; i++) {
  1268. + struct iegbe_rx_ring *ring = &adapter->rx_ring[i];
  1269. + adapter->alloc_rx_buf(adapter, ring,
  1270. + E1000_DESC_UNUSED(ring));
  1271. + }
  1272. -#ifdef IEGBE_GBE_WORKAROUND
  1273. - for (i = 0; i < adapter->num_queues; i++)
  1274. - adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i],
  1275. - IEGBE_GBE_WORKAROUND_NUM_RX_DESCRIPTORS + 1);
  1276. -#else
  1277. - for (i = 0; i < adapter->num_queues; i++)
  1278. - adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
  1279. -#endif
  1280. + adapter->tx_queue_len = netdev->tx_queue_len;
  1281. +}
  1282. -#ifdef CONFIG_PCI_MSI
  1283. - if(adapter->hw.mac_type > iegbe_82547_rev_2
  1284. - || adapter->hw.mac_type == iegbe_icp_xxxx) {
  1285. - adapter->have_msi = TRUE;
  1286. - if((err = pci_enable_msi(adapter->pdev))) {
  1287. - DPRINTK(PROBE, ERR,
  1288. - "Unable to allocate MSI interrupt Error: %d\n", err);
  1289. - adapter->have_msi = FALSE;
  1290. - }
  1291. - }
  1292. - pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
  1293. - pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd | IEGBE_INTD_DISABLE);
  1294. +int iegbe_up(struct iegbe_adapter *adapter)
  1295. +{
  1296. + /* hardware has been reset, we need to reload some things */
  1297. + iegbe_configure(adapter);
  1298. -#endif
  1299. - if((err = request_irq(adapter->pdev->irq, &iegbe_intr,
  1300. - SA_SHIRQ | SA_SAMPLE_RANDOM,
  1301. - netdev->name, netdev))) {
  1302. - DPRINTK(PROBE, ERR,
  1303. - "Unable to allocate interrupt Error: %d\n", err);
  1304. - return err;
  1305. - }
  1306. + clear_bit(__E1000_DOWN, &adapter->flags);
  1307. - mod_timer(&adapter->watchdog_timer, jiffies);
  1308. + napi_enable(&adapter->napi);
  1309. -#ifdef CONFIG_E1000_NAPI
  1310. - netif_poll_enable(netdev);
  1311. -#endif
  1312. iegbe_irq_enable(adapter);
  1313. + adapter->hw.get_link_status = 0x1;
  1314. return 0;
  1315. }
  1316. -void
  1317. -iegbe_down(struct iegbe_adapter *adapter)
  1318. -{
  1319. - struct net_device *netdev = adapter->netdev;
  1320. -
  1321. - iegbe_irq_disable(adapter);
  1322. -#ifdef CONFIG_E1000_MQ
  1323. - while (atomic_read(&adapter->rx_sched_call_data.count) != 0) { };
  1324. -#endif
  1325. - free_irq(adapter->pdev->irq, netdev);
  1326. -#ifdef CONFIG_PCI_MSI
  1327. - if((adapter->hw.mac_type > iegbe_82547_rev_2
  1328. - || adapter->hw.mac_type == iegbe_icp_xxxx)
  1329. - && adapter->have_msi == TRUE) {
  1330. - pci_disable_msi(adapter->pdev);
  1331. - }
  1332. -#endif
  1333. - del_timer_sync(&adapter->tx_fifo_stall_timer);
  1334. - del_timer_sync(&adapter->watchdog_timer);
  1335. - del_timer_sync(&adapter->phy_info_timer);
  1336. +/**
  1337. + * iegbe_power_up_phy - restore link in case the phy was powered down
  1338. + * @adapter: address of board private structure
  1339. + *
  1340. + * The phy may be powered down to save power and turn off link when the
  1341. + * driver is unloaded and wake on lan is not enabled (among others)
  1342. + * *** this routine MUST be followed by a call to iegbe_reset ***
  1343. + *
  1344. + **/
  1345. -#ifdef CONFIG_E1000_NAPI
  1346. - netif_poll_disable(netdev);
  1347. -#endif
  1348. - adapter->link_speed = 0;
  1349. - adapter->link_duplex = 0;
  1350. - netif_carrier_off(netdev);
  1351. - netif_stop_queue(netdev);
  1352. +void iegbe_power_up_phy(struct iegbe_adapter *adapter)
  1353. +{
  1354. + struct iegbe_hw *hw = &adapter->hw;
  1355. + u16 mii_reg = 0;
  1356. - iegbe_reset(adapter);
  1357. - iegbe_clean_all_tx_rings(adapter);
  1358. - iegbe_clean_all_rx_rings(adapter);
  1359. + /* Just clear the power down bit to wake the phy back up */
  1360. + if (hw->media_type == iegbe_media_type_copper) {
  1361. + /* according to the manual, the phy will retain its
  1362. + * settings across a power-down/up cycle */
  1363. + iegbe_read_phy_reg(hw, PHY_CTRL, &mii_reg);
  1364. + mii_reg &= ~MII_CR_POWER_DOWN;
  1365. + iegbe_write_phy_reg(hw, PHY_CTRL, mii_reg);
  1366. + }
  1367. +}
  1368. - /* If WoL is not enabled and management mode is not IAMT
  1369. - * or if WoL is not enabled and OEM PHY is copper based,
  1370. - * power down the PHY so no link is implied when interface is down */
  1371. - if(!adapter->wol
  1372. - && ((adapter->hw.mac_type >= iegbe_82540
  1373. - && adapter->hw.media_type == iegbe_media_type_copper
  1374. - && !iegbe_check_mng_mode(&adapter->hw)
  1375. - && !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN))
  1376. - || (adapter->hw.media_type == iegbe_media_type_oem
  1377. - && iegbe_oem_phy_is_copper(&adapter->hw)))){
  1378. +static void iegbe_power_down_phy(struct iegbe_adapter *adapter)
  1379. +{
  1380. + struct iegbe_hw *hw = &adapter->hw;
  1381. - uint16_t mii_reg;
  1382. - iegbe_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
  1383. + /* Power down the PHY so no link is implied when interface is down *
  1384. + * The PHY cannot be powered down if any of the following is true *
  1385. + * (a) WoL is enabled
  1386. + * (b) AMT is active
  1387. + * (c) SoL/IDER session is active */
  1388. + if (!adapter->wol && hw->mac_type >= iegbe_82540 &&
  1389. + hw->media_type == iegbe_media_type_copper) {
  1390. + u16 mii_reg = 0;
  1391. +
  1392. + switch (hw->mac_type) {
  1393. + case iegbe_82540:
  1394. + case iegbe_82545:
  1395. + case iegbe_82545_rev_3:
  1396. + case iegbe_82546:
  1397. + case iegbe_82546_rev_3:
  1398. + case iegbe_82541:
  1399. + case iegbe_82541_rev_2:
  1400. + case iegbe_82547:
  1401. + case iegbe_82547_rev_2:
  1402. + if (E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)
  1403. + goto out;
  1404. + break;
  1405. + case iegbe_82571:
  1406. + case iegbe_82572:
  1407. + case iegbe_82573:
  1408. + if (iegbe_check_mng_mode(hw) ||
  1409. + iegbe_check_phy_reset_block(hw))
  1410. + goto out;
  1411. + break;
  1412. + default:
  1413. + goto out;
  1414. + }
  1415. + iegbe_read_phy_reg(hw, PHY_CTRL, &mii_reg);
  1416. mii_reg |= MII_CR_POWER_DOWN;
  1417. - iegbe_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
  1418. + iegbe_write_phy_reg(hw, PHY_CTRL, mii_reg);
  1419. mdelay(1);
  1420. }
  1421. +out:
  1422. + return;
  1423. }
  1424. -void
  1425. -iegbe_reset(struct iegbe_adapter *adapter)
  1426. +void iegbe_down(struct iegbe_adapter *adapter)
  1427. {
  1428. - struct net_device *netdev = adapter->netdev;
  1429. - uint32_t pba, manc;
  1430. - uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
  1431. - uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
  1432. + struct net_device *netdev = adapter->netdev;
  1433. + /* signal that we're down so the interrupt handler does not
  1434. + * reschedule our watchdog timer */
  1435. + set_bit(__E1000_DOWN, &adapter->flags);
  1436. - /* Repartition Pba for greater than 9k mtu
  1437. - * To take effect CTRL.RST is required.
  1438. - */
  1439. + napi_disable(&adapter->napi);
  1440. - switch (adapter->hw.mac_type) {
  1441. - case iegbe_82547:
  1442. - case iegbe_82547_rev_2:
  1443. - pba = E1000_PBA_30K;
  1444. - break;
  1445. - case iegbe_82571:
  1446. - case iegbe_82572:
  1447. - pba = E1000_PBA_38K;
  1448. - break;
  1449. - case iegbe_82573:
  1450. - pba = E1000_PBA_12K;
  1451. + iegbe_irq_disable(adapter);
  1452. +
  1453. + del_timer_sync(&adapter->tx_fifo_stall_timer);
  1454. + del_timer_sync(&adapter->watchdog_timer);
  1455. + del_timer_sync(&adapter->phy_info_timer);
  1456. +
  1457. + netdev->tx_queue_len = adapter->tx_queue_len;
  1458. + adapter->link_speed = 0;
  1459. + adapter->link_duplex = 0;
  1460. + netif_carrier_off(netdev);
  1461. + netif_stop_queue(netdev);
  1462. +
  1463. + iegbe_reset(adapter);
  1464. + iegbe_clean_all_tx_rings(adapter);
  1465. + iegbe_clean_all_rx_rings(adapter);
  1466. +}
  1467. +void iegbe_reinit_locked(struct iegbe_adapter *adapter)
  1468. +{
  1469. + WARN_ON(in_interrupt());
  1470. + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
  1471. + msleep(1);
  1472. + iegbe_down(adapter);
  1473. + iegbe_up(adapter);
  1474. + clear_bit(__E1000_RESETTING, &adapter->flags);
  1475. +}
  1476. +
  1477. +void iegbe_reset(struct iegbe_adapter *adapter)
  1478. +{
  1479. + struct iegbe_hw *hw = &adapter->hw;
  1480. + u32 pba = 0, tx_space, min_tx_space, min_rx_space;
  1481. + u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
  1482. + bool legacy_pba_adjust = false;
  1483. +
  1484. + /* Repartition Pba for greater than 9k mtu
  1485. + * To take effect CTRL.RST is required.
  1486. + */
  1487. +
  1488. + switch (hw->mac_type) {
  1489. + case iegbe_82542_rev2_0:
  1490. + case iegbe_82542_rev2_1:
  1491. + case iegbe_82543:
  1492. + case iegbe_82544:
  1493. + case iegbe_82540:
  1494. + case iegbe_82541:
  1495. + case iegbe_82541_rev_2:
  1496. + case iegbe_icp_xxxx:
  1497. + legacy_pba_adjust = true;
  1498. + pba = E1000_PBA_48K;
  1499. break;
  1500. - default:
  1501. + case iegbe_82545:
  1502. + case iegbe_82545_rev_3:
  1503. + case iegbe_82546:
  1504. + case iegbe_82546_rev_3:
  1505. pba = E1000_PBA_48K;
  1506. break;
  1507. - }
  1508. + case iegbe_82547:
  1509. + case iegbe_82573:
  1510. + case iegbe_82547_rev_2:
  1511. + legacy_pba_adjust = true;
  1512. + pba = E1000_PBA_30K;
  1513. + break;
  1514. + case iegbe_82571:
  1515. + case iegbe_82572:
  1516. + case iegbe_undefined:
  1517. + case iegbe_num_macs:
  1518. + break;
  1519. + }
  1520. +
  1521. + if (legacy_pba_adjust) {
  1522. + if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
  1523. + pba -= 8; /* allocate more FIFO for Tx */
  1524. + /* send an XOFF when there is enough space in the
  1525. + * Rx FIFO to hold one extra full size Rx packet
  1526. + */
  1527. - if((adapter->hw.mac_type != iegbe_82573) &&
  1528. - (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
  1529. - pba -= 0x8; /* allocate more FIFO for Tx */
  1530. - /* send an XOFF when there is enough space in the
  1531. - * Rx FIFO to hold one extra full size Rx packet
  1532. - */
  1533. - fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
  1534. - ETHERNET_FCS_SIZE + 0x1;
  1535. - fc_low_water_mark = fc_high_water_mark + 0x8;
  1536. - }
  1537. + if (hw->mac_type == iegbe_82547) {
  1538. + adapter->tx_fifo_head = 0;
  1539. + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
  1540. + adapter->tx_fifo_size =
  1541. + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
  1542. + atomic_set(&adapter->tx_fifo_stall, 0);
  1543. + }
  1544. + } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
  1545. + E1000_WRITE_REG(&adapter->hw, PBA, pba);
  1546. +
  1547. + /* To maintain wire speed transmits, the Tx FIFO should be
  1548. + * large enough to accommodate two full transmit packets,
  1549. + * rounded up to the next 1KB and expressed in KB. Likewise,
  1550. + * the Rx FIFO should be large enough to accommodate at least
  1551. + * one full receive packet and is similarly rounded up and
  1552. + * expressed in KB. */
  1553. + pba = E1000_READ_REG(&adapter->hw, PBA);
  1554. + /* upper 16 bits has Tx packet buffer allocation size in KB */
  1555. + tx_space = pba >> 16;
  1556. + /* lower 16 bits has Rx packet buffer allocation size in KB */
  1557. + pba &= 0xffff;
  1558. + /* don't include ethernet FCS because hardware appends/strips */
  1559. + min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
  1560. + VLAN_TAG_SIZE;
  1561. + min_tx_space = min_rx_space;
  1562. + min_tx_space *= 2;
  1563. + min_tx_space = ALIGN(min_tx_space, 1024);
  1564. + min_tx_space >>= 10;
  1565. + min_rx_space = ALIGN(min_rx_space, 1024);
  1566. + min_rx_space >>= 10;
  1567. +
  1568. + /* If current Tx allocation is less than the min Tx FIFO size,
  1569. + * and the min Tx FIFO size is less than the current Rx FIFO
  1570. + * allocation, take space away from current Rx allocation */
  1571. + if (tx_space < min_tx_space &&
  1572. + ((min_tx_space - tx_space) < pba)) {
  1573. + pba = pba - (min_tx_space - tx_space);
  1574. +
  1575. + /* PCI/PCIx hardware has PBA alignment constraints */
  1576. + switch (hw->mac_type) {
  1577. + case iegbe_82545 ... iegbe_82546_rev_3:
  1578. + pba &= ~(E1000_PBA_8K - 1);
  1579. + break;
  1580. + default:
  1581. + break;
  1582. + }
  1583. - if(adapter->hw.mac_type == iegbe_82547) {
  1584. - adapter->tx_fifo_head = 0;
  1585. - adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
  1586. - adapter->tx_fifo_size =
  1587. - (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
  1588. - atomic_set(&adapter->tx_fifo_stall, 0);
  1589. + /* if short on rx space, rx wins and must trump tx
  1590. + * adjustment or use Early Receive if available */
  1591. + if (pba < min_rx_space) {
  1592. + switch (hw->mac_type) {
  1593. + case iegbe_82573:
  1594. + /* ERT enabled in iegbe_configure_rx */
  1595. + break;
  1596. + default:
  1597. + pba = min_rx_space;
  1598. + break;
  1599. + }
  1600. + }
  1601. + }
  1602. }
  1603. E1000_WRITE_REG(&adapter->hw, PBA, pba);
  1604. /* flow control settings */
  1605. - adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
  1606. - fc_high_water_mark;
  1607. - adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
  1608. - fc_low_water_mark;
  1609. - adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
  1610. - adapter->hw.fc_send_xon = 1;
  1611. - adapter->hw.fc = adapter->hw.original_fc;
  1612. + /* Set the FC high water mark to 90% of the FIFO size.
  1613. + * Required to clear last 3 LSB */
  1614. + fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
  1615. + /* We can't use 90% on small FIFOs because the remainder
  1616. + * would be less than 1 full frame. In this case, we size
  1617. + * it to allow at least a full frame above the high water
  1618. + * mark. */
  1619. + if (pba < E1000_PBA_16K)
  1620. + fc_high_water_mark = (pba * 1024) - 1600;
  1621. +
  1622. + hw->fc_high_water = fc_high_water_mark;
  1623. + hw->fc_low_water = fc_high_water_mark - 8;
  1624. + hw->fc_pause_time = E1000_FC_PAUSE_TIME;
  1625. + hw->fc_send_xon = 1;
  1626. + hw->fc = hw->original_fc;
  1627. /* Allow time for pending master requests to run */
  1628. - iegbe_reset_hw(&adapter->hw);
  1629. - if(adapter->hw.mac_type >= iegbe_82544){
  1630. + iegbe_reset_hw(hw);
  1631. + if (hw->mac_type >= iegbe_82544)
  1632. E1000_WRITE_REG(&adapter->hw, WUC, 0);
  1633. - }
  1634. - if(iegbe_init_hw(&adapter->hw)) {
  1635. +
  1636. + if (iegbe_init_hw(hw))
  1637. DPRINTK(PROBE, ERR, "Hardware Error\n");
  1638. - }
  1639. -#ifdef NETIF_F_HW_VLAN_TX
  1640. iegbe_update_mng_vlan(adapter);
  1641. -#endif
  1642. +
  1643. + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
  1644. + if (hw->mac_type >= iegbe_82544 &&
  1645. + hw->mac_type <= iegbe_82547_rev_2 &&
  1646. + hw->autoneg == 1 &&
  1647. + hw->autoneg_advertised == ADVERTISE_1000_FULL) {
  1648. + u32 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  1649. + /* clear phy power management bit if we are in gig only mode,
  1650. + * which if enabled will attempt negotiation to 100Mb, which
  1651. + * can cause a loss of link at power off or driver unload */
  1652. + ctrl &= ~E1000_CTRL_SWDPIN3;
  1653. + E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  1654. + }
  1655. +
  1656. /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
  1657. E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
  1658. - iegbe_reset_adaptive(&adapter->hw);
  1659. - iegbe_phy_get_info(&adapter->hw, &adapter->phy_info);
  1660. - if(adapter->en_mng_pt) {
  1661. - manc = E1000_READ_REG(&adapter->hw, MANC);
  1662. - manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
  1663. - E1000_WRITE_REG(&adapter->hw, MANC, manc);
  1664. + iegbe_reset_adaptive(hw);
  1665. + iegbe_phy_get_info(hw, &adapter->phy_info);
  1666. +
  1667. + if (!adapter->smart_power_down &&
  1668. + (hw->mac_type == iegbe_82571 ||
  1669. + hw->mac_type == iegbe_82572)) {
  1670. + u16 phy_data = 0;
  1671. + /* speed up time to link by disabling smart power down, ignore
  1672. + * the return value of this function because there is nothing
  1673. + * different we would do if it failed */
  1674. + iegbe_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
  1675. + &phy_data);
  1676. + phy_data &= ~IGP02E1000_PM_SPD;
  1677. + iegbe_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
  1678. + phy_data);
  1679. + }
  1680. +
  1681. +}
  1682. +
  1683. +/**
  1684. + * Dump the eeprom for users having checksum issues
  1685. + **/
  1686. +static void iegbe_dump_eeprom(struct iegbe_adapter *adapter)
  1687. +{
  1688. + struct net_device *netdev = adapter->netdev;
  1689. + struct ethtool_eeprom eeprom;
  1690. + const struct ethtool_ops *ops = netdev->ethtool_ops;
  1691. + u8 *data;
  1692. + int i;
  1693. + u16 csum_old, csum_new = 0;
  1694. +
  1695. + eeprom.len = ops->get_eeprom_len(netdev);
  1696. + eeprom.offset = 0;
  1697. +
  1698. + data = kmalloc(eeprom.len, GFP_KERNEL);
  1699. + if (!data) {
  1700. + printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
  1701. + " data\n");
  1702. + return;
  1703. }
  1704. +
  1705. + ops->get_eeprom(netdev, &eeprom, data);
  1706. +
  1707. + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
  1708. + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
  1709. + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
  1710. + csum_new += data[i] + (data[i + 1] << 8);
  1711. + csum_new = EEPROM_SUM - csum_new;
  1712. +
  1713. + printk(KERN_ERR "/*********************/\n");
  1714. + printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
  1715. + printk(KERN_ERR "Calculated : 0x%04x\n", csum_new);
  1716. +
  1717. + printk(KERN_ERR "Offset Values\n");
  1718. + printk(KERN_ERR "======== ======\n");
  1719. + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
  1720. +
  1721. + printk(KERN_ERR "Include this output when contacting your support "
  1722. + "provider.\n");
  1723. + printk(KERN_ERR "This is not a software error! Something bad "
  1724. + "happened to your hardware or\n");
  1725. + printk(KERN_ERR "EEPROM image. Ignoring this "
  1726. + "problem could result in further problems,\n");
  1727. + printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
  1728. + printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
  1729. + "which is invalid\n");
  1730. + printk(KERN_ERR "and requires you to set the proper MAC "
  1731. + "address manually before continuing\n");
  1732. + printk(KERN_ERR "to enable this network device.\n");
  1733. + printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
  1734. + "to your hardware vendor\n");
  1735. + printk(KERN_ERR "or Intel Customer Support.\n");
  1736. + printk(KERN_ERR "/*********************/\n");
  1737. +
  1738. + kfree(data);
  1739. }
  1740. /**
  1741. @@ -721,184 +735,166 @@ iegbe_reset(struct iegbe_adapter *adapte
  1742. * The OS initialization, configuring of the adapter private structure,
  1743. * and a hardware reset occur.
  1744. **/
  1745. -
  1746. -static int __devinit
  1747. -iegbe_probe(struct pci_dev *pdev,
  1748. +static int __devinit iegbe_probe(struct pci_dev *pdev,
  1749. const struct pci_device_id *ent)
  1750. {
  1751. - struct net_device *netdev;
  1752. - struct iegbe_adapter *adapter;
  1753. - unsigned long mmio_start, mmio_len;
  1754. - uint32_t ctrl_ext;
  1755. - uint32_t swsm;
  1756. + struct net_device *netdev;
  1757. + struct iegbe_adapter *adapter;
  1758. + struct iegbe_hw *hw;
  1759. static int cards_found = 0;
  1760. + int i, err, pci_using_dac;
  1761. + u16 eeprom_data = 0;
  1762. + u16 eeprom_apme_mask = E1000_EEPROM_APME;
  1763. + int bars;
  1764. + DECLARE_MAC_BUF(mac);
  1765. - int i, err, pci_using_dac;
  1766. - uint16_t eeprom_data = 0;
  1767. - uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
  1768. + bars = pci_select_bars(pdev, IORESOURCE_MEM);
  1769. + err = pci_enable_device(pdev);
  1770. + if (err)
  1771. + return err;
  1772. - if((err = pci_enable_device(pdev))) {
  1773. - return err;
  1774. - }
  1775. - if(!(err = pci_set_dma_mask(pdev, PCI_DMA_64BIT))) {
  1776. + if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
  1777. + !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
  1778. pci_using_dac = 1;
  1779. - } else {
  1780. - if((err = pci_set_dma_mask(pdev, PCI_DMA_32BIT))) {
  1781. - E1000_ERR("No usable DMA configuration, aborting\n");
  1782. - return err;
  1783. - }
  1784. + } else {
  1785. + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  1786. + if (err) {
  1787. + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  1788. + if (err) {
  1789. + E1000_ERR("No usable DMA configuration, "
  1790. + "aborting\n");
  1791. + goto err_dma;
  1792. + }
  1793. + }
  1794. pci_using_dac = 0;
  1795. - }
  1796. -
  1797. - if((err = pci_request_regions(pdev, iegbe_driver_name))) {
  1798. - return err;
  1799. }
  1800. - pci_set_master(pdev);
  1801. - netdev = alloc_etherdev(sizeof(struct iegbe_adapter));
  1802. - if(!netdev) {
  1803. - err = -ENOMEM;
  1804. - goto err_alloc_etherdev;
  1805. - }
  1806. + err = pci_request_selected_regions(pdev, bars, iegbe_driver_name);
  1807. + if (err)
  1808. + goto err_pci_reg;
  1809. +
  1810. + pci_set_master(pdev);
  1811. +
  1812. + err = -ENOMEM;
  1813. + netdev = alloc_etherdev(sizeof(struct iegbe_adapter));
  1814. + if (!netdev)
  1815. + goto err_alloc_etherdev;
  1816. - SET_MODULE_OWNER(netdev);
  1817. SET_NETDEV_DEV(netdev, &pdev->dev);
  1818. - pci_set_drvdata(pdev, netdev);
  1819. - adapter = netdev_priv(netdev);
  1820. - adapter->netdev = netdev;
  1821. - adapter->pdev = pdev;
  1822. - adapter->hw.back = adapter;
  1823. - adapter->msg_enable = (0x1 << debug) - 0x1;
  1824. -
  1825. - mmio_start = pci_resource_start(pdev, BAR_0);
  1826. - mmio_len = pci_resource_len(pdev, BAR_0);
  1827. -
  1828. - adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
  1829. - if(!adapter->hw.hw_addr) {
  1830. - err = -EIO;
  1831. - goto err_ioremap;
  1832. - }
  1833. -
  1834. - for(i = BAR_1; i <= BAR_5; i++) {
  1835. - if(pci_resource_len(pdev, i) == 0) {
  1836. - continue;
  1837. - }
  1838. - if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
  1839. - adapter->hw.io_base = pci_resource_start(pdev, i);
  1840. - break;
  1841. - }
  1842. - }
  1843. -
  1844. - netdev->open = &iegbe_open;
  1845. - netdev->stop = &iegbe_close;
  1846. - netdev->hard_start_xmit = &iegbe_xmit_frame;
  1847. - netdev->get_stats = &iegbe_get_stats;
  1848. - netdev->set_multicast_list = &iegbe_set_multi;
  1849. + pci_set_drvdata(pdev, netdev);
  1850. + adapter = netdev_priv(netdev);
  1851. + adapter->netdev = netdev;
  1852. + adapter->pdev = pdev;
  1853. + adapter->msg_enable = (1 << debug) - 1;
  1854. + adapter->bars = bars;
  1855. +
  1856. + hw = &adapter->hw;
  1857. + hw->back = adapter;
  1858. +
  1859. + err = -EIO;
  1860. + hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
  1861. + pci_resource_len(pdev, BAR_0));
  1862. + if (!hw->hw_addr)
  1863. + goto err_ioremap;
  1864. +
  1865. + netdev->open = &iegbe_open;
  1866. + netdev->stop = &iegbe_close;
  1867. + netdev->hard_start_xmit = &iegbe_xmit_frame;
  1868. + netdev->get_stats = &iegbe_get_stats;
  1869. + netdev->set_rx_mode = &iegbe_set_rx_mode;
  1870. netdev->set_mac_address = &iegbe_set_mac;
  1871. - netdev->change_mtu = &iegbe_change_mtu;
  1872. - netdev->do_ioctl = &iegbe_ioctl;
  1873. + netdev->change_mtu = &iegbe_change_mtu;
  1874. + netdev->do_ioctl = &iegbe_ioctl;
  1875. set_ethtool_ops(netdev);
  1876. -#ifdef HAVE_TX_TIMEOUT
  1877. - netdev->tx_timeout = &iegbe_tx_timeout;
  1878. - netdev->watchdog_timeo = 0x5 * HZ;
  1879. -#endif
  1880. -#ifdef CONFIG_E1000_NAPI
  1881. - netdev->poll = &iegbe_clean;
  1882. - netdev->weight = 0x40;
  1883. -#endif
  1884. -#ifdef NETIF_F_HW_VLAN_TX
  1885. - netdev->vlan_rx_register = iegbe_vlan_rx_register;
  1886. - netdev->vlan_rx_add_vid = iegbe_vlan_rx_add_vid;
  1887. - netdev->vlan_rx_kill_vid = iegbe_vlan_rx_kill_vid;
  1888. -#endif
  1889. + netdev->tx_timeout = &iegbe_tx_timeout;
  1890. + netdev->watchdog_timeo = 5 * HZ;
  1891. + netif_napi_add(netdev, &adapter->napi, iegbe_clean, 64);
  1892. + netdev->vlan_rx_register = iegbe_vlan_rx_register;
  1893. + netdev->vlan_rx_add_vid = iegbe_vlan_rx_add_vid;
  1894. + netdev->vlan_rx_kill_vid = iegbe_vlan_rx_kill_vid;
  1895. #ifdef CONFIG_NET_POLL_CONTROLLER
  1896. - netdev->poll_controller = iegbe_netpoll;
  1897. + netdev->poll_controller = iegbe_netpoll;
  1898. #endif
  1899. - strcpy(netdev->name, pci_name(pdev));
  1900. + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
  1901. - netdev->mem_start = mmio_start;
  1902. - netdev->mem_end = mmio_start + mmio_len;
  1903. - netdev->base_addr = adapter->hw.io_base;
  1904. - adapter->bd_number = cards_found;
  1905. + adapter->bd_number = cards_found;
  1906. - /* setup the private structure */
  1907. + /* setup the private structure */
  1908. - if((err = iegbe_sw_init(adapter))) {
  1909. - goto err_sw_init;
  1910. - }
  1911. - if((err = iegbe_check_phy_reset_block(&adapter->hw))) {
  1912. - DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
  1913. - }
  1914. -#ifdef MAX_SKB_FRAGS
  1915. - if(adapter->hw.mac_type >= iegbe_82543) {
  1916. -#ifdef NETIF_F_HW_VLAN_TX
  1917. - netdev->features = NETIF_F_SG |
  1918. - NETIF_F_HW_CSUM |
  1919. - NETIF_F_HW_VLAN_TX |
  1920. - NETIF_F_HW_VLAN_RX |
  1921. - NETIF_F_HW_VLAN_FILTER;
  1922. -#else
  1923. - netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
  1924. -#endif
  1925. - }
  1926. + err = iegbe_sw_init(adapter);
  1927. + if (err)
  1928. + goto err_sw_init;
  1929. + err = -EIO;
  1930. + if (iegbe_check_phy_reset_block(hw))
  1931. + DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
  1932. -#ifdef NETIF_F_TSO
  1933. - if((adapter->hw.mac_type >= iegbe_82544) &&
  1934. - (adapter->hw.mac_type != iegbe_82547)) {
  1935. - netdev->features |= NETIF_F_TSO;
  1936. - }
  1937. -#ifdef NETIF_F_TSO_IPV6
  1938. - if(adapter->hw.mac_type > iegbe_82547_rev_2) {
  1939. - netdev->features |= NETIF_F_TSO_IPV6;
  1940. - }
  1941. -#endif
  1942. -#endif
  1943. - if(pci_using_dac) {
  1944. - netdev->features |= NETIF_F_HIGHDMA;
  1945. + if (hw->mac_type >= iegbe_82543) {
  1946. + netdev->features = NETIF_F_SG |
  1947. + NETIF_F_HW_CSUM |
  1948. + NETIF_F_HW_VLAN_TX |
  1949. + NETIF_F_HW_VLAN_RX |
  1950. + NETIF_F_HW_VLAN_FILTER;
  1951. }
  1952. -#endif
  1953. -#ifdef NETIF_F_LLTX
  1954. - netdev->features |= NETIF_F_LLTX;
  1955. -#endif
  1956. - adapter->en_mng_pt = iegbe_enable_mng_pass_thru(&adapter->hw);
  1957. + if ((hw->mac_type >= iegbe_82544) &&
  1958. + (hw->mac_type != iegbe_82547))
  1959. + netdev->features |= NETIF_F_TSO;
  1960. - /* before reading the EEPROM, reset the controller to
  1961. - * put the device in a known good starting state */
  1962. + if (hw->mac_type > iegbe_82547_rev_2)
  1963. + netdev->features |= NETIF_F_TSO6;
  1964. + if (pci_using_dac)
  1965. + netdev->features |= NETIF_F_HIGHDMA;
  1966. +
  1967. + netdev->features |= NETIF_F_LLTX;
  1968. - iegbe_reset_hw(&adapter->hw);
  1969. + adapter->en_mng_pt = iegbe_enable_mng_pass_thru(hw);
  1970. - /* make sure the EEPROM is good */
  1971. - if(iegbe_validate_eeprom_checksum(&adapter->hw) < 0) {
  1972. - DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
  1973. - err = -EIO;
  1974. + /* initialize eeprom parameters */
  1975. +
  1976. + if (iegbe_init_eeprom_params(hw)) {
  1977. + E1000_ERR("EEPROM initialization failed\n");
  1978. goto err_eeprom;
  1979. }
  1980. - /* copy the MAC address out of the EEPROM */
  1981. + /* before reading the EEPROM, reset the controller to
  1982. + * put the device in a known good starting state */
  1983. - if(iegbe_read_mac_addr(&adapter->hw)) {
  1984. - DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
  1985. - }
  1986. - memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
  1987. + iegbe_reset_hw(hw);
  1988. - if(!is_valid_ether_addr(netdev->dev_addr)) {
  1989. - DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
  1990. - err = -EIO;
  1991. - goto err_eeprom;
  1992. - }
  1993. + /* make sure the EEPROM is good */
  1994. + if (iegbe_validate_eeprom_checksum(hw) < 0) {
  1995. + DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
  1996. + iegbe_dump_eeprom(adapter);
  1997. + /*
  1998. + * set MAC address to all zeroes to invalidate and temporarily
  1999. + * disable this device for the user. This blocks regular
  2000. + * traffic while still permitting ethtool ioctls from reaching
  2001. + * the hardware as well as allowing the user to run the
  2002. + * interface after manually setting a hw addr using
  2003. + * `ip set address`
  2004. + */
  2005. + memset(hw->mac_addr, 0, netdev->addr_len);
  2006. + } else {
  2007. + /* copy the MAC address out of the EEPROM */
  2008. + if (iegbe_read_mac_addr(hw))
  2009. + DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
  2010. + }
  2011. + /* don't block initialization here due to bad MAC address */
  2012. + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
  2013. + memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
  2014. - iegbe_read_part_num(&adapter->hw, &(adapter->part_num));
  2015. + if (!is_valid_ether_addr(netdev->perm_addr))
  2016. + DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
  2017. - iegbe_get_bus_info(&adapter->hw);
  2018. + iegbe_get_bus_info(hw);
  2019. init_timer(&adapter->tx_fifo_stall_timer);
  2020. adapter->tx_fifo_stall_timer.function = &iegbe_82547_tx_fifo_stall;
  2021. - adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
  2022. + adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
  2023. init_timer(&adapter->watchdog_timer);
  2024. adapter->watchdog_timer.function = &iegbe_watchdog;
  2025. @@ -906,75 +902,50 @@ iegbe_probe(struct pci_dev *pdev,
  2026. init_timer(&adapter->phy_info_timer);
  2027. adapter->phy_info_timer.function = &iegbe_update_phy_info;
  2028. - adapter->phy_info_timer.data = (unsigned long) adapter;
  2029. -
  2030. - INIT_WORK(&adapter->tx_timeout_task,
  2031. - (void (*)(void *))iegbe_tx_timeout_task, netdev);
  2032. + adapter->phy_info_timer.data = (unsigned long)adapter;
  2033. - /* we're going to reset, so assume we have no link for now */
  2034. -
  2035. - netif_carrier_off(netdev);
  2036. - netif_stop_queue(netdev);
  2037. + INIT_WORK(&adapter->reset_task, iegbe_reset_task);
  2038. - iegbe_check_options(adapter);
  2039. + iegbe_check_options(adapter);
  2040. - /* Initial Wake on LAN setting
  2041. - * If APM wake is enabled in the EEPROM,
  2042. - * enable the ACPI Magic Packet filter
  2043. - */
  2044. + /* Initial Wake on LAN setting
  2045. + * If APM wake is enabled in the EEPROM,
  2046. + * enable the ACPI Magic Packet filter
  2047. + */
  2048. - switch(adapter->hw.mac_type) {
  2049. - case iegbe_82542_rev2_0:
  2050. - case iegbe_82542_rev2_1:
  2051. - case iegbe_82543:
  2052. - break;
  2053. - case iegbe_82544:
  2054. - iegbe_read_eeprom(&adapter->hw,
  2055. - EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
  2056. - eeprom_apme_mask = E1000_EEPROM_82544_APM;
  2057. - break;
  2058. + switch(adapter->hw.mac_type) {
  2059. + case iegbe_82542_rev2_0:
  2060. + case iegbe_82542_rev2_1:
  2061. + case iegbe_82543:
  2062. + break;
  2063. + case iegbe_82544:
  2064. + iegbe_read_eeprom(&adapter->hw,
  2065. + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
  2066. + eeprom_apme_mask = E1000_EEPROM_82544_APM;
  2067. + break;
  2068. case iegbe_icp_xxxx:
  2069. - iegbe_read_eeprom(&adapter->hw,
  2070. - EEPROM_INIT_CONTROL3_ICP_xxxx(adapter->bd_number),
  2071. - 1, &eeprom_data);
  2072. - eeprom_apme_mask = EEPROM_CTRL3_APME_ICP_xxxx;
  2073. - break;
  2074. - case iegbe_82546:
  2075. - case iegbe_82546_rev_3:
  2076. - if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
  2077. - && (adapter->hw.media_type == iegbe_media_type_copper)) {
  2078. - iegbe_read_eeprom(&adapter->hw,
  2079. - EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
  2080. - break;
  2081. - }
  2082. - /* Fall Through */
  2083. - default:
  2084. - iegbe_read_eeprom(&adapter->hw,
  2085. - EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
  2086. - break;
  2087. - }
  2088. + iegbe_read_eeprom(&adapter->hw,
  2089. + EEPROM_INIT_CONTROL3_ICP_xxxx(adapter->bd_number),
  2090. + 1, &eeprom_data);
  2091. + eeprom_apme_mask = EEPROM_CTRL3_APME_ICP_xxxx;
  2092. + break;
  2093. + case iegbe_82546:
  2094. + case iegbe_82546_rev_3:
  2095. + if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
  2096. + && (adapter->hw.media_type == iegbe_media_type_copper)) {
  2097. + iegbe_read_eeprom(&adapter->hw,
  2098. + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
  2099. + break;
  2100. + }
  2101. + /* Fall Through */
  2102. + default:
  2103. + iegbe_read_eeprom(&adapter->hw,
  2104. + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
  2105. + break;
  2106. + }
  2107. if(eeprom_data & eeprom_apme_mask) {
  2108. - adapter->wol |= E1000_WUFC_MAG;
  2109. + adapter->wol |= E1000_WUFC_MAG;
  2110. }
  2111. - /* reset the hardware with the new settings */
  2112. - iegbe_reset(adapter);
  2113. -
  2114. - /* Let firmware know the driver has taken over */
  2115. - switch(adapter->hw.mac_type) {
  2116. - case iegbe_82571:
  2117. - case iegbe_82572:
  2118. - ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  2119. - E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  2120. - ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
  2121. - break;
  2122. - case iegbe_82573:
  2123. - swsm = E1000_READ_REG(&adapter->hw, SWSM);
  2124. - E1000_WRITE_REG(&adapter->hw, SWSM,
  2125. - swsm | E1000_SWSM_DRV_LOAD);
  2126. - break;
  2127. - default:
  2128. - break;
  2129. - }
  2130. /* The ICP_xxxx device has multiple, duplicate interrupt
  2131. * registers, so disable all but the first one
  2132. @@ -987,24 +958,40 @@ iegbe_probe(struct pci_dev *pdev,
  2133. E1000_WRITE_REG(&adapter->hw, IMC2, ~0UL);
  2134. }
  2135. - strcpy(netdev->name, "eth%d");
  2136. - if((err = register_netdev(netdev))) {
  2137. - goto err_register;
  2138. - }
  2139. + iegbe_reset(adapter);
  2140. + netif_carrier_off(netdev);
  2141. + netif_stop_queue(netdev);
  2142. + strcpy(netdev->name, "eth%d");
  2143. + err = register_netdev(netdev);
  2144. + if (err)
  2145. + goto err_register;
  2146. +
  2147. DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
  2148. - cards_found++;
  2149. - return 0;
  2150. + cards_found++;
  2151. + return 0;
  2152. err_register:
  2153. -err_sw_init:
  2154. err_eeprom:
  2155. - iounmap(adapter->hw.hw_addr);
  2156. + if (!iegbe_check_phy_reset_block(hw))
  2157. + iegbe_phy_hw_reset(hw);
  2158. + if (hw->flash_address)
  2159. + iounmap(hw->flash_address);
  2160. + for (i = 0; i < adapter->num_rx_queues; i++)
  2161. + dev_put(&adapter->polling_netdev[i]);
  2162. + kfree(adapter->tx_ring);
  2163. + kfree(adapter->rx_ring);
  2164. + kfree(adapter->polling_netdev);
  2165. +err_sw_init:
  2166. + iounmap(hw->hw_addr);
  2167. err_ioremap:
  2168. - free_netdev(netdev);
  2169. + free_netdev(netdev);
  2170. err_alloc_etherdev:
  2171. - pci_release_regions(pdev);
  2172. - return err;
  2173. + pci_release_selected_regions(pdev, bars);
  2174. +err_pci_reg:
  2175. +err_dma:
  2176. + pci_disable_device(pdev);
  2177. + return err;
  2178. }
  2179. /**
  2180. @@ -1020,64 +1007,36 @@ err_alloc_etherdev:
  2181. static void __devexit
  2182. iegbe_remove(struct pci_dev *pdev)
  2183. {
  2184. - struct net_device *netdev = pci_get_drvdata(pdev);
  2185. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  2186. - uint32_t ctrl_ext;
  2187. - uint32_t manc, swsm;
  2188. -#ifdef CONFIG_E1000_NAPI
  2189. - int i;
  2190. -#endif
  2191. -
  2192. - if(adapter->hw.mac_type >= iegbe_82540
  2193. - && adapter->hw.mac_type != iegbe_icp_xxxx
  2194. - && adapter->hw.media_type == iegbe_media_type_copper) {
  2195. - manc = E1000_READ_REG(&adapter->hw, MANC);
  2196. - if(manc & E1000_MANC_SMBUS_EN) {
  2197. - manc |= E1000_MANC_ARP_EN;
  2198. - E1000_WRITE_REG(&adapter->hw, MANC, manc);
  2199. - }
  2200. - }
  2201. -
  2202. - switch(adapter->hw.mac_type) {
  2203. - case iegbe_82571:
  2204. - case iegbe_82572:
  2205. - ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  2206. - E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  2207. - ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
  2208. - break;
  2209. - case iegbe_82573:
  2210. - swsm = E1000_READ_REG(&adapter->hw, SWSM);
  2211. - E1000_WRITE_REG(&adapter->hw, SWSM,
  2212. - swsm & ~E1000_SWSM_DRV_LOAD);
  2213. - break;
  2214. -
  2215. - default:
  2216. - break;
  2217. - }
  2218. + struct net_device *netdev = pci_get_drvdata(pdev);
  2219. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  2220. + uint32_t manc;
  2221. + int i;
  2222. +
  2223. + if(adapter->hw.mac_type >= iegbe_82540
  2224. + && adapter->hw.mac_type != iegbe_icp_xxxx
  2225. + && adapter->hw.media_type == iegbe_media_type_copper) {
  2226. + manc = E1000_READ_REG(&adapter->hw, MANC);
  2227. + if(manc & E1000_MANC_SMBUS_EN) {
  2228. + manc |= E1000_MANC_ARP_EN;
  2229. + E1000_WRITE_REG(&adapter->hw, MANC, manc);
  2230. + }
  2231. + }
  2232. - unregister_netdev(netdev);
  2233. -#ifdef CONFIG_E1000_NAPI
  2234. - for (i = 0; i < adapter->num_queues; i++)
  2235. + unregister_netdev(netdev);
  2236. + for (i = 0x0; i < adapter->num_rx_queues; i++)
  2237. dev_put(&adapter->polling_netdev[i]);
  2238. -#endif
  2239. if(!iegbe_check_phy_reset_block(&adapter->hw)) {
  2240. - iegbe_phy_hw_reset(&adapter->hw);
  2241. + iegbe_phy_hw_reset(&adapter->hw);
  2242. }
  2243. - kfree(adapter->tx_ring);
  2244. - kfree(adapter->rx_ring);
  2245. -#ifdef CONFIG_E1000_NAPI
  2246. - kfree(adapter->polling_netdev);
  2247. -#endif
  2248. + kfree(adapter->tx_ring);
  2249. + kfree(adapter->rx_ring);
  2250. + kfree(adapter->polling_netdev);
  2251. - iounmap(adapter->hw.hw_addr);
  2252. - pci_release_regions(pdev);
  2253. + iounmap(adapter->hw.hw_addr);
  2254. + pci_release_regions(pdev);
  2255. -#ifdef CONFIG_E1000_MQ
  2256. - free_percpu(adapter->cpu_netdev);
  2257. - free_percpu(adapter->cpu_tx_ring);
  2258. -#endif
  2259. - free_netdev(netdev);
  2260. + free_netdev(netdev);
  2261. }
  2262. /**
  2263. @@ -1092,118 +1051,78 @@ iegbe_remove(struct pci_dev *pdev)
  2264. static int __devinit
  2265. iegbe_sw_init(struct iegbe_adapter *adapter)
  2266. {
  2267. - struct iegbe_hw *hw = &adapter->hw;
  2268. - struct net_device *netdev = adapter->netdev;
  2269. - struct pci_dev *pdev = adapter->pdev;
  2270. -#ifdef CONFIG_E1000_NAPI
  2271. - int i;
  2272. -#endif
  2273. + struct iegbe_hw *hw = &adapter->hw;
  2274. + struct net_device *netdev = adapter->netdev;
  2275. + struct pci_dev *pdev = adapter->pdev;
  2276. + int i;
  2277. - /* PCI config space info */
  2278. + /* PCI config space info */
  2279. - hw->vendor_id = pdev->vendor;
  2280. - hw->device_id = pdev->device;
  2281. - hw->subsystem_vendor_id = pdev->subsystem_vendor;
  2282. - hw->subsystem_id = pdev->subsystem_device;
  2283. + hw->vendor_id = pdev->vendor;
  2284. + hw->device_id = pdev->device;
  2285. + hw->subsystem_vendor_id = pdev->subsystem_vendor;
  2286. + hw->subsystem_id = pdev->subsystem_device;
  2287. - pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
  2288. + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
  2289. - pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
  2290. + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
  2291. - adapter->rx_buffer_len = E1000_RXBUFFER_2048;
  2292. - adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
  2293. - hw->max_frame_size = netdev->mtu +
  2294. - ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
  2295. - hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
  2296. + adapter->rx_buffer_len = E1000_RXBUFFER_2048;
  2297. + adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
  2298. + hw->max_frame_size = netdev->mtu +
  2299. + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
  2300. + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
  2301. - /* identify the MAC */
  2302. + /* identify the MAC */
  2303. - if(iegbe_set_mac_type(hw)) {
  2304. + if (iegbe_set_mac_type(hw)) {
  2305. DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
  2306. return -EIO;
  2307. }
  2308. - /* initialize eeprom parameters */
  2309. -
  2310. - if(iegbe_init_eeprom_params(hw)) {
  2311. - E1000_ERR("EEPROM initialization failed\n");
  2312. - return -EIO;
  2313. - }
  2314. -
  2315. - switch(hw->mac_type) {
  2316. - default:
  2317. - break;
  2318. - case iegbe_82541:
  2319. - case iegbe_82547:
  2320. - case iegbe_82541_rev_2:
  2321. - case iegbe_82547_rev_2:
  2322. - hw->phy_init_script = 0x1;
  2323. - break;
  2324. - }
  2325. -
  2326. - iegbe_set_media_type(hw);
  2327. + iegbe_set_media_type(hw);
  2328. - hw->wait_autoneg_complete = FALSE;
  2329. - hw->tbi_compatibility_en = TRUE;
  2330. - hw->adaptive_ifs = TRUE;
  2331. + hw->wait_autoneg_complete = FALSE;
  2332. + hw->tbi_compatibility_en = TRUE;
  2333. + hw->adaptive_ifs = TRUE;
  2334. - /* Copper options */
  2335. + /* Copper options */
  2336. - if(hw->media_type == iegbe_media_type_copper
  2337. + if(hw->media_type == iegbe_media_type_copper
  2338. || (hw->media_type == iegbe_media_type_oem
  2339. && iegbe_oem_phy_is_copper(&adapter->hw))) {
  2340. - hw->mdix = AUTO_ALL_MODES;
  2341. - hw->disable_polarity_correction = FALSE;
  2342. - hw->master_slave = E1000_MASTER_SLAVE;
  2343. - }
  2344. + hw->mdix = AUTO_ALL_MODES;
  2345. + hw->disable_polarity_correction = FALSE;
  2346. + hw->master_slave = E1000_MASTER_SLAVE;
  2347. + }
  2348. -#ifdef CONFIG_E1000_MQ
  2349. - /* Number of supported queues */
  2350. - switch (hw->mac_type) {
  2351. - case iegbe_82571:
  2352. - case iegbe_82572:
  2353. - adapter->num_queues = 0x2;
  2354. - break;
  2355. - default:
  2356. - adapter->num_queues = 0x1;
  2357. - break;
  2358. - }
  2359. - adapter->num_queues = min(adapter->num_queues, num_online_cpus());
  2360. -#else
  2361. - adapter->num_queues = 0x1;
  2362. -#endif
  2363. + adapter->num_tx_queues = 0x1;
  2364. + adapter->num_rx_queues = 0x1;
  2365. if (iegbe_alloc_queues(adapter)) {
  2366. DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
  2367. return -ENOMEM;
  2368. }
  2369. -#ifdef CONFIG_E1000_NAPI
  2370. - for (i = 0; i < adapter->num_queues; i++) {
  2371. + for (i = 0; i < adapter->num_rx_queues; i++) {
  2372. adapter->polling_netdev[i].priv = adapter;
  2373. - adapter->polling_netdev[i].poll = &iegbe_clean;
  2374. - adapter->polling_netdev[i].weight = 0x40;
  2375. dev_hold(&adapter->polling_netdev[i]);
  2376. set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
  2377. }
  2378. -#endif
  2379. -
  2380. -#ifdef CONFIG_E1000_MQ
  2381. - iegbe_setup_queue_mapping(adapter);
  2382. -#endif
  2383. + spin_lock_init(&adapter->tx_queue_lock);
  2384. /*
  2385. - * for ICP_XXXX style controllers, it is necessary to keep
  2386. - * track of the last known state of the link to determine if
  2387. - * the link experienced a change in state when iegbe_watchdog
  2388. - * fires
  2389. - */
  2390. - adapter->hw.icp_xxxx_is_link_up = FALSE;
  2391. + * for ICP_XXXX style controllers, it is necessary to keep
  2392. + * track of the last known state of the link to determine if
  2393. + * the link experienced a change in state when iegbe_watchdog
  2394. + * fires
  2395. + */
  2396. + adapter->hw.icp_xxxx_is_link_up = FALSE;
  2397. - atomic_set(&adapter->irq_sem, 1);
  2398. - spin_lock_init(&adapter->stats_lock);
  2399. + spin_lock_init(&adapter->stats_lock);
  2400. - return 0;
  2401. + set_bit(__E1000_DOWN, &adapter->flags);
  2402. + return 0x0;
  2403. }
  2404. /**
  2405. @@ -1218,71 +1137,31 @@ iegbe_sw_init(struct iegbe_adapter *adap
  2406. static int __devinit
  2407. iegbe_alloc_queues(struct iegbe_adapter *adapter)
  2408. {
  2409. - int size;
  2410. - size = sizeof(struct iegbe_tx_ring) * adapter->num_queues;
  2411. - adapter->tx_ring = kmalloc(size, GFP_KERNEL);
  2412. - if (!adapter->tx_ring){
  2413. +
  2414. + adapter->tx_ring = kcalloc(adapter->num_tx_queues,
  2415. + sizeof(struct iegbe_tx_ring), GFP_KERNEL);
  2416. + if (!adapter->tx_ring)
  2417. return -ENOMEM;
  2418. - }
  2419. - memset(adapter->tx_ring, 0, size);
  2420. - size = sizeof(struct iegbe_rx_ring) * adapter->num_queues;
  2421. - adapter->rx_ring = kmalloc(size, GFP_KERNEL);
  2422. + adapter->rx_ring = kcalloc(adapter->num_rx_queues,
  2423. + sizeof(struct iegbe_rx_ring), GFP_KERNEL);
  2424. if (!adapter->rx_ring) {
  2425. kfree(adapter->tx_ring);
  2426. return -ENOMEM;
  2427. }
  2428. - memset(adapter->rx_ring, 0, size);
  2429. -#ifdef CONFIG_E1000_NAPI
  2430. - size = sizeof(struct net_device) * adapter->num_queues;
  2431. - adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
  2432. + adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
  2433. + sizeof(struct net_device),
  2434. + GFP_KERNEL);
  2435. if (!adapter->polling_netdev) {
  2436. kfree(adapter->tx_ring);
  2437. kfree(adapter->rx_ring);
  2438. return -ENOMEM;
  2439. }
  2440. - memset(adapter->polling_netdev, 0, size);
  2441. -#endif
  2442. -
  2443. - return E1000_SUCCESS;
  2444. -}
  2445. -#ifdef CONFIG_E1000_MQ
  2446. -static void __devinit
  2447. -iegbe_setup_queue_mapping(struct iegbe_adapter *adapter)
  2448. -{
  2449. - int i, cpu;
  2450. -
  2451. - adapter->rx_sched_call_data.func = iegbe_rx_schedule;
  2452. - adapter->rx_sched_call_data.info = adapter->netdev;
  2453. - cpus_clear(adapter->rx_sched_call_data.cpumask);
  2454. -
  2455. - adapter->cpu_netdev = alloc_percpu(struct net_device *);
  2456. - adapter->cpu_tx_ring = alloc_percpu(struct iegbe_tx_ring *);
  2457. -
  2458. - lock_cpu_hotplug();
  2459. - i = 0;
  2460. - for_each_online_cpu(cpu) {
  2461. - *per_cpu_ptr(adapter->cpu_tx_ring, cpu) =
  2462. - &adapter->tx_ring[i % adapter->num_queues];
  2463. - /* This is incomplete because we'd like to assign separate
  2464. - * physical cpus to these netdev polling structures and
  2465. - * avoid saturating a subset of cpus.
  2466. - */
  2467. - if (i < adapter->num_queues) {
  2468. - *per_cpu_ptr(adapter->cpu_netdev, cpu) =
  2469. - &adapter->polling_netdev[i];
  2470. - adapter->cpu_for_queue[i] = cpu;
  2471. - } else {
  2472. - *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
  2473. - }
  2474. - i++;
  2475. - }
  2476. - unlock_cpu_hotplug();
  2477. + return E1000_SUCCESS;
  2478. }
  2479. -#endif
  2480. /**
  2481. * iegbe_open - Called when a network interface is made active
  2482. @@ -1300,40 +1179,62 @@ iegbe_setup_queue_mapping(struct iegbe_a
  2483. static int
  2484. iegbe_open(struct net_device *netdev)
  2485. {
  2486. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  2487. - int err;
  2488. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  2489. + struct iegbe_hw *hw = &adapter->hw;
  2490. + int err;
  2491. +
  2492. + /* allocate receive descriptors */
  2493. + if (test_bit(__E1000_TESTING, &adapter->flags))
  2494. + return -EBUSY;
  2495. - /* allocate receive descriptors */
  2496. + /* allocate transmit descriptors */
  2497. + err = iegbe_setup_all_tx_resources(adapter);
  2498. + if (err)
  2499. + goto err_setup_tx;
  2500. - if ((err = iegbe_setup_all_rx_resources(adapter))) {
  2501. + err = iegbe_setup_all_rx_resources(adapter);
  2502. + if (err)
  2503. goto err_setup_rx;
  2504. - }
  2505. - /* allocate transmit descriptors */
  2506. - if ((err = iegbe_setup_all_tx_resources(adapter))) {
  2507. - goto err_setup_tx;
  2508. - }
  2509. - if ((err = iegbe_up(adapter))) {
  2510. - goto err_up;
  2511. - }
  2512. -#ifdef NETIF_F_HW_VLAN_TX
  2513. - adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
  2514. - if ((adapter->hw.mng_cookie.status &
  2515. - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
  2516. - iegbe_update_mng_vlan(adapter);
  2517. - }
  2518. -#endif
  2519. - return E1000_SUCCESS;
  2520. + iegbe_power_up_phy(adapter);
  2521. + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
  2522. + if ((hw->mng_cookie.status &
  2523. + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
  2524. + iegbe_update_mng_vlan(adapter);
  2525. + }
  2526. +
  2527. + /* before we allocate an interrupt, we must be ready to handle it.
  2528. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
  2529. + * as soon as we call pci_request_irq, so we have to setup our
  2530. + * clean_rx handler before we do so. */
  2531. + iegbe_configure(adapter);
  2532. + err = iegbe_request_irq(adapter);
  2533. + if (err)
  2534. + goto err_req_irq;
  2535. -err_up:
  2536. - iegbe_free_all_tx_resources(adapter);
  2537. -err_setup_tx:
  2538. - iegbe_free_all_rx_resources(adapter);
  2539. + /* From here on the code is the same as iegbe_up() */
  2540. + clear_bit(__E1000_DOWN, &adapter->flags);
  2541. +
  2542. + napi_enable(&adapter->napi);
  2543. +
  2544. + iegbe_irq_enable(adapter);
  2545. +
  2546. + netif_start_queue(netdev);
  2547. +
  2548. + /* fire a link status change interrupt to start the watchdog */
  2549. +
  2550. + return E1000_SUCCESS;
  2551. +
  2552. +err_req_irq:
  2553. + iegbe_power_down_phy(adapter);
  2554. + iegbe_free_all_rx_resources(adapter);
  2555. err_setup_rx:
  2556. - iegbe_reset(adapter);
  2557. + iegbe_free_all_tx_resources(adapter);
  2558. +err_setup_tx:
  2559. + iegbe_reset(adapter);
  2560. - return err;
  2561. + return err;
  2562. }
  2563. /**
  2564. @@ -1348,22 +1249,25 @@ err_setup_rx:
  2565. * hardware, and all transmit and receive resources are freed.
  2566. **/
  2567. -static int
  2568. -iegbe_close(struct net_device *netdev)
  2569. +static int iegbe_close(struct net_device *netdev)
  2570. {
  2571. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  2572. -
  2573. - iegbe_down(adapter);
  2574. -
  2575. - iegbe_free_all_tx_resources(adapter);
  2576. - iegbe_free_all_rx_resources(adapter);
  2577. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  2578. + struct iegbe_hw *hw = &adapter->hw;
  2579. -#ifdef NETIF_F_HW_VLAN_TX
  2580. - if((adapter->hw.mng_cookie.status &
  2581. - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
  2582. + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
  2583. + iegbe_down(adapter);
  2584. + iegbe_power_down_phy(adapter);
  2585. + iegbe_free_irq(adapter);
  2586. +
  2587. + iegbe_free_all_tx_resources(adapter);
  2588. + iegbe_free_all_rx_resources(adapter);
  2589. +
  2590. + if ((hw->mng_cookie.status &
  2591. + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
  2592. + !(adapter->vlgrp &&
  2593. + vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
  2594. iegbe_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
  2595. }
  2596. -#endif
  2597. return 0;
  2598. }
  2599. @@ -1375,19 +1279,19 @@ iegbe_close(struct net_device *netdev)
  2600. **/
  2601. static inline boolean_t
  2602. iegbe_check_64k_bound(struct iegbe_adapter *adapter,
  2603. - void *start, unsigned long len)
  2604. + void *start, unsigned long len)
  2605. {
  2606. - unsigned long begin = (unsigned long) start;
  2607. - unsigned long end = begin + len;
  2608. + unsigned long begin = (unsigned long) start;
  2609. + unsigned long end = begin + len;
  2610. - /* First rev 82545 and 82546 need to not allow any memory
  2611. - * write location to cross 64k boundary due to errata 23 */
  2612. - if(adapter->hw.mac_type == iegbe_82545 ||
  2613. - adapter->hw.mac_type == iegbe_82546) {
  2614. - return ((begin ^ (end - 1)) >> 0x10) != 0 ? FALSE : TRUE;
  2615. - }
  2616. + /* First rev 82545 and 82546 need to not allow any memory
  2617. + * write location to cross 64k boundary due to errata 23 */
  2618. + if(adapter->hw.mac_type == iegbe_82545 ||
  2619. + adapter->hw.mac_type == iegbe_82546) {
  2620. + return ((begin ^ (end - 1)) >> 0x10) != 0x0 ? FALSE : TRUE;
  2621. + }
  2622. - return TRUE;
  2623. + return TRUE;
  2624. }
  2625. /**
  2626. @@ -1398,102 +1302,98 @@ iegbe_check_64k_bound(struct iegbe_adapt
  2627. * Return 0 on success, negative on failure
  2628. **/
  2629. -int
  2630. -iegbe_setup_tx_resources(struct iegbe_adapter *adapter,
  2631. +static int iegbe_setup_tx_resources(struct iegbe_adapter *adapter,
  2632. struct iegbe_tx_ring *txdr)
  2633. {
  2634. - struct pci_dev *pdev = adapter->pdev;
  2635. - int size;
  2636. + struct pci_dev *pdev = adapter->pdev;
  2637. + int size;
  2638. - size = sizeof(struct iegbe_buffer) * txdr->count;
  2639. - txdr->buffer_info = vmalloc(size);
  2640. - if (!txdr->buffer_info) {
  2641. - DPRINTK(PROBE, ERR,
  2642. - "Unable to allocate memory for the transmit descriptor ring\n");
  2643. - return -ENOMEM;
  2644. - }
  2645. + size = sizeof(struct iegbe_buffer) * txdr->count;
  2646. + txdr->buffer_info = vmalloc(size);
  2647. + if (!txdr->buffer_info) {
  2648. + DPRINTK(PROBE, ERR,
  2649. + "Unable to allocate memory for the transmit descriptor ring\n");
  2650. + return -ENOMEM;
  2651. + }
  2652. memset(txdr->buffer_info, 0, size);
  2653. - memset(&txdr->previous_buffer_info, 0, sizeof(struct iegbe_buffer));
  2654. - /* round up to nearest 4K */
  2655. + /* round up to nearest 4K */
  2656. - txdr->size = txdr->count * sizeof(struct iegbe_tx_desc);
  2657. - E1000_ROUNDUP(txdr->size, 0x1000);
  2658. + txdr->size = txdr->count * sizeof(struct iegbe_tx_desc);
  2659. + txdr->size = ALIGN(txdr->size, 4096);
  2660. - txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
  2661. - if (!txdr->desc) {
  2662. + txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
  2663. + if (!txdr->desc) {
  2664. setup_tx_desc_die:
  2665. - vfree(txdr->buffer_info);
  2666. - DPRINTK(PROBE, ERR,
  2667. - "Unable to allocate memory for the transmit descriptor ring\n");
  2668. - return -ENOMEM;
  2669. - }
  2670. -
  2671. - /* Fix for errata 23, can't cross 64kB boundary */
  2672. - if (!iegbe_check_64k_bound(adapter, txdr->desc, txdr->size)) {
  2673. - void *olddesc = txdr->desc;
  2674. - dma_addr_t olddma = txdr->dma;
  2675. - DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
  2676. - "at %p\n", txdr->size, txdr->desc);
  2677. - /* Try again, without freeing the previous */
  2678. - txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
  2679. - /* Failed allocation, critical failure */
  2680. - if (!txdr->desc) {
  2681. - pci_free_consistent(pdev, txdr->size, olddesc, olddma);
  2682. - goto setup_tx_desc_die;
  2683. - }
  2684. + vfree(txdr->buffer_info);
  2685. + DPRINTK(PROBE, ERR,
  2686. + "Unable to allocate memory for the transmit descriptor ring\n");
  2687. + return -ENOMEM;
  2688. + }
  2689. +
  2690. + /* Fix for errata 23, can't cross 64kB boundary */
  2691. + if (!iegbe_check_64k_bound(adapter, txdr->desc, txdr->size)) {
  2692. + void *olddesc = txdr->desc;
  2693. + dma_addr_t olddma = txdr->dma;
  2694. + DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
  2695. + "at %p\n", txdr->size, txdr->desc);
  2696. + /* Try again, without freeing the previous */
  2697. + txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
  2698. + /* Failed allocation, critical failure */
  2699. + if (!txdr->desc) {
  2700. + pci_free_consistent(pdev, txdr->size, olddesc, olddma);
  2701. + goto setup_tx_desc_die;
  2702. + }
  2703. - if (!iegbe_check_64k_bound(adapter, txdr->desc, txdr->size)) {
  2704. - /* give up */
  2705. - pci_free_consistent(pdev, txdr->size, txdr->desc,
  2706. - txdr->dma);
  2707. - pci_free_consistent(pdev, txdr->size, olddesc, olddma);
  2708. - DPRINTK(PROBE, ERR,
  2709. - "Unable to allocate aligned memory "
  2710. - "for the transmit descriptor ring\n");
  2711. - vfree(txdr->buffer_info);
  2712. - return -ENOMEM;
  2713. - } else {
  2714. - /* Free old allocation, new allocation was successful */
  2715. - pci_free_consistent(pdev, txdr->size, olddesc, olddma);
  2716. - }
  2717. - }
  2718. + if (!iegbe_check_64k_bound(adapter, txdr->desc, txdr->size)) {
  2719. + /* give up */
  2720. + pci_free_consistent(pdev, txdr->size, txdr->desc,
  2721. + txdr->dma);
  2722. + pci_free_consistent(pdev, txdr->size, olddesc, olddma);
  2723. + DPRINTK(PROBE, ERR,
  2724. + "Unable to allocate aligned memory "
  2725. + "for the transmit descriptor ring\n");
  2726. + vfree(txdr->buffer_info);
  2727. + return -ENOMEM;
  2728. + } else {
  2729. + /* Free old allocation, new allocation was successful */
  2730. + pci_free_consistent(pdev, txdr->size, olddesc, olddma);
  2731. + }
  2732. + }
  2733. memset(txdr->desc, 0, txdr->size);
  2734. txdr->next_to_use = 0;
  2735. txdr->next_to_clean = 0;
  2736. - spin_lock_init(&txdr->tx_lock);
  2737. + spin_lock_init(&txdr->tx_lock);
  2738. return 0;
  2739. }
  2740. /**
  2741. * iegbe_setup_all_tx_resources - wrapper to allocate Tx resources
  2742. - * (Descriptors) for all queues
  2743. + * (Descriptors) for all queues
  2744. * @adapter: board private structure
  2745. *
  2746. - * If this function returns with an error, then it's possible one or
  2747. - * more of the rings is populated (while the rest are not). It is the
  2748. - * callers duty to clean those orphaned rings.
  2749. - *
  2750. * Return 0 on success, negative on failure
  2751. **/
  2752. -int
  2753. -iegbe_setup_all_tx_resources(struct iegbe_adapter *adapter)
  2754. +int iegbe_setup_all_tx_resources(struct iegbe_adapter *adapter)
  2755. {
  2756. int i, err = 0;
  2757. - for (i = 0; i < adapter->num_queues; i++) {
  2758. + for (i = 0; i < adapter->num_tx_queues; i++) {
  2759. err = iegbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
  2760. if (err) {
  2761. DPRINTK(PROBE, ERR,
  2762. "Allocation for Tx Queue %u failed\n", i);
  2763. + for (i-- ; i >= 0; i--)
  2764. + iegbe_free_tx_resources(adapter,
  2765. + &adapter->tx_ring[i]);
  2766. break;
  2767. }
  2768. }
  2769. - return err;
  2770. + return err;
  2771. }
  2772. /**
  2773. @@ -1512,113 +1412,108 @@ iegbe_configure_tx(struct iegbe_adapter
  2774. /* Setup the HW Tx Head and Tail descriptor pointers */
  2775. - switch (adapter->num_queues) {
  2776. + switch (adapter->num_tx_queues) {
  2777. case 0x2:
  2778. tdba = adapter->tx_ring[0x1].dma;
  2779. tdlen = adapter->tx_ring[0x1].count *
  2780. - sizeof(struct iegbe_tx_desc);
  2781. - E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
  2782. + sizeof(struct iegbe_tx_desc);
  2783. + E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
  2784. E1000_WRITE_REG(hw, TDBAH1, (tdba >> 0x20));
  2785. - E1000_WRITE_REG(hw, TDLEN1, tdlen);
  2786. - E1000_WRITE_REG(hw, TDH1, 0);
  2787. - E1000_WRITE_REG(hw, TDT1, 0);
  2788. + E1000_WRITE_REG(hw, TDLEN1, tdlen);
  2789. + E1000_WRITE_REG(hw, TDH1, 0x0);
  2790. + E1000_WRITE_REG(hw, TDT1, 0x0);
  2791. adapter->tx_ring[0x1].tdh = E1000_TDH1;
  2792. adapter->tx_ring[0x1].tdt = E1000_TDT1;
  2793. - /* Fall Through */
  2794. + /* Fall Through */
  2795. case 0x1:
  2796. - default:
  2797. - tdba = adapter->tx_ring[0].dma;
  2798. - tdlen = adapter->tx_ring[0].count *
  2799. - sizeof(struct iegbe_tx_desc);
  2800. - E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
  2801. + default:
  2802. + tdba = adapter->tx_ring[0x0].dma;
  2803. + tdlen = adapter->tx_ring[0x0].count *
  2804. + sizeof(struct iegbe_tx_desc);
  2805. + E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
  2806. E1000_WRITE_REG(hw, TDBAH, (tdba >> 0x20));
  2807. - E1000_WRITE_REG(hw, TDLEN, tdlen);
  2808. - E1000_WRITE_REG(hw, TDH, 0);
  2809. - E1000_WRITE_REG(hw, TDT, 0);
  2810. - adapter->tx_ring[0].tdh = E1000_TDH;
  2811. - adapter->tx_ring[0].tdt = E1000_TDT;
  2812. - break;
  2813. - }
  2814. -
  2815. - /* Set the default values for the Tx Inter Packet Gap timer */
  2816. -
  2817. - switch (hw->mac_type) {
  2818. - case iegbe_82542_rev2_0:
  2819. - case iegbe_82542_rev2_1:
  2820. - tipg = DEFAULT_82542_TIPG_IPGT;
  2821. - tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
  2822. - tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
  2823. - break;
  2824. - default:
  2825. - switch(hw->media_type) {
  2826. - case iegbe_media_type_fiber:
  2827. - case iegbe_media_type_internal_serdes:
  2828. - tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
  2829. - break;
  2830. - case iegbe_media_type_copper:
  2831. - tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
  2832. - break;
  2833. - case iegbe_media_type_oem:
  2834. - default:
  2835. + E1000_WRITE_REG(hw, TDLEN, tdlen);
  2836. + E1000_WRITE_REG(hw, TDH, 0x0);
  2837. + E1000_WRITE_REG(hw, TDT, 0x0);
  2838. + adapter->tx_ring[0x0].tdh = E1000_TDH;
  2839. + adapter->tx_ring[0x0].tdt = E1000_TDT;
  2840. + break;
  2841. + }
  2842. +
  2843. + /* Set the default values for the Tx Inter Packet Gap timer */
  2844. +
  2845. + switch (hw->mac_type) {
  2846. + case iegbe_82542_rev2_0:
  2847. + case iegbe_82542_rev2_1:
  2848. + tipg = DEFAULT_82542_TIPG_IPGT;
  2849. + tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
  2850. + tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
  2851. + break;
  2852. + default:
  2853. + switch(hw->media_type) {
  2854. + case iegbe_media_type_fiber:
  2855. + case iegbe_media_type_internal_serdes:
  2856. + tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
  2857. + break;
  2858. + case iegbe_media_type_copper:
  2859. + tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
  2860. + break;
  2861. + case iegbe_media_type_oem:
  2862. + default:
  2863. tipg = (0xFFFFFFFFUL >> (sizeof(tipg)*0x8 -
  2864. E1000_TIPG_IPGR1_SHIFT))
  2865. - & iegbe_oem_get_tipg(&adapter->hw);
  2866. - break;
  2867. - }
  2868. - tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
  2869. - tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
  2870. - }
  2871. - E1000_WRITE_REG(hw, TIPG, tipg);
  2872. + & iegbe_oem_get_tipg(&adapter->hw);
  2873. + break;
  2874. + }
  2875. + tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
  2876. + tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
  2877. + }
  2878. + E1000_WRITE_REG(hw, TIPG, tipg);
  2879. - /* Set the Tx Interrupt Delay register */
  2880. + /* Set the Tx Interrupt Delay register */
  2881. - E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
  2882. + E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
  2883. if (hw->mac_type >= iegbe_82540) {
  2884. - E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
  2885. + E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
  2886. }
  2887. - /* Program the Transmit Control Register */
  2888. + /* Program the Transmit Control Register */
  2889. - tctl = E1000_READ_REG(hw, TCTL);
  2890. + tctl = E1000_READ_REG(hw, TCTL);
  2891. - tctl &= ~E1000_TCTL_CT;
  2892. - tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
  2893. - (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
  2894. + tctl &= ~E1000_TCTL_CT;
  2895. + tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
  2896. + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
  2897. - E1000_WRITE_REG(hw, TCTL, tctl);
  2898. + E1000_WRITE_REG(hw, TCTL, tctl);
  2899. - if (hw->mac_type == iegbe_82571 || hw->mac_type == iegbe_82572) {
  2900. - tarc = E1000_READ_REG(hw, TARC0);
  2901. + if (hw->mac_type == iegbe_82571 || hw->mac_type == iegbe_82572) {
  2902. + tarc = E1000_READ_REG(hw, TARC0);
  2903. tarc |= ((0x1 << 0x19) | (0x1 << 0x15));
  2904. - E1000_WRITE_REG(hw, TARC0, tarc);
  2905. - tarc = E1000_READ_REG(hw, TARC1);
  2906. + E1000_WRITE_REG(hw, TARC0, tarc);
  2907. + tarc = E1000_READ_REG(hw, TARC1);
  2908. tarc |= (0x1 << 0x19);
  2909. if (tctl & E1000_TCTL_MULR) {
  2910. tarc &= ~(0x1 << 0x1c);
  2911. } else {
  2912. tarc |= (0x1 << 0x1c);
  2913. }
  2914. - E1000_WRITE_REG(hw, TARC1, tarc);
  2915. - }
  2916. + E1000_WRITE_REG(hw, TARC1, tarc);
  2917. + }
  2918. - iegbe_config_collision_dist(hw);
  2919. + iegbe_config_collision_dist(hw);
  2920. - /* Setup Transmit Descriptor Settings for eop descriptor */
  2921. - adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
  2922. - E1000_TXD_CMD_IFCS;
  2923. + /* Setup Transmit Descriptor Settings for eop descriptor */
  2924. + adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
  2925. + E1000_TXD_CMD_IFCS;
  2926. if (hw->mac_type < iegbe_82543) {
  2927. - adapter->txd_cmd |= E1000_TXD_CMD_RPS;
  2928. + adapter->txd_cmd |= E1000_TXD_CMD_RPS;
  2929. } else {
  2930. -#ifdef IEGBE_GBE_WORKAROUND
  2931. - /* Disable the RS bit in the Tx descriptor */
  2932. - adapter->txd_cmd &= ~E1000_TXD_CMD_RS;
  2933. -#else
  2934. - adapter->txd_cmd |= E1000_TXD_CMD_RS;
  2935. -#endif
  2936. + adapter->txd_cmd |= E1000_TXD_CMD_RS;
  2937. }
  2938. - /* Cache if we're 82544 running in PCI-X because we'll
  2939. - * need this to apply a workaround later in the send path. */
  2940. - if (hw->mac_type == iegbe_82544 &&
  2941. + /* Cache if we're 82544 running in PCI-X because we'll
  2942. + * need this to apply a workaround later in the send path. */
  2943. + if (hw->mac_type == iegbe_82544 &&
  2944. hw->bus_type == iegbe_bus_type_pcix) {
  2945. adapter->pcix_82544 = 0x1;
  2946. }
  2947. @@ -1632,96 +1527,95 @@ iegbe_configure_tx(struct iegbe_adapter
  2948. * Returns 0 on success, negative on failure
  2949. **/
  2950. -int
  2951. -iegbe_setup_rx_resources(struct iegbe_adapter *adapter,
  2952. +static int iegbe_setup_rx_resources(struct iegbe_adapter *adapter,
  2953. struct iegbe_rx_ring *rxdr)
  2954. {
  2955. - struct pci_dev *pdev = adapter->pdev;
  2956. - int size, desc_len;
  2957. -
  2958. - size = sizeof(struct iegbe_buffer) * rxdr->count;
  2959. - rxdr->buffer_info = vmalloc(size);
  2960. - if (!rxdr->buffer_info) {
  2961. - DPRINTK(PROBE, ERR,
  2962. - "Unable to allocate memory for the receive descriptor ring\n");
  2963. - return -ENOMEM;
  2964. - }
  2965. - memset(rxdr->buffer_info, 0, size);
  2966. -
  2967. - size = sizeof(struct iegbe_ps_page) * rxdr->count;
  2968. - rxdr->ps_page = kmalloc(size, GFP_KERNEL);
  2969. - if (!rxdr->ps_page) {
  2970. - vfree(rxdr->buffer_info);
  2971. - DPRINTK(PROBE, ERR,
  2972. - "Unable to allocate memory for the receive descriptor ring\n");
  2973. - return -ENOMEM;
  2974. - }
  2975. - memset(rxdr->ps_page, 0, size);
  2976. -
  2977. - size = sizeof(struct iegbe_ps_page_dma) * rxdr->count;
  2978. - rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
  2979. - if (!rxdr->ps_page_dma) {
  2980. - vfree(rxdr->buffer_info);
  2981. - kfree(rxdr->ps_page);
  2982. - DPRINTK(PROBE, ERR,
  2983. - "Unable to allocate memory for the receive descriptor ring\n");
  2984. - return -ENOMEM;
  2985. - }
  2986. - memset(rxdr->ps_page_dma, 0, size);
  2987. + struct iegbe_hw *hw = &adapter->hw;
  2988. + struct pci_dev *pdev = adapter->pdev;
  2989. + int size, desc_len;
  2990. - if (adapter->hw.mac_type <= iegbe_82547_rev_2) {
  2991. - desc_len = sizeof(struct iegbe_rx_desc);
  2992. - } else {
  2993. - desc_len = sizeof(union iegbe_rx_desc_packet_split);
  2994. + size = sizeof(struct iegbe_buffer) * rxdr->count;
  2995. + rxdr->buffer_info = vmalloc(size);
  2996. + if (!rxdr->buffer_info) {
  2997. + DPRINTK(PROBE, ERR,
  2998. + "Unable to allocate memory for the receive descriptor ring\n");
  2999. + return -ENOMEM;
  3000. }
  3001. - /* Round up to nearest 4K */
  3002. -
  3003. - rxdr->size = rxdr->count * desc_len;
  3004. - E1000_ROUNDUP(rxdr->size, 0x1000);
  3005. -
  3006. - rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
  3007. + memset(rxdr->buffer_info, 0, size);
  3008. - if (!rxdr->desc) {
  3009. - DPRINTK(PROBE, ERR,
  3010. - "Unable to allocate memory for the receive descriptor ring\n");
  3011. + rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct iegbe_ps_page),
  3012. + GFP_KERNEL);
  3013. + if (!rxdr->ps_page) {
  3014. + vfree(rxdr->buffer_info);
  3015. + DPRINTK(PROBE, ERR,
  3016. + "Unable to allocate memory for the receive descriptor ring\n");
  3017. + return -ENOMEM;
  3018. + }
  3019. +
  3020. + rxdr->ps_page_dma = kcalloc(rxdr->count,
  3021. + sizeof(struct iegbe_ps_page_dma),
  3022. + GFP_KERNEL);
  3023. + if (!rxdr->ps_page_dma) {
  3024. + vfree(rxdr->buffer_info);
  3025. + kfree(rxdr->ps_page);
  3026. + DPRINTK(PROBE, ERR,
  3027. + "Unable to allocate memory for the receive descriptor ring\n");
  3028. + return -ENOMEM;
  3029. + }
  3030. +
  3031. + if (hw->mac_type <= iegbe_82547_rev_2)
  3032. + desc_len = sizeof(struct iegbe_rx_desc);
  3033. + else
  3034. + desc_len = sizeof(union iegbe_rx_desc_packet_split);
  3035. +
  3036. + /* Round up to nearest 4K */
  3037. +
  3038. + rxdr->size = rxdr->count * desc_len;
  3039. + rxdr->size = ALIGN(rxdr->size, 4096);
  3040. +
  3041. + rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
  3042. +
  3043. + if (!rxdr->desc) {
  3044. + DPRINTK(PROBE, ERR,
  3045. + "Unable to allocate memory for the receive descriptor ring\n");
  3046. setup_rx_desc_die:
  3047. - vfree(rxdr->buffer_info);
  3048. - kfree(rxdr->ps_page);
  3049. - kfree(rxdr->ps_page_dma);
  3050. - return -ENOMEM;
  3051. - }
  3052. -
  3053. - /* Fix for errata 23, can't cross 64kB boundary */
  3054. - if (!iegbe_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
  3055. - void *olddesc = rxdr->desc;
  3056. - dma_addr_t olddma = rxdr->dma;
  3057. - DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
  3058. - "at %p\n", rxdr->size, rxdr->desc);
  3059. - /* Try again, without freeing the previous */
  3060. - rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
  3061. - /* Failed allocation, critical failure */
  3062. - if (!rxdr->desc) {
  3063. - pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
  3064. - DPRINTK(PROBE, ERR,
  3065. - "Unable to allocate memory "
  3066. - "for the receive descriptor ring\n");
  3067. - goto setup_rx_desc_die;
  3068. - }
  3069. + vfree(rxdr->buffer_info);
  3070. + kfree(rxdr->ps_page);
  3071. + kfree(rxdr->ps_page_dma);
  3072. + return -ENOMEM;
  3073. + }
  3074. +
  3075. + /* Fix for errata 23, can't cross 64kB boundary */
  3076. + if (!iegbe_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
  3077. + void *olddesc = rxdr->desc;
  3078. + dma_addr_t olddma = rxdr->dma;
  3079. + DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
  3080. + "at %p\n", rxdr->size, rxdr->desc);
  3081. + /* Try again, without freeing the previous */
  3082. + rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
  3083. + /* Failed allocation, critical failure */
  3084. + if (!rxdr->desc) {
  3085. + pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
  3086. + DPRINTK(PROBE, ERR,
  3087. + "Unable to allocate memory "
  3088. + "for the receive descriptor ring\n");
  3089. + goto setup_rx_desc_die;
  3090. + }
  3091. - if (!iegbe_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
  3092. - /* give up */
  3093. - pci_free_consistent(pdev, rxdr->size, rxdr->desc,
  3094. - rxdr->dma);
  3095. - pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
  3096. - DPRINTK(PROBE, ERR,
  3097. - "Unable to allocate aligned memory "
  3098. - "for the receive descriptor ring\n");
  3099. - goto setup_rx_desc_die;
  3100. - } else {
  3101. - /* Free old allocation, new allocation was successful */
  3102. - pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
  3103. - }
  3104. - }
  3105. + if (!iegbe_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
  3106. + /* give up */
  3107. + pci_free_consistent(pdev, rxdr->size, rxdr->desc,
  3108. + rxdr->dma);
  3109. + pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
  3110. + DPRINTK(PROBE, ERR,
  3111. + "Unable to allocate aligned memory "
  3112. + "for the receive descriptor ring\n");
  3113. + goto setup_rx_desc_die;
  3114. + } else {
  3115. + /* Free old allocation, new allocation was successful */
  3116. + pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
  3117. + }
  3118. + }
  3119. memset(rxdr->desc, 0, rxdr->size);
  3120. rxdr->next_to_clean = 0;
  3121. @@ -1732,7 +1626,7 @@ setup_rx_desc_die:
  3122. /**
  3123. * iegbe_setup_all_rx_resources - wrapper to allocate Rx resources
  3124. - * (Descriptors) for all queues
  3125. + * (Descriptors) for all queues
  3126. * @adapter: board private structure
  3127. *
  3128. * If this function returns with an error, then it's possible one or
  3129. @@ -1742,21 +1636,23 @@ setup_rx_desc_die:
  3130. * Return 0 on success, negative on failure
  3131. **/
  3132. -int
  3133. -iegbe_setup_all_rx_resources(struct iegbe_adapter *adapter)
  3134. +int iegbe_setup_all_rx_resources(struct iegbe_adapter *adapter)
  3135. {
  3136. int i, err = 0;
  3137. - for (i = 0; i < adapter->num_queues; i++) {
  3138. + for (i = 0; i < adapter->num_rx_queues; i++) {
  3139. err = iegbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
  3140. if (err) {
  3141. DPRINTK(PROBE, ERR,
  3142. "Allocation for Rx Queue %u failed\n", i);
  3143. + for (i-- ; i >= 0; i--)
  3144. + iegbe_free_rx_resources(adapter,
  3145. + &adapter->rx_ring[i]);
  3146. break;
  3147. }
  3148. }
  3149. - return err;
  3150. + return err;
  3151. }
  3152. /**
  3153. @@ -1765,105 +1661,104 @@ iegbe_setup_all_rx_resources(struct iegb
  3154. **/
  3155. #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
  3156. (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
  3157. -static void
  3158. -iegbe_setup_rctl(struct iegbe_adapter *adapter)
  3159. +static void iegbe_setup_rctl(struct iegbe_adapter *adapter)
  3160. {
  3161. - uint32_t rctl, rfctl;
  3162. - uint32_t psrctl = 0;
  3163. -#ifdef CONFIG_E1000_PACKET_SPLIT
  3164. - uint32_t pages = 0;
  3165. -#endif
  3166. -
  3167. - rctl = E1000_READ_REG(&adapter->hw, RCTL);
  3168. -
  3169. - rctl &= ~(0x3 << E1000_RCTL_MO_SHIFT);
  3170. -
  3171. - rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
  3172. - E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
  3173. - (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
  3174. -
  3175. - if(adapter->hw.tbi_compatibility_on == 0x1) {
  3176. - rctl |= E1000_RCTL_SBP;
  3177. - } else {
  3178. - rctl &= ~E1000_RCTL_SBP;
  3179. - }
  3180. - if(adapter->netdev->mtu <= ETH_DATA_LEN) {
  3181. - rctl &= ~E1000_RCTL_LPE;
  3182. - } else {
  3183. - rctl |= E1000_RCTL_LPE;
  3184. - }
  3185. - /* Setup buffer sizes */
  3186. - if(adapter->hw.mac_type >= iegbe_82571) {
  3187. - /* We can now specify buffers in 1K increments.
  3188. - * BSIZE and BSEX are ignored in this case. */
  3189. - rctl |= adapter->rx_buffer_len << 0x11;
  3190. - } else {
  3191. - rctl &= ~E1000_RCTL_SZ_4096;
  3192. - rctl |= E1000_RCTL_BSEX;
  3193. - switch (adapter->rx_buffer_len) {
  3194. - case E1000_RXBUFFER_2048:
  3195. - default:
  3196. - rctl |= E1000_RCTL_SZ_2048;
  3197. + struct iegbe_hw *hw = &adapter->hw;
  3198. + u32 rctl, rfctl;
  3199. + u32 psrctl = 0;
  3200. +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
  3201. + u32 pages = 0;
  3202. +#endif
  3203. +
  3204. + rctl = E1000_READ_REG(&adapter->hw, RCTL);
  3205. +
  3206. + rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
  3207. +
  3208. + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
  3209. + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
  3210. + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
  3211. +
  3212. + if (hw->tbi_compatibility_on == 1)
  3213. + rctl |= E1000_RCTL_SBP;
  3214. + else
  3215. + rctl &= ~E1000_RCTL_SBP;
  3216. +
  3217. + if (adapter->netdev->mtu <= ETH_DATA_LEN)
  3218. + rctl &= ~E1000_RCTL_LPE;
  3219. + else
  3220. + rctl |= E1000_RCTL_LPE;
  3221. +
  3222. + /* Setup buffer sizes */
  3223. + /* We can now specify buffers in 1K increments.
  3224. + * BSIZE and BSEX are ignored in this case. */
  3225. + rctl &= ~E1000_RCTL_SZ_4096;
  3226. + rctl |= E1000_RCTL_BSEX;
  3227. + switch (adapter->rx_buffer_len) {
  3228. + case E1000_RXBUFFER_256:
  3229. + rctl |= E1000_RCTL_SZ_256;
  3230. rctl &= ~E1000_RCTL_BSEX;
  3231. break;
  3232. - case E1000_RXBUFFER_4096:
  3233. - rctl |= E1000_RCTL_SZ_4096;
  3234. - break;
  3235. - case E1000_RXBUFFER_8192:
  3236. - rctl |= E1000_RCTL_SZ_8192;
  3237. - break;
  3238. - case E1000_RXBUFFER_16384:
  3239. - rctl |= E1000_RCTL_SZ_16384;
  3240. - break;
  3241. - }
  3242. - }
  3243. + case E1000_RXBUFFER_2048:
  3244. + default:
  3245. + rctl |= E1000_RCTL_SZ_2048;
  3246. + rctl &= ~E1000_RCTL_BSEX;
  3247. + break;
  3248. + case E1000_RXBUFFER_4096:
  3249. + rctl |= E1000_RCTL_SZ_4096;
  3250. + break;
  3251. + case E1000_RXBUFFER_8192:
  3252. + rctl |= E1000_RCTL_SZ_8192;
  3253. + break;
  3254. + case E1000_RXBUFFER_16384:
  3255. + rctl |= E1000_RCTL_SZ_16384;
  3256. + break;
  3257. + }
  3258. -#ifdef CONFIG_E1000_PACKET_SPLIT
  3259. - /* 82571 and greater support packet-split where the protocol
  3260. - * header is placed in skb->data and the packet data is
  3261. - * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
  3262. - * In the case of a non-split, skb->data is linearly filled,
  3263. - * followed by the page buffers. Therefore, skb->data is
  3264. - * sized to hold the largest protocol header.
  3265. - */
  3266. - pages = PAGE_USE_COUNT(adapter->netdev->mtu);
  3267. - if ((adapter->hw.mac_type > iegbe_82547_rev_2) && (pages <= 0x3) &&
  3268. - PAGE_SIZE <= 0x4000) {
  3269. - adapter->rx_ps_pages = pages;
  3270. - } else {
  3271. +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
  3272. + /* 82571 and greater support packet-split where the protocol
  3273. + * header is placed in skb->data and the packet data is
  3274. + * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
  3275. + * In the case of a non-split, skb->data is linearly filled,
  3276. + * followed by the page buffers. Therefore, skb->data is
  3277. + * sized to hold the largest protocol header.
  3278. + */
  3279. + pages = PAGE_USE_COUNT(adapter->netdev->mtu);
  3280. + if ((hw->mac_type >= iegbe_82571) && (pages <= 3) &&
  3281. + PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
  3282. + adapter->rx_ps_pages = pages;
  3283. + else
  3284. adapter->rx_ps_pages = 0;
  3285. - }
  3286. #endif
  3287. - if (adapter->rx_ps_pages) {
  3288. - /* Configure extra packet-split registers */
  3289. - rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
  3290. - rfctl |= E1000_RFCTL_EXTEN;
  3291. - /* disable IPv6 packet split support */
  3292. - rfctl |= E1000_RFCTL_IPV6_DIS;
  3293. - E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
  3294. -
  3295. - rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
  3296. -
  3297. - psrctl |= adapter->rx_ps_bsize0 >>
  3298. - E1000_PSRCTL_BSIZE0_SHIFT;
  3299. -
  3300. - switch (adapter->rx_ps_pages) {
  3301. - case 0x3:
  3302. - psrctl |= PAGE_SIZE <<
  3303. - E1000_PSRCTL_BSIZE3_SHIFT;
  3304. - case 0x2:
  3305. - psrctl |= PAGE_SIZE <<
  3306. - E1000_PSRCTL_BSIZE2_SHIFT;
  3307. - case 0x1:
  3308. - psrctl |= PAGE_SIZE >>
  3309. - E1000_PSRCTL_BSIZE1_SHIFT;
  3310. - break;
  3311. - }
  3312. + if (adapter->rx_ps_pages) {
  3313. + /* Configure extra packet-split registers */
  3314. + rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
  3315. + rfctl |= E1000_RFCTL_EXTEN;
  3316. + /* disable IPv6 packet split support */
  3317. + rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
  3318. + E1000_RFCTL_NEW_IPV6_EXT_DIS);
  3319. +
  3320. + rctl |= E1000_RCTL_DTYP_PS;
  3321. +
  3322. + psrctl |= adapter->rx_ps_bsize0 >>
  3323. + E1000_PSRCTL_BSIZE0_SHIFT;
  3324. +
  3325. + switch (adapter->rx_ps_pages) {
  3326. + case 3:
  3327. + psrctl |= PAGE_SIZE <<
  3328. + E1000_PSRCTL_BSIZE3_SHIFT;
  3329. + case 2:
  3330. + psrctl |= PAGE_SIZE <<
  3331. + E1000_PSRCTL_BSIZE2_SHIFT;
  3332. + case 1:
  3333. + psrctl |= PAGE_SIZE >>
  3334. + E1000_PSRCTL_BSIZE1_SHIFT;
  3335. + break;
  3336. + }
  3337. - E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
  3338. - }
  3339. + E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
  3340. + }
  3341. - E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  3342. + E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  3343. }
  3344. /**
  3345. @@ -1873,145 +1768,87 @@ iegbe_setup_rctl(struct iegbe_adapter *a
  3346. * Configure the Rx unit of the MAC after a reset.
  3347. **/
  3348. -static void
  3349. -iegbe_configure_rx(struct iegbe_adapter *adapter)
  3350. +static void iegbe_configure_rx(struct iegbe_adapter *adapter)
  3351. {
  3352. - uint64_t rdba;
  3353. - struct iegbe_hw *hw = &adapter->hw;
  3354. - uint32_t rdlen, rctl, rxcsum, ctrl_ext;
  3355. -#ifdef CONFIG_E1000_MQ
  3356. - uint32_t reta, mrqc;
  3357. - int i;
  3358. -#endif
  3359. + u64 rdba;
  3360. + struct iegbe_hw *hw = &adapter->hw;
  3361. + u32 rdlen, rctl, rxcsum, ctrl_ext;
  3362. - if (adapter->rx_ps_pages) {
  3363. + if (adapter->rx_ps_pages) {
  3364. rdlen = adapter->rx_ring[0].count *
  3365. - sizeof(union iegbe_rx_desc_packet_split);
  3366. - adapter->clean_rx = iegbe_clean_rx_irq_ps;
  3367. - adapter->alloc_rx_buf = iegbe_alloc_rx_buffers_ps;
  3368. - } else {
  3369. + sizeof(union iegbe_rx_desc_packet_split);
  3370. + adapter->clean_rx = iegbe_clean_rx_irq_ps;
  3371. + adapter->alloc_rx_buf = iegbe_alloc_rx_buffers_ps;
  3372. + } else {
  3373. rdlen = adapter->rx_ring[0].count *
  3374. - sizeof(struct iegbe_rx_desc);
  3375. - adapter->clean_rx = iegbe_clean_rx_irq;
  3376. - adapter->alloc_rx_buf = iegbe_alloc_rx_buffers;
  3377. - }
  3378. + sizeof(struct iegbe_rx_desc);
  3379. + adapter->clean_rx = iegbe_clean_rx_irq;
  3380. + adapter->alloc_rx_buf = iegbe_alloc_rx_buffers;
  3381. + }
  3382. - /* disable receives while setting up the descriptors */
  3383. - rctl = E1000_READ_REG(hw, RCTL);
  3384. - E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
  3385. + /* disable receives while setting up the descriptors */
  3386. + rctl = E1000_READ_REG(hw, RCTL);
  3387. + E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
  3388. - /* set the Receive Delay Timer Register */
  3389. - E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
  3390. + /* set the Receive Delay Timer Register */
  3391. + E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
  3392. - if (hw->mac_type >= iegbe_82540) {
  3393. - E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
  3394. - if(adapter->itr > 0x1) {
  3395. - E1000_WRITE_REG(hw, ITR,
  3396. - 0x3b9aca00 / (adapter->itr * 0x100));
  3397. + if (hw->mac_type >= iegbe_82540) {
  3398. + E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
  3399. + if (adapter->itr_setting != 0)
  3400. + E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (adapter->itr * 256));
  3401. }
  3402. - }
  3403. - if (hw->mac_type >= iegbe_82571) {
  3404. - /* Reset delay timers after every interrupt */
  3405. - ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
  3406. - ctrl_ext |= E1000_CTRL_EXT_CANC;
  3407. - E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
  3408. - E1000_WRITE_FLUSH(hw);
  3409. - }
  3410. + if (hw->mac_type >= iegbe_82571) {
  3411. + /* Reset delay timers after every interrupt */
  3412. + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
  3413. + ctrl_ext |= E1000_CTRL_EXT_CANC;
  3414. + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
  3415. + E1000_WRITE_FLUSH(hw);
  3416. + }
  3417. /* Setup the HW Rx Head and Tail Descriptor Pointers and
  3418. * the Base and Length of the Rx Descriptor Ring */
  3419. - switch (adapter->num_queues) {
  3420. -#ifdef CONFIG_E1000_MQ
  3421. - case 0x2:
  3422. - rdba = adapter->rx_ring[0x1].dma;
  3423. - E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
  3424. - E1000_WRITE_REG(hw, RDBAH1, (rdba >> 0x20));
  3425. - E1000_WRITE_REG(hw, RDLEN1, rdlen);
  3426. - E1000_WRITE_REG(hw, RDH1, 0);
  3427. - E1000_WRITE_REG(hw, RDT1, 0);
  3428. - adapter->rx_ring[1].rdh = E1000_RDH1;
  3429. - adapter->rx_ring[1].rdt = E1000_RDT1;
  3430. - /* Fall Through */
  3431. -#endif
  3432. - case 0x1:
  3433. - default:
  3434. + switch (adapter->num_rx_queues) {
  3435. + case 1:
  3436. + default:
  3437. rdba = adapter->rx_ring[0].dma;
  3438. - E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
  3439. + E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
  3440. E1000_WRITE_REG(hw, RDBAH, (rdba >> 0x20));
  3441. - E1000_WRITE_REG(hw, RDLEN, rdlen);
  3442. - E1000_WRITE_REG(hw, RDH, 0);
  3443. - E1000_WRITE_REG(hw, RDT, 0);
  3444. - adapter->rx_ring[0].rdh = E1000_RDH;
  3445. - adapter->rx_ring[0].rdt = E1000_RDT;
  3446. - break;
  3447. - }
  3448. + E1000_WRITE_REG(hw, RDLEN, rdlen);
  3449. + adapter->rx_ring[0].rdh = ((hw->mac_type >= iegbe_82543) ? E1000_RDH : E1000_82542_RDH);
  3450. + adapter->rx_ring[0].rdt = ((hw->mac_type >= iegbe_82543) ? E1000_RDT : E1000_82542_RDT);
  3451. + break;
  3452. + }
  3453. -#ifdef CONFIG_E1000_MQ
  3454. - if (adapter->num_queues > 0x1) {
  3455. - uint32_t random[0xa];
  3456. -
  3457. - get_random_bytes(&random[0], FORTY);
  3458. -
  3459. - if (hw->mac_type <= iegbe_82572) {
  3460. - E1000_WRITE_REG(hw, RSSIR, 0);
  3461. - E1000_WRITE_REG(hw, RSSIM, 0);
  3462. - }
  3463. - switch (adapter->num_queues) {
  3464. - case 0x2:
  3465. - default:
  3466. - reta = 0x00800080;
  3467. - mrqc = E1000_MRQC_ENABLE_RSS_2Q;
  3468. - break;
  3469. - }
  3470. -
  3471. - /* Fill out redirection table */
  3472. - for (i = 0; i < 0x20; i++)
  3473. - E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
  3474. - /* Fill out hash function seeds */
  3475. - for (i = 0; i < 0xa; i++)
  3476. - E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
  3477. -
  3478. - mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
  3479. - E1000_MRQC_RSS_FIELD_IPV4_TCP);
  3480. - E1000_WRITE_REG(hw, MRQC, mrqc);
  3481. - }
  3482. -
  3483. - /* Multiqueue and packet checksumming are mutually exclusive. */
  3484. - if (hw->mac_type >= iegbe_82571) {
  3485. - rxcsum = E1000_READ_REG(hw, RXCSUM);
  3486. - rxcsum |= E1000_RXCSUM_PCSD;
  3487. - E1000_WRITE_REG(hw, RXCSUM, rxcsum);
  3488. - }
  3489. -
  3490. -#else
  3491. + /* Enable 82543 Receive Checksum Offload for TCP and UDP */
  3492. + if (hw->mac_type >= iegbe_82543) {
  3493. + rxcsum = E1000_READ_REG(hw, RXCSUM);
  3494. + if(adapter->rx_csum == TRUE) {
  3495. + rxcsum |= E1000_RXCSUM_TUOFL;
  3496. +
  3497. + /* Enable 82571 IPv4 payload checksum for UDP fragments
  3498. + * Must be used in conjunction with packet-split. */
  3499. + if ((hw->mac_type >= iegbe_82571) &&
  3500. + (adapter->rx_ps_pages)) {
  3501. + rxcsum |= E1000_RXCSUM_IPPCSE;
  3502. + }
  3503. + } else {
  3504. + rxcsum &= ~E1000_RXCSUM_TUOFL;
  3505. + /* don't need to clear IPPCSE as it defaults to 0 */
  3506. + }
  3507. + E1000_WRITE_REG(hw, RXCSUM, rxcsum);
  3508. + }
  3509. - /* Enable 82543 Receive Checksum Offload for TCP and UDP */
  3510. - if (hw->mac_type >= iegbe_82543) {
  3511. - rxcsum = E1000_READ_REG(hw, RXCSUM);
  3512. - if(adapter->rx_csum == TRUE) {
  3513. - rxcsum |= E1000_RXCSUM_TUOFL;
  3514. -
  3515. - /* Enable 82571 IPv4 payload checksum for UDP fragments
  3516. - * Must be used in conjunction with packet-split. */
  3517. - if ((hw->mac_type >= iegbe_82571) &&
  3518. - (adapter->rx_ps_pages)) {
  3519. - rxcsum |= E1000_RXCSUM_IPPCSE;
  3520. - }
  3521. - } else {
  3522. - rxcsum &= ~E1000_RXCSUM_TUOFL;
  3523. - /* don't need to clear IPPCSE as it defaults to 0 */
  3524. - }
  3525. - E1000_WRITE_REG(hw, RXCSUM, rxcsum);
  3526. - }
  3527. -#endif /* CONFIG_E1000_MQ */
  3528. + /* enable early receives on 82573, only takes effect if using > 2048
  3529. + * byte total frame size. for example only for jumbo frames */
  3530. +#define E1000_ERT_2048 0x100
  3531. + if (hw->mac_type == iegbe_82573)
  3532. + E1000_WRITE_REG(&adapter->hw, ERT, E1000_ERT_2048);
  3533. - if (hw->mac_type == iegbe_82573) {
  3534. - E1000_WRITE_REG(hw, ERT, 0x0100);
  3535. - }
  3536. /* Enable Receives */
  3537. - E1000_WRITE_REG(hw, RCTL, rctl);
  3538. + E1000_WRITE_REG(hw, RCTL, rctl);
  3539. }
  3540. /**
  3541. @@ -2022,20 +1859,19 @@ iegbe_configure_rx(struct iegbe_adapter
  3542. * Free all transmit software resources
  3543. **/
  3544. -void
  3545. -iegbe_free_tx_resources(struct iegbe_adapter *adapter,
  3546. +static void iegbe_free_tx_resources(struct iegbe_adapter *adapter,
  3547. struct iegbe_tx_ring *tx_ring)
  3548. {
  3549. - struct pci_dev *pdev = adapter->pdev;
  3550. + struct pci_dev *pdev = adapter->pdev;
  3551. - iegbe_clean_tx_ring(adapter, tx_ring);
  3552. + iegbe_clean_tx_ring(adapter, tx_ring);
  3553. - vfree(tx_ring->buffer_info);
  3554. - tx_ring->buffer_info = NULL;
  3555. + vfree(tx_ring->buffer_info);
  3556. + tx_ring->buffer_info = NULL;
  3557. - pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
  3558. + pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
  3559. - tx_ring->desc = NULL;
  3560. + tx_ring->desc = NULL;
  3561. }
  3562. /**
  3563. @@ -2048,85 +1884,29 @@ iegbe_free_tx_resources(struct iegbe_ada
  3564. void
  3565. iegbe_free_all_tx_resources(struct iegbe_adapter *adapter)
  3566. {
  3567. - int i;
  3568. + int i;
  3569. - for (i = 0; i < adapter->num_queues; i++)
  3570. + for (i = 0x0; i < adapter->num_tx_queues; i++)
  3571. iegbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
  3572. }
  3573. static inline void
  3574. iegbe_unmap_and_free_tx_resource(struct iegbe_adapter *adapter,
  3575. - struct iegbe_buffer *buffer_info)
  3576. -{
  3577. - if(buffer_info->dma) {
  3578. - pci_unmap_page(adapter->pdev,
  3579. - buffer_info->dma,
  3580. - buffer_info->length,
  3581. - PCI_DMA_TODEVICE);
  3582. - buffer_info->dma = 0;
  3583. - }
  3584. - if(buffer_info->skb) {
  3585. - dev_kfree_skb_any(buffer_info->skb);
  3586. - buffer_info->skb = NULL;
  3587. - }
  3588. -}
  3589. -
  3590. -#ifdef IEGBE_GBE_WORKAROUND
  3591. -/**
  3592. - * iegbe_clean_tx_ring_partial - Free Tx Buffers without using the DD
  3593. - * bit in the descriptor
  3594. - * @adapter: board private structure
  3595. - * @tx_ring: ring to be cleaned
  3596. - **/
  3597. -static void iegbe_clean_tx_ring_partial(struct iegbe_adapter *adapter,
  3598. - struct iegbe_tx_ring *tx_ring)
  3599. + struct iegbe_buffer *buffer_info)
  3600. {
  3601. - struct iegbe_buffer *buffer_info;
  3602. - struct iegbe_tx_desc *tx_desc;
  3603. - struct net_device *netdev = adapter->netdev;
  3604. - unsigned int i;
  3605. - unsigned tail;
  3606. - unsigned head;
  3607. - int cleaned = FALSE;
  3608. -
  3609. - tail = readl(adapter->hw.hw_addr + tx_ring->tdt);
  3610. - head = readl(adapter->hw.hw_addr + tx_ring->tdh);
  3611. -
  3612. - if (head != tail) {
  3613. - adapter->stats.tx_hnet++;
  3614. - }
  3615. - if (head != tx_ring->next_to_use) {
  3616. - adapter->stats.tx_hnentu++;
  3617. - }
  3618. - /* Free all the Tx ring sk_buffs from next_to_clean up until
  3619. - * the current head pointer
  3620. - */
  3621. - i = tx_ring->next_to_clean;
  3622. - while(i != head) {
  3623. - cleaned = TRUE;
  3624. - tx_desc = E1000_TX_DESC(*tx_ring, i);
  3625. -
  3626. - buffer_info = &tx_ring->buffer_info[i];
  3627. - iegbe_unmap_and_free_tx_resource(adapter, buffer_info);
  3628. -
  3629. - tx_desc->upper.data = 0;
  3630. -
  3631. - if (unlikely(++i == tx_ring->count)) { i = 0; }
  3632. -
  3633. - }
  3634. - tx_ring->next_to_clean = head;
  3635. -
  3636. - spin_lock(&tx_ring->tx_lock);
  3637. -
  3638. - /* Wake up the queue if it's currently stopped */
  3639. - if (unlikely(cleaned && netif_queue_stopped(netdev) &&
  3640. - netif_carrier_ok(netdev))) {
  3641. - netif_wake_queue(netdev);
  3642. + if(buffer_info->dma) {
  3643. + pci_unmap_page(adapter->pdev,
  3644. + buffer_info->dma,
  3645. + buffer_info->length,
  3646. + PCI_DMA_TODEVICE);
  3647. + buffer_info->dma = 0x0;
  3648. + }
  3649. + if(buffer_info->skb) {
  3650. + dev_kfree_skb_any(buffer_info->skb);
  3651. + buffer_info->skb = NULL;
  3652. }
  3653. -
  3654. - spin_unlock(&tx_ring->tx_lock);
  3655. }
  3656. -#endif
  3657. +
  3658. /**
  3659. * iegbe_clean_tx_ring - Free Tx Buffers
  3660. @@ -2134,38 +1914,34 @@ static void iegbe_clean_tx_ring_partial(
  3661. * @tx_ring: ring to be cleaned
  3662. **/
  3663. -static void
  3664. -iegbe_clean_tx_ring(struct iegbe_adapter *adapter,
  3665. +static void iegbe_clean_tx_ring(struct iegbe_adapter *adapter,
  3666. struct iegbe_tx_ring *tx_ring)
  3667. {
  3668. - struct iegbe_buffer *buffer_info;
  3669. - unsigned long size;
  3670. - unsigned int i;
  3671. -
  3672. - /* Free all the Tx ring sk_buffs */
  3673. + struct iegbe_hw *hw = &adapter->hw;
  3674. + struct iegbe_buffer *buffer_info;
  3675. + unsigned long size;
  3676. + unsigned int i;
  3677. - if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
  3678. - iegbe_unmap_and_free_tx_resource(adapter,
  3679. - &tx_ring->previous_buffer_info);
  3680. - }
  3681. + /* Free all the Tx ring sk_buffs */
  3682. for (i = 0; i < tx_ring->count; i++) {
  3683. - buffer_info = &tx_ring->buffer_info[i];
  3684. - iegbe_unmap_and_free_tx_resource(adapter, buffer_info);
  3685. - }
  3686. + buffer_info = &tx_ring->buffer_info[i];
  3687. + iegbe_unmap_and_free_tx_resource(adapter, buffer_info);
  3688. + }
  3689. - size = sizeof(struct iegbe_buffer) * tx_ring->count;
  3690. + size = sizeof(struct iegbe_buffer) * tx_ring->count;
  3691. memset(tx_ring->buffer_info, 0, size);
  3692. - /* Zero out the descriptor ring */
  3693. + /* Zero out the descriptor ring */
  3694. memset(tx_ring->desc, 0, tx_ring->size);
  3695. tx_ring->next_to_use = 0;
  3696. tx_ring->next_to_clean = 0;
  3697. + tx_ring->last_tx_tso = 0;
  3698. - writel(0, adapter->hw.hw_addr + tx_ring->tdh);
  3699. - writel(0, adapter->hw.hw_addr + tx_ring->tdt);
  3700. + writel(0, hw->hw_addr + tx_ring->tdh);
  3701. + writel(0, hw->hw_addr + tx_ring->tdt);
  3702. }
  3703. /**
  3704. @@ -2173,12 +1949,11 @@ iegbe_clean_tx_ring(struct iegbe_adapter
  3705. * @adapter: board private structure
  3706. **/
  3707. -static void
  3708. -iegbe_clean_all_tx_rings(struct iegbe_adapter *adapter)
  3709. +static void iegbe_clean_all_tx_rings(struct iegbe_adapter *adapter)
  3710. {
  3711. - int i;
  3712. + int i;
  3713. - for (i = 0; i < adapter->num_queues; i++)
  3714. + for (i = 0; i < adapter->num_tx_queues; i++)
  3715. iegbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
  3716. }
  3717. @@ -2190,24 +1965,23 @@ iegbe_clean_all_tx_rings(struct iegbe_ad
  3718. * Free all receive software resources
  3719. **/
  3720. -void
  3721. -iegbe_free_rx_resources(struct iegbe_adapter *adapter,
  3722. +static void iegbe_free_rx_resources(struct iegbe_adapter *adapter,
  3723. struct iegbe_rx_ring *rx_ring)
  3724. {
  3725. - struct pci_dev *pdev = adapter->pdev;
  3726. + struct pci_dev *pdev = adapter->pdev;
  3727. - iegbe_clean_rx_ring(adapter, rx_ring);
  3728. + iegbe_clean_rx_ring(adapter, rx_ring);
  3729. - vfree(rx_ring->buffer_info);
  3730. - rx_ring->buffer_info = NULL;
  3731. - kfree(rx_ring->ps_page);
  3732. - rx_ring->ps_page = NULL;
  3733. - kfree(rx_ring->ps_page_dma);
  3734. - rx_ring->ps_page_dma = NULL;
  3735. + vfree(rx_ring->buffer_info);
  3736. + rx_ring->buffer_info = NULL;
  3737. + kfree(rx_ring->ps_page);
  3738. + rx_ring->ps_page = NULL;
  3739. + kfree(rx_ring->ps_page_dma);
  3740. + rx_ring->ps_page_dma = NULL;
  3741. - pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
  3742. + pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
  3743. - rx_ring->desc = NULL;
  3744. + rx_ring->desc = NULL;
  3745. }
  3746. /**
  3747. @@ -2217,12 +1991,11 @@ iegbe_free_rx_resources(struct iegbe_ada
  3748. * Free all receive software resources
  3749. **/
  3750. -void
  3751. -iegbe_free_all_rx_resources(struct iegbe_adapter *adapter)
  3752. +void iegbe_free_all_rx_resources(struct iegbe_adapter *adapter)
  3753. {
  3754. - int i;
  3755. + int i;
  3756. - for (i = 0; i < adapter->num_queues; i++)
  3757. + for (i = 0; i < adapter->num_rx_queues; i++)
  3758. iegbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
  3759. }
  3760. @@ -2232,60 +2005,59 @@ iegbe_free_all_rx_resources(struct iegbe
  3761. * @rx_ring: ring to free buffers from
  3762. **/
  3763. -static void
  3764. -iegbe_clean_rx_ring(struct iegbe_adapter *adapter,
  3765. +static void iegbe_clean_rx_ring(struct iegbe_adapter *adapter,
  3766. struct iegbe_rx_ring *rx_ring)
  3767. {
  3768. - struct iegbe_buffer *buffer_info;
  3769. - struct iegbe_ps_page *ps_page;
  3770. - struct iegbe_ps_page_dma *ps_page_dma;
  3771. - struct pci_dev *pdev = adapter->pdev;
  3772. - unsigned long size;
  3773. - unsigned int i, j;
  3774. -
  3775. - /* Free all the Rx ring sk_buffs */
  3776. + struct iegbe_hw *hw = &adapter->hw;
  3777. + struct iegbe_buffer *buffer_info;
  3778. + struct iegbe_ps_page *ps_page;
  3779. + struct iegbe_ps_page_dma *ps_page_dma;
  3780. + struct pci_dev *pdev = adapter->pdev;
  3781. + unsigned long size;
  3782. + unsigned int i, j;
  3783. +
  3784. + /* Free all the Rx ring sk_buffs */
  3785. +
  3786. + for (i = 0; i < rx_ring->count; i++) {
  3787. + buffer_info = &rx_ring->buffer_info[i];
  3788. + if(buffer_info->skb) {
  3789. + pci_unmap_single(pdev,
  3790. + buffer_info->dma,
  3791. + buffer_info->length,
  3792. + PCI_DMA_FROMDEVICE);
  3793. - for(i = 0; i < rx_ring->count; i++) {
  3794. - buffer_info = &rx_ring->buffer_info[i];
  3795. - if(buffer_info->skb) {
  3796. - ps_page = &rx_ring->ps_page[i];
  3797. - ps_page_dma = &rx_ring->ps_page_dma[i];
  3798. - pci_unmap_single(pdev,
  3799. - buffer_info->dma,
  3800. - buffer_info->length,
  3801. - PCI_DMA_FROMDEVICE);
  3802. -
  3803. - dev_kfree_skb(buffer_info->skb);
  3804. - buffer_info->skb = NULL;
  3805. -
  3806. - for(j = 0; j < adapter->rx_ps_pages; j++) {
  3807. - if(!ps_page->ps_page[j]) { break; }
  3808. - pci_unmap_single(pdev,
  3809. - ps_page_dma->ps_page_dma[j],
  3810. - PAGE_SIZE, PCI_DMA_FROMDEVICE);
  3811. - ps_page_dma->ps_page_dma[j] = 0;
  3812. - put_page(ps_page->ps_page[j]);
  3813. - ps_page->ps_page[j] = NULL;
  3814. - }
  3815. + dev_kfree_skb(buffer_info->skb);
  3816. + buffer_info->skb = NULL;
  3817. }
  3818. - }
  3819. + ps_page = &rx_ring->ps_page[i];
  3820. + ps_page_dma = &rx_ring->ps_page_dma[i];
  3821. + for (j = 0; j < adapter->rx_ps_pages; j++) {
  3822. + if (!ps_page->ps_page[j]) break;
  3823. + pci_unmap_page(pdev,
  3824. + ps_page_dma->ps_page_dma[j],
  3825. + PAGE_SIZE, PCI_DMA_FROMDEVICE);
  3826. + ps_page_dma->ps_page_dma[j] = 0;
  3827. + put_page(ps_page->ps_page[j]);
  3828. + ps_page->ps_page[j] = NULL;
  3829. + }
  3830. + }
  3831. - size = sizeof(struct iegbe_buffer) * rx_ring->count;
  3832. + size = sizeof(struct iegbe_buffer) * rx_ring->count;
  3833. memset(rx_ring->buffer_info, 0, size);
  3834. - size = sizeof(struct iegbe_ps_page) * rx_ring->count;
  3835. + size = sizeof(struct iegbe_ps_page) * rx_ring->count;
  3836. memset(rx_ring->ps_page, 0, size);
  3837. - size = sizeof(struct iegbe_ps_page_dma) * rx_ring->count;
  3838. + size = sizeof(struct iegbe_ps_page_dma) * rx_ring->count;
  3839. memset(rx_ring->ps_page_dma, 0, size);
  3840. - /* Zero out the descriptor ring */
  3841. + /* Zero out the descriptor ring */
  3842. memset(rx_ring->desc, 0, rx_ring->size);
  3843. rx_ring->next_to_clean = 0;
  3844. rx_ring->next_to_use = 0;
  3845. - writel(0, adapter->hw.hw_addr + rx_ring->rdh);
  3846. - writel(0, adapter->hw.hw_addr + rx_ring->rdt);
  3847. + writel(0, hw->hw_addr + rx_ring->rdh);
  3848. + writel(0, hw->hw_addr + rx_ring->rdt);
  3849. }
  3850. /**
  3851. @@ -2293,60 +2065,54 @@ iegbe_clean_rx_ring(struct iegbe_adapter
  3852. * @adapter: board private structure
  3853. **/
  3854. -static void
  3855. -iegbe_clean_all_rx_rings(struct iegbe_adapter *adapter)
  3856. +static void iegbe_clean_all_rx_rings(struct iegbe_adapter *adapter)
  3857. {
  3858. - int i;
  3859. + int i;
  3860. - for (i = 0; i < adapter->num_queues; i++)
  3861. + for (i = 0; i < adapter->num_rx_queues; i++)
  3862. iegbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
  3863. }
  3864. /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
  3865. * and memory write and invalidate disabled for certain operations
  3866. */
  3867. -static void
  3868. -iegbe_enter_82542_rst(struct iegbe_adapter *adapter)
  3869. +static void iegbe_enter_82542_rst(struct iegbe_adapter *adapter)
  3870. {
  3871. - struct net_device *netdev = adapter->netdev;
  3872. - uint32_t rctl;
  3873. + struct net_device *netdev = adapter->netdev;
  3874. + uint32_t rctl;
  3875. - iegbe_pci_clear_mwi(&adapter->hw);
  3876. + iegbe_pci_clear_mwi(&adapter->hw);
  3877. - rctl = E1000_READ_REG(&adapter->hw, RCTL);
  3878. - rctl |= E1000_RCTL_RST;
  3879. - E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  3880. - E1000_WRITE_FLUSH(&adapter->hw);
  3881. + rctl = E1000_READ_REG(&adapter->hw, RCTL);
  3882. + rctl |= E1000_RCTL_RST;
  3883. + E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  3884. + E1000_WRITE_FLUSH(&adapter->hw);
  3885. mdelay(0x5);
  3886. if(netif_running(netdev)) {
  3887. - iegbe_clean_all_rx_rings(adapter);
  3888. -}
  3889. + iegbe_clean_all_rx_rings(adapter);
  3890. + }
  3891. }
  3892. static void
  3893. iegbe_leave_82542_rst(struct iegbe_adapter *adapter)
  3894. {
  3895. - struct net_device *netdev = adapter->netdev;
  3896. - uint32_t rctl;
  3897. + struct net_device *netdev = adapter->netdev;
  3898. + uint32_t rctl;
  3899. - rctl = E1000_READ_REG(&adapter->hw, RCTL);
  3900. - rctl &= ~E1000_RCTL_RST;
  3901. - E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  3902. - E1000_WRITE_FLUSH(&adapter->hw);
  3903. + rctl = E1000_READ_REG(&adapter->hw, RCTL);
  3904. + rctl &= ~E1000_RCTL_RST;
  3905. + E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  3906. + E1000_WRITE_FLUSH(&adapter->hw);
  3907. mdelay(0x5);
  3908. if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) {
  3909. - iegbe_pci_set_mwi(&adapter->hw);
  3910. + iegbe_pci_set_mwi(&adapter->hw);
  3911. }
  3912. if(netif_running(netdev)) {
  3913. + struct iegbe_rx_ring *ring = &adapter->rx_ring[0x0];
  3914. iegbe_configure_rx(adapter);
  3915. -#ifdef IEGBE_GBE_WORKAROUND
  3916. - iegbe_alloc_rx_buffers(adapter, &adapter->rx_ring[0],
  3917. - IEGBE_GBE_WORKAROUND_NUM_RX_DESCRIPTORS + 1);
  3918. -#else
  3919. - iegbe_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
  3920. -#endif
  3921. + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
  3922. }
  3923. }
  3924. @@ -2358,133 +2124,153 @@ iegbe_leave_82542_rst(struct iegbe_adapt
  3925. * Returns 0 on success, negative on failure
  3926. **/
  3927. -static int
  3928. -iegbe_set_mac(struct net_device *netdev, void *p)
  3929. +static int iegbe_set_mac(struct net_device *netdev, void *p)
  3930. {
  3931. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  3932. - struct sockaddr *addr = p;
  3933. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  3934. + struct sockaddr *addr = p;
  3935. if(!is_valid_ether_addr(addr->sa_data)) {
  3936. - return -EADDRNOTAVAIL;
  3937. + return -EADDRNOTAVAIL;
  3938. }
  3939. - /* 82542 2.0 needs to be in reset to write receive address registers */
  3940. + /* 82542 2.0 needs to be in reset to write receive address registers */
  3941. if(adapter->hw.mac_type == iegbe_82542_rev2_0) {
  3942. - iegbe_enter_82542_rst(adapter);
  3943. + iegbe_enter_82542_rst(adapter);
  3944. }
  3945. - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  3946. - memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
  3947. + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  3948. + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
  3949. - iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
  3950. + iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr, 0x0);
  3951. - /* With 82571 controllers, LAA may be overwritten (with the default)
  3952. - * due to controller reset from the other port. */
  3953. - if (adapter->hw.mac_type == iegbe_82571) {
  3954. - /* activate the work around */
  3955. + /* With 82571 controllers, LAA may be overwritten (with the default)
  3956. + * due to controller reset from the other port. */
  3957. + if (adapter->hw.mac_type == iegbe_82571) {
  3958. + /* activate the work around */
  3959. adapter->hw.laa_is_present = 0x1;
  3960. - /* Hold a copy of the LAA in RAR[14] This is done so that
  3961. - * between the time RAR[0] gets clobbered and the time it
  3962. - * gets fixed (in iegbe_watchdog), the actual LAA is in one
  3963. - * of the RARs and no incoming packets directed to this port
  3964. - * are dropped. Eventaully the LAA will be in RAR[0] and
  3965. - * RAR[14] */
  3966. - iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr,
  3967. + /* Hold a copy of the LAA in RAR[14] This is done so that
  3968. + * between the time RAR[0] gets clobbered and the time it
  3969. + * gets fixed (in iegbe_watchdog), the actual LAA is in one
  3970. + * of the RARs and no incoming packets directed to this port
  3971. + * are dropped. Eventaully the LAA will be in RAR[0] and
  3972. + * RAR[14] */
  3973. + iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr,
  3974. E1000_RAR_ENTRIES - 0x1);
  3975. - }
  3976. + }
  3977. if(adapter->hw.mac_type == iegbe_82542_rev2_0) {
  3978. - iegbe_leave_82542_rst(adapter);
  3979. + iegbe_leave_82542_rst(adapter);
  3980. }
  3981. - return 0;
  3982. + return 0x0;
  3983. }
  3984. /**
  3985. - * iegbe_set_multi - Multicast and Promiscuous mode set
  3986. + * iegbe_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
  3987. * @netdev: network interface device structure
  3988. *
  3989. - * The set_multi entry point is called whenever the multicast address
  3990. - * list or the network interface flags are updated. This routine is
  3991. - * responsible for configuring the hardware for proper multicast,
  3992. + * The set_rx_mode entry point is called whenever the unicast or multicast
  3993. + * address lists or the network interface flags are updated. This routine is
  3994. + * responsible for configuring the hardware for proper unicast, multicast,
  3995. * promiscuous mode, and all-multi behavior.
  3996. **/
  3997. -static void
  3998. -iegbe_set_multi(struct net_device *netdev)
  3999. +static void iegbe_set_rx_mode(struct net_device *netdev)
  4000. {
  4001. struct iegbe_adapter *adapter = netdev_priv(netdev);
  4002. struct iegbe_hw *hw = &adapter->hw;
  4003. - struct dev_mc_list *mc_ptr;
  4004. - uint32_t rctl;
  4005. - uint32_t hash_value;
  4006. + struct dev_addr_list *uc_ptr;
  4007. + struct dev_addr_list *mc_ptr;
  4008. + u32 rctl;
  4009. + u32 hash_value;
  4010. int i, rar_entries = E1000_RAR_ENTRIES;
  4011. +int mta_reg_count = E1000_NUM_MTA_REGISTERS;
  4012. /* reserve RAR[14] for LAA over-write work-around */
  4013. - if (adapter->hw.mac_type == iegbe_82571) {
  4014. + if (hw->mac_type == iegbe_82571)
  4015. rar_entries--;
  4016. - }
  4017. +
  4018. /* Check for Promiscuous and All Multicast modes */
  4019. - rctl = E1000_READ_REG(hw, RCTL);
  4020. + rctl = E1000_READ_REG(&adapter->hw, RCTL);
  4021. if (netdev->flags & IFF_PROMISC) {
  4022. rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
  4023. - } else if (netdev->flags & IFF_ALLMULTI) {
  4024. - rctl |= E1000_RCTL_MPE;
  4025. - rctl &= ~E1000_RCTL_UPE;
  4026. + rctl &= ~E1000_RCTL_VFE;
  4027. } else {
  4028. - rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
  4029. + if (netdev->flags & IFF_ALLMULTI) {
  4030. + rctl |= E1000_RCTL_MPE;
  4031. + } else {
  4032. + rctl &= ~E1000_RCTL_MPE;
  4033. + }
  4034. + }
  4035. +
  4036. + uc_ptr = NULL;
  4037. + if (netdev->uc_count > rar_entries - 1) {
  4038. + rctl |= E1000_RCTL_UPE;
  4039. + } else if (!(netdev->flags & IFF_PROMISC)) {
  4040. + rctl &= ~E1000_RCTL_UPE;
  4041. + uc_ptr = netdev->uc_list;
  4042. }
  4043. - E1000_WRITE_REG(hw, RCTL, rctl);
  4044. + E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  4045. /* 82542 2.0 needs to be in reset to write receive address registers */
  4046. - if (hw->mac_type == iegbe_82542_rev2_0) {
  4047. + if (hw->mac_type == iegbe_82542_rev2_0)
  4048. iegbe_enter_82542_rst(adapter);
  4049. - }
  4050. - /* load the first 14 multicast address into the exact filters 1-14
  4051. +
  4052. + /* load the first 14 addresses into the exact filters 1-14. Unicast
  4053. + * addresses take precedence to avoid disabling unicast filtering
  4054. + * when possible.
  4055. + *
  4056. * RAR 0 is used for the station MAC adddress
  4057. * if there are not 14 addresses, go ahead and clear the filters
  4058. * -- with 82571 controllers only 0-13 entries are filled here
  4059. */
  4060. mc_ptr = netdev->mc_list;
  4061. - for (i = 0x1; i < rar_entries; i++) {
  4062. - if (mc_ptr) {
  4063. - iegbe_rar_set(hw, mc_ptr->dmi_addr, i);
  4064. + for (i = 1; i < rar_entries; i++) {
  4065. + if (uc_ptr) {
  4066. + iegbe_rar_set(hw, uc_ptr->da_addr, i);
  4067. + uc_ptr = uc_ptr->next;
  4068. + } else if (mc_ptr) {
  4069. + iegbe_rar_set(hw, mc_ptr->da_addr, i);
  4070. mc_ptr = mc_ptr->next;
  4071. } else {
  4072. - E1000_WRITE_REG_ARRAY(hw, RA, i << 0x1, 0);
  4073. - E1000_WRITE_REG_ARRAY(hw, RA, (i << 0x1) + 0x1, 0);
  4074. + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
  4075. + E1000_WRITE_FLUSH(&adapter->hw);
  4076. + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
  4077. + E1000_WRITE_FLUSH(&adapter->hw);
  4078. }
  4079. }
  4080. + WARN_ON(uc_ptr != NULL);
  4081. /* clear the old settings from the multicast hash table */
  4082. - for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
  4083. + for (i = 0; i < mta_reg_count; i++) {
  4084. E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
  4085. + E1000_WRITE_FLUSH(&adapter->hw);
  4086. + }
  4087. /* load any remaining addresses into the hash table */
  4088. for (; mc_ptr; mc_ptr = mc_ptr->next) {
  4089. - hash_value = iegbe_hash_mc_addr(hw, mc_ptr->dmi_addr);
  4090. + hash_value = iegbe_hash_mc_addr(hw, mc_ptr->da_addr);
  4091. iegbe_mta_set(hw, hash_value);
  4092. }
  4093. - if (hw->mac_type == iegbe_82542_rev2_0) {
  4094. + if (hw->mac_type == iegbe_82542_rev2_0)
  4095. iegbe_leave_82542_rst(adapter);
  4096. }
  4097. -}
  4098. /* Need to wait a few seconds after link up to get diagnostic information from
  4099. * the phy */
  4100. -static void
  4101. -iegbe_update_phy_info(unsigned long data)
  4102. +static void iegbe_update_phy_info(unsigned long data)
  4103. {
  4104. - struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
  4105. - iegbe_phy_get_info(&adapter->hw, &adapter->phy_info);
  4106. + struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
  4107. + struct iegbe_hw *hw = &adapter->hw;
  4108. + iegbe_phy_get_info(hw, &adapter->phy_info);
  4109. }
  4110. /**
  4111. @@ -2492,54 +2278,54 @@ iegbe_update_phy_info(unsigned long data
  4112. * @data: pointer to adapter cast into an unsigned long
  4113. **/
  4114. -static void
  4115. -iegbe_82547_tx_fifo_stall(unsigned long data)
  4116. +static void iegbe_82547_tx_fifo_stall(unsigned long data)
  4117. {
  4118. - struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
  4119. - struct net_device *netdev = adapter->netdev;
  4120. - uint32_t tctl;
  4121. + struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
  4122. + struct net_device *netdev = adapter->netdev;
  4123. + u32 tctl;
  4124. - if(atomic_read(&adapter->tx_fifo_stall)) {
  4125. - if((E1000_READ_REG(&adapter->hw, TDT) ==
  4126. - E1000_READ_REG(&adapter->hw, TDH)) &&
  4127. - (E1000_READ_REG(&adapter->hw, TDFT) ==
  4128. - E1000_READ_REG(&adapter->hw, TDFH)) &&
  4129. - (E1000_READ_REG(&adapter->hw, TDFTS) ==
  4130. - E1000_READ_REG(&adapter->hw, TDFHS))) {
  4131. - tctl = E1000_READ_REG(&adapter->hw, TCTL);
  4132. - E1000_WRITE_REG(&adapter->hw, TCTL,
  4133. - tctl & ~E1000_TCTL_EN);
  4134. - E1000_WRITE_REG(&adapter->hw, TDFT,
  4135. - adapter->tx_head_addr);
  4136. - E1000_WRITE_REG(&adapter->hw, TDFH,
  4137. - adapter->tx_head_addr);
  4138. - E1000_WRITE_REG(&adapter->hw, TDFTS,
  4139. - adapter->tx_head_addr);
  4140. - E1000_WRITE_REG(&adapter->hw, TDFHS,
  4141. - adapter->tx_head_addr);
  4142. - E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
  4143. - E1000_WRITE_FLUSH(&adapter->hw);
  4144. -
  4145. - adapter->tx_fifo_head = 0;
  4146. - atomic_set(&adapter->tx_fifo_stall, 0);
  4147. - netif_wake_queue(netdev);
  4148. - } else {
  4149. + if(atomic_read(&adapter->tx_fifo_stall)) {
  4150. + if((E1000_READ_REG(&adapter->hw, TDT) ==
  4151. + E1000_READ_REG(&adapter->hw, TDH)) &&
  4152. + (E1000_READ_REG(&adapter->hw, TDFT) ==
  4153. + E1000_READ_REG(&adapter->hw, TDFH)) &&
  4154. + (E1000_READ_REG(&adapter->hw, TDFTS) ==
  4155. + E1000_READ_REG(&adapter->hw, TDFHS))) {
  4156. + tctl = E1000_READ_REG(&adapter->hw, TCTL);
  4157. + E1000_WRITE_REG(&adapter->hw, TCTL,
  4158. + tctl & ~E1000_TCTL_EN);
  4159. + E1000_WRITE_REG(&adapter->hw, TDFT,
  4160. + adapter->tx_head_addr);
  4161. + E1000_WRITE_REG(&adapter->hw, TDFH,
  4162. + adapter->tx_head_addr);
  4163. + E1000_WRITE_REG(&adapter->hw, TDFTS,
  4164. + adapter->tx_head_addr);
  4165. + E1000_WRITE_REG(&adapter->hw, TDFHS,
  4166. + adapter->tx_head_addr);
  4167. + E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
  4168. + E1000_WRITE_FLUSH(&adapter->hw);
  4169. +
  4170. + adapter->tx_fifo_head = 0x0;
  4171. + atomic_set(&adapter->tx_fifo_stall, 0x0);
  4172. + netif_wake_queue(netdev);
  4173. + } else {
  4174. mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 0x1);
  4175. - }
  4176. - }
  4177. + }
  4178. + }
  4179. }
  4180. +
  4181. /**
  4182. * iegbe_watchdog - Timer Call-back
  4183. * @data: pointer to adapter cast into an unsigned long
  4184. **/
  4185. -static void
  4186. -iegbe_watchdog(unsigned long data)
  4187. +static void iegbe_watchdog(unsigned long data)
  4188. {
  4189. - struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
  4190. - struct net_device *netdev = adapter->netdev;
  4191. - struct iegbe_tx_ring *txdr = &adapter->tx_ring[0];
  4192. - uint32_t link;
  4193. + struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
  4194. + struct iegbe_hw *hw = &adapter->hw;
  4195. + struct net_device *netdev = adapter->netdev;
  4196. + struct iegbe_tx_ring *txdr = adapter->tx_ring;
  4197. + u32 link, tctl;
  4198. /*
  4199. * Test the PHY for link status on icp_xxxx MACs.
  4200. @@ -2547,123 +2333,305 @@ iegbe_watchdog(unsigned long data)
  4201. * in the adapter->hw structure, then set hw->get_link_status = 1
  4202. */
  4203. if(adapter->hw.mac_type == iegbe_icp_xxxx) {
  4204. - int isUp = 0;
  4205. + int isUp = 0x0;
  4206. int32_t ret_val;
  4207. ret_val = iegbe_oem_phy_is_link_up(&adapter->hw, &isUp);
  4208. if(ret_val != E1000_SUCCESS) {
  4209. - isUp = 0;
  4210. - }
  4211. + isUp = 0x0;
  4212. + }
  4213. if(isUp != adapter->hw.icp_xxxx_is_link_up) {
  4214. adapter->hw.get_link_status = 0x1;
  4215. }
  4216. }
  4217. - iegbe_check_for_link(&adapter->hw);
  4218. - if (adapter->hw.mac_type == iegbe_82573) {
  4219. - iegbe_enable_tx_pkt_filtering(&adapter->hw);
  4220. + iegbe_check_for_link(&adapter->hw);
  4221. + if (adapter->hw.mac_type == iegbe_82573) {
  4222. + iegbe_enable_tx_pkt_filtering(&adapter->hw);
  4223. #ifdef NETIF_F_HW_VLAN_TX
  4224. if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) {
  4225. - iegbe_update_mng_vlan(adapter);
  4226. + iegbe_update_mng_vlan(adapter);
  4227. }
  4228. #endif
  4229. - }
  4230. + }
  4231. - if ((adapter->hw.media_type == iegbe_media_type_internal_serdes) &&
  4232. - !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) {
  4233. - link = !adapter->hw.serdes_link_down;
  4234. - } else {
  4235. + if ((adapter->hw.media_type == iegbe_media_type_internal_serdes) &&
  4236. + !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) {
  4237. + link = !adapter->hw.serdes_link_down;
  4238. + } else {
  4239. - if(adapter->hw.mac_type != iegbe_icp_xxxx) {
  4240. - link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
  4241. - } else {
  4242. - int isUp = 0;
  4243. + if(adapter->hw.mac_type != iegbe_icp_xxxx) {
  4244. + link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
  4245. + } else {
  4246. + int isUp = 0x0;
  4247. if(iegbe_oem_phy_is_link_up(&adapter->hw, &isUp) != E1000_SUCCESS) {
  4248. - isUp = 0;
  4249. + isUp = 0x0;
  4250. }
  4251. - link = isUp;
  4252. - }
  4253. - }
  4254. + link = isUp;
  4255. + }
  4256. + }
  4257. - if (link) {
  4258. - if (!netif_carrier_ok(netdev)) {
  4259. - iegbe_get_speed_and_duplex(&adapter->hw,
  4260. - &adapter->link_speed,
  4261. - &adapter->link_duplex);
  4262. -
  4263. - DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
  4264. - adapter->link_speed,
  4265. - adapter->link_duplex == FULL_DUPLEX ?
  4266. - "Full Duplex" : "Half Duplex");
  4267. + if (link) {
  4268. + if (!netif_carrier_ok(netdev)) {
  4269. + u32 ctrl;
  4270. + bool txb2b = true;
  4271. + iegbe_get_speed_and_duplex(hw,
  4272. + &adapter->link_speed,
  4273. + &adapter->link_duplex);
  4274. - netif_carrier_on(netdev);
  4275. - netif_wake_queue(netdev);
  4276. - mod_timer(&adapter->phy_info_timer, jiffies + 0x2 * HZ);
  4277. + ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  4278. + DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
  4279. + "Flow Control: %s\n",
  4280. + adapter->link_speed,
  4281. + adapter->link_duplex == FULL_DUPLEX ?
  4282. + "Full Duplex" : "Half Duplex",
  4283. + ((ctrl & E1000_CTRL_TFCE) && (ctrl &
  4284. + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
  4285. + E1000_CTRL_RFCE) ? "RX" : ((ctrl &
  4286. + E1000_CTRL_TFCE) ? "TX" : "None" )));
  4287. +
  4288. + /* tweak tx_queue_len according to speed/duplex
  4289. + * and adjust the timeout factor */
  4290. + netdev->tx_queue_len = adapter->tx_queue_len;
  4291. + adapter->tx_timeout_factor = 1;
  4292. + switch (adapter->link_speed) {
  4293. + case SPEED_10:
  4294. + txb2b = false;
  4295. + netdev->tx_queue_len = 10;
  4296. + adapter->tx_timeout_factor = 8;
  4297. + break;
  4298. + case SPEED_100:
  4299. + txb2b = false;
  4300. + netdev->tx_queue_len = 100;
  4301. + break;
  4302. + }
  4303. + if ((hw->mac_type == iegbe_82571 ||
  4304. + hw->mac_type == iegbe_82572) &&
  4305. + !txb2b) {
  4306. + u32 tarc0;
  4307. + tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
  4308. + tarc0 &= ~(1 << 21);
  4309. + E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
  4310. + }
  4311. + /* disable TSO for pcie and 10/100 speeds, to avoid
  4312. + * some hardware issues */
  4313. + if (!adapter->tso_force &&
  4314. + hw->bus_type == iegbe_bus_type_pci_express){
  4315. + switch (adapter->link_speed) {
  4316. + case SPEED_10:
  4317. + case SPEED_100:
  4318. + DPRINTK(PROBE,INFO,
  4319. + "10/100 speed: disabling TSO\n");
  4320. + netdev->features &= ~NETIF_F_TSO;
  4321. + netdev->features &= ~NETIF_F_TSO6;
  4322. + break;
  4323. + case SPEED_1000:
  4324. + netdev->features |= NETIF_F_TSO;
  4325. + netdev->features |= NETIF_F_TSO6;
  4326. + break;
  4327. + default:
  4328. + break;
  4329. + }
  4330. + }
  4331. + tctl = E1000_READ_REG(&adapter->hw, TCTL);
  4332. + tctl |= E1000_TCTL_EN;
  4333. + E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
  4334. + netif_carrier_on(netdev);
  4335. + netif_wake_queue(netdev);
  4336. + mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
  4337. adapter->smartspeed = 0;
  4338. + } else {
  4339. + if (hw->rx_needs_kicking) {
  4340. + u32 rctl = E1000_READ_REG(&adapter->hw, RCTL);
  4341. + E1000_WRITE_REG(&adapter->hw, RCTL, rctl | E1000_RCTL_EN);
  4342. + }
  4343. }
  4344. - } else {
  4345. - if (netif_carrier_ok(netdev)) {
  4346. + } else {
  4347. + if (netif_carrier_ok(netdev)) {
  4348. adapter->link_speed = 0;
  4349. adapter->link_duplex = 0;
  4350. - DPRINTK(LINK, INFO, "NIC Link is Down\n");
  4351. - netif_carrier_off(netdev);
  4352. - netif_stop_queue(netdev);
  4353. - mod_timer(&adapter->phy_info_timer, jiffies + 0x2 * HZ);
  4354. - }
  4355. + DPRINTK(LINK, INFO, "NIC Link is Down\n");
  4356. + netif_carrier_off(netdev);
  4357. + netif_stop_queue(netdev);
  4358. + mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
  4359. + }
  4360. - iegbe_smartspeed(adapter);
  4361. - }
  4362. + iegbe_smartspeed(adapter);
  4363. + }
  4364. +
  4365. + iegbe_update_stats(adapter);
  4366. +
  4367. + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
  4368. + adapter->tpt_old = adapter->stats.tpt;
  4369. + hw->collision_delta = adapter->stats.colc - adapter->colc_old;
  4370. + adapter->colc_old = adapter->stats.colc;
  4371. +
  4372. + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
  4373. + adapter->gorcl_old = adapter->stats.gorcl;
  4374. + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
  4375. + adapter->gotcl_old = adapter->stats.gotcl;
  4376. +
  4377. + iegbe_update_adaptive(hw);
  4378. +
  4379. + if (!netif_carrier_ok(netdev)) {
  4380. + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
  4381. + /* We've lost link, so the controller stops DMA,
  4382. + * but we've got queued Tx work that's never going
  4383. + * to get done, so reset controller to flush Tx.
  4384. + * (Do the reset outside of interrupt context). */
  4385. + adapter->tx_timeout_count++;
  4386. + schedule_work(&adapter->reset_task);
  4387. + }
  4388. + }
  4389. +
  4390. + /* Cause software interrupt to ensure rx ring is cleaned */
  4391. + E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
  4392. +
  4393. + /* Force detection of hung controller every watchdog period */
  4394. + adapter->detect_tx_hung = TRUE;
  4395. +
  4396. + /* With 82571 controllers, LAA may be overwritten due to controller
  4397. + * reset from the other port. Set the appropriate LAA in RAR[0] */
  4398. + if (adapter->hw.mac_type == iegbe_82571 && adapter->hw.laa_is_present) {
  4399. + iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr, 0x0);
  4400. + }
  4401. + /* Reset the timer */
  4402. + mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
  4403. +}
  4404. +
  4405. +enum latency_range {
  4406. + lowest_latency = 0,
  4407. + low_latency = 1,
  4408. + bulk_latency = 2,
  4409. + latency_invalid = 255
  4410. +};
  4411. - iegbe_update_stats(adapter);
  4412. +/**
  4413. + * iegbe_update_itr - update the dynamic ITR value based on statistics
  4414. + * Stores a new ITR value based on packets and byte
  4415. + * counts during the last interrupt. The advantage of per interrupt
  4416. + * computation is faster updates and more accurate ITR for the current
  4417. + * traffic pattern. Constants in this function were computed
  4418. + * based on theoretical maximum wire speed and thresholds were set based
  4419. + * on testing data as well as attempting to minimize response time
  4420. + * while increasing bulk throughput.
  4421. + * this functionality is controlled by the InterruptThrottleRate module
  4422. + * parameter (see iegbe_param.c)
  4423. + * @adapter: pointer to adapter
  4424. + * @itr_setting: current adapter->itr
  4425. + * @packets: the number of packets during this measurement interval
  4426. + * @bytes: the number of bytes during this measurement interval
  4427. + **/
  4428. +static unsigned int iegbe_update_itr(struct iegbe_adapter *adapter,
  4429. + u16 itr_setting, int packets, int bytes)
  4430. +{
  4431. + unsigned int retval = itr_setting;
  4432. + struct iegbe_hw *hw = &adapter->hw;
  4433. - adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
  4434. - adapter->tpt_old = adapter->stats.tpt;
  4435. - adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
  4436. - adapter->colc_old = adapter->stats.colc;
  4437. -
  4438. - adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
  4439. - adapter->gorcl_old = adapter->stats.gorcl;
  4440. - adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
  4441. - adapter->gotcl_old = adapter->stats.gotcl;
  4442. -
  4443. - iegbe_update_adaptive(&adapter->hw);
  4444. -
  4445. - if (adapter->num_queues == 0x1 && !netif_carrier_ok(netdev)) {
  4446. - if (E1000_DESC_UNUSED(txdr) + 0x1 < txdr->count) {
  4447. - /* We've lost link, so the controller stops DMA,
  4448. - * but we've got queued Tx work that's never going
  4449. - * to get done, so reset controller to flush Tx.
  4450. - * (Do the reset outside of interrupt context). */
  4451. - schedule_work(&adapter->tx_timeout_task);
  4452. + if (unlikely(hw->mac_type < iegbe_82540))
  4453. + goto update_itr_done;
  4454. +
  4455. + if (packets == 0)
  4456. + goto update_itr_done;
  4457. +
  4458. + switch (itr_setting) {
  4459. + case lowest_latency:
   4460. + /* jumbo frames get bulk treatment */
  4461. + if (bytes/packets > 8000)
  4462. + retval = bulk_latency;
  4463. + else if ((packets < 5) && (bytes > 512))
  4464. + retval = low_latency;
  4465. + break;
  4466. + case low_latency: /* 50 usec aka 20000 ints/s */
  4467. + if (bytes > 10000) {
  4468. + /* jumbo frames need bulk latency setting */
  4469. + if (bytes/packets > 8000)
  4470. + retval = bulk_latency;
  4471. + else if ((packets < 10) || ((bytes/packets) > 1200))
  4472. + retval = bulk_latency;
  4473. + else if ((packets > 35))
  4474. + retval = lowest_latency;
  4475. + } else if (bytes/packets > 2000)
  4476. + retval = bulk_latency;
  4477. + else if (packets <= 2 && bytes < 512)
  4478. + retval = lowest_latency;
  4479. + break;
  4480. + case bulk_latency: /* 250 usec aka 4000 ints/s */
  4481. + if (bytes > 25000) {
  4482. + if (packets > 35)
  4483. + retval = low_latency;
  4484. + } else if (bytes < 6000) {
  4485. + retval = low_latency;
  4486. }
  4487. + break;
  4488. }
  4489. - /* Dynamic mode for Interrupt Throttle Rate (ITR) */
  4490. - if (adapter->hw.mac_type >= iegbe_82540 && adapter->itr == 0x1) {
  4491. - /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
  4492. - * asymmetrical Tx or Rx gets ITR=8000; everyone
  4493. - * else is between 2000-8000. */
  4494. - uint32_t goc = (adapter->gotcl + adapter->gorcl) / 0x2710;
  4495. - uint32_t dif = (adapter->gotcl > adapter->gorcl ?
  4496. - adapter->gotcl - adapter->gorcl :
  4497. - adapter->gorcl - adapter->gotcl) / 0x2710;
  4498. - uint32_t itr = goc > 0 ? (dif * 0x1770 / goc + 0x7d0) : 0x1f40;
  4499. - E1000_WRITE_REG(&adapter->hw, ITR, 0x3b9aca00 / (itr * 0x100));
  4500. - }
  4501. +update_itr_done:
  4502. + return retval;
  4503. +}
  4504. - /* Cause software interrupt to ensure rx ring is cleaned */
  4505. - E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
  4506. +static void iegbe_set_itr(struct iegbe_adapter *adapter)
  4507. +{
  4508. + struct iegbe_hw *hw = &adapter->hw;
  4509. + u16 current_itr;
  4510. + u32 new_itr = adapter->itr;
  4511. - /* Force detection of hung controller every watchdog period */
  4512. - adapter->detect_tx_hung = TRUE;
  4513. + if (unlikely(hw->mac_type < iegbe_82540))
  4514. + return;
  4515. - /* With 82571 controllers, LAA may be overwritten due to controller
  4516. - * reset from the other port. Set the appropriate LAA in RAR[0] */
  4517. - if (adapter->hw.mac_type == iegbe_82571 && adapter->hw.laa_is_present) {
  4518. - iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
  4519. - }
  4520. - /* Reset the timer */
  4521. - mod_timer(&adapter->watchdog_timer, jiffies + 0x2 * HZ);
  4522. + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
  4523. + if (unlikely(adapter->link_speed != SPEED_1000)) {
  4524. + current_itr = 0;
  4525. + new_itr = 4000;
  4526. + goto set_itr_now;
  4527. + }
  4528. +
  4529. + adapter->tx_itr = iegbe_update_itr(adapter,
  4530. + adapter->tx_itr,
  4531. + adapter->total_tx_packets,
  4532. + adapter->total_tx_bytes);
  4533. + /* conservative mode (itr 3) eliminates the lowest_latency setting */
  4534. + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
  4535. + adapter->tx_itr = low_latency;
  4536. +
  4537. + adapter->rx_itr = iegbe_update_itr(adapter,
  4538. + adapter->rx_itr,
  4539. + adapter->total_rx_packets,
  4540. + adapter->total_rx_bytes);
  4541. + /* conservative mode (itr 3) eliminates the lowest_latency setting */
  4542. + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
  4543. + adapter->rx_itr = low_latency;
  4544. +
  4545. + current_itr = max(adapter->rx_itr, adapter->tx_itr);
  4546. +
  4547. + switch (current_itr) {
  4548. + /* counts and packets in update_itr are dependent on these numbers */
  4549. + case lowest_latency:
  4550. + new_itr = 70000;
  4551. + break;
  4552. + case low_latency:
  4553. + new_itr = 20000; /* aka hwitr = ~200 */
  4554. + break;
  4555. + case bulk_latency:
  4556. + new_itr = 4000;
  4557. + break;
  4558. + default:
  4559. + break;
  4560. + }
  4561. +
  4562. +set_itr_now:
  4563. + if (new_itr != adapter->itr) {
  4564. + /* this attempts to bias the interrupt rate towards Bulk
  4565. + * by adding intermediate steps when interrupt rate is
  4566. + * increasing */
  4567. + new_itr = new_itr > adapter->itr ?
  4568. + min(adapter->itr + (new_itr >> 2), new_itr) :
  4569. + new_itr;
  4570. + adapter->itr = new_itr;
  4571. + E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (new_itr * 256));
  4572. + }
  4573. +
  4574. + return;
  4575. }
  4576. #define E1000_TX_FLAGS_CSUM 0x00000001
  4577. @@ -2673,55 +2641,48 @@ iegbe_watchdog(unsigned long data)
  4578. #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
  4579. #define E1000_TX_FLAGS_VLAN_SHIFT 16
  4580. -static inline int
  4581. -iegbe_tso(struct iegbe_adapter *adapter, struct iegbe_tx_ring *tx_ring,
  4582. - struct sk_buff *skb)
  4583. +static int iegbe_tso(struct iegbe_adapter *adapter,
  4584. + struct iegbe_tx_ring *tx_ring, struct sk_buff *skb)
  4585. {
  4586. -#ifdef NETIF_F_TSO
  4587. struct iegbe_context_desc *context_desc;
  4588. + struct iegbe_buffer *buffer_info;
  4589. unsigned int i;
  4590. - uint32_t cmd_length = 0;
  4591. - uint16_t ipcse = 0, tucse, mss;
  4592. - uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
  4593. + u32 cmd_length = 0;
  4594. + u16 ipcse = 0, tucse, mss;
  4595. + u8 ipcss, ipcso, tucss, tucso, hdr_len;
  4596. int err;
  4597. if (skb_is_gso(skb)) {
  4598. if (skb_header_cloned(skb)) {
  4599. err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  4600. - if (err) {
  4601. + if (err)
  4602. return err;
  4603. }
  4604. - }
  4605. - hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 0x2));
  4606. + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
  4607. mss = skb_shinfo(skb)->gso_size;
  4608. if (skb->protocol == htons(ETH_P_IP)) {
  4609. - skb->nh.iph->tot_len = 0;
  4610. - skb->nh.iph->check = 0;
  4611. - skb->h.th->check =
  4612. - ~csum_tcpudp_magic(skb->nh.iph->saddr,
  4613. - skb->nh.iph->daddr,
  4614. - 0,
  4615. - IPPROTO_TCP,
  4616. - 0);
  4617. + struct iphdr *iph = ip_hdr(skb);
  4618. + iph->tot_len = 0;
  4619. + iph->check = 0;
  4620. + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
  4621. + iph->daddr, 0,
  4622. + IPPROTO_TCP,
  4623. + 0);
  4624. cmd_length = E1000_TXD_CMD_IP;
  4625. - ipcse = skb->h.raw - skb->data - 0x1;
  4626. -#ifdef NETIF_F_TSO_IPV6
  4627. - } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
  4628. - skb->nh.ipv6h->payload_len = 0;
  4629. - skb->h.th->check =
  4630. - ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
  4631. - &skb->nh.ipv6h->daddr,
  4632. - 0,
  4633. - IPPROTO_TCP,
  4634. - 0);
  4635. + ipcse = skb_transport_offset(skb) - 1;
  4636. + } else if (skb->protocol == htons(ETH_P_IPV6)) {
  4637. + ipv6_hdr(skb)->payload_len = 0;
  4638. + tcp_hdr(skb)->check =
  4639. + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
  4640. + &ipv6_hdr(skb)->daddr,
  4641. + 0, IPPROTO_TCP, 0);
  4642. ipcse = 0;
  4643. -#endif
  4644. }
  4645. - ipcss = skb->nh.raw - skb->data;
  4646. - ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
  4647. - tucss = skb->h.raw - skb->data;
  4648. - tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
  4649. + ipcss = skb_network_offset(skb);
  4650. + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
  4651. + tucss = skb_transport_offset(skb);
  4652. + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
  4653. tucse = 0;
  4654. cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
  4655. @@ -2729,6 +2690,7 @@ iegbe_tso(struct iegbe_adapter *adapter,
  4656. i = tx_ring->next_to_use;
  4657. context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
  4658. + buffer_info = &tx_ring->buffer_info[i];
  4659. context_desc->lower_setup.ip_fields.ipcss = ipcss;
  4660. context_desc->lower_setup.ip_fields.ipcso = ipcso;
  4661. @@ -2740,205 +2702,218 @@ iegbe_tso(struct iegbe_adapter *adapter,
  4662. context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
  4663. context_desc->cmd_and_length = cpu_to_le32(cmd_length);
  4664. - if (++i == tx_ring->count) { i = 0; }
  4665. + buffer_info->time_stamp = jiffies;
  4666. + buffer_info->next_to_watch = i;
  4667. +
  4668. + if (++i == tx_ring->count) i = 0;
  4669. tx_ring->next_to_use = i;
  4670. - return TRUE;
  4671. + return true;
  4672. }
  4673. -#endif
  4674. -
  4675. - return FALSE;
  4676. + return false;
  4677. }
  4678. -static inline boolean_t
  4679. -iegbe_tx_csum(struct iegbe_adapter *adapter, struct iegbe_tx_ring *tx_ring,
  4680. - struct sk_buff *skb)
  4681. +static bool iegbe_tx_csum(struct iegbe_adapter *adapter,
  4682. + struct iegbe_tx_ring *tx_ring, struct sk_buff *skb)
  4683. {
  4684. struct iegbe_context_desc *context_desc;
  4685. + struct iegbe_buffer *buffer_info;
  4686. unsigned int i;
  4687. - uint8_t css;
  4688. + u8 css;
  4689. - if (likely(skb->ip_summed == CHECKSUM_HW)) {
  4690. - css = skb->h.raw - skb->data;
  4691. + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
  4692. + css = skb_transport_offset(skb);
  4693. - i = tx_ring->next_to_use;
  4694. - context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
  4695. + i = tx_ring->next_to_use;
  4696. + buffer_info = &tx_ring->buffer_info[i];
  4697. + context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
  4698. + context_desc->lower_setup.ip_config = 0;
  4699. context_desc->upper_setup.tcp_fields.tucss = css;
  4700. - context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
  4701. + context_desc->upper_setup.tcp_fields.tucso =
  4702. + css + skb->csum_offset;
  4703. context_desc->upper_setup.tcp_fields.tucse = 0;
  4704. context_desc->tcp_seg_setup.data = 0;
  4705. context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
  4706. - if (unlikely(++i == tx_ring->count)) { i = 0; }
  4707. + buffer_info->time_stamp = jiffies;
  4708. + buffer_info->next_to_watch = i;
  4709. +
  4710. + if (unlikely(++i == tx_ring->count)) i = 0;
  4711. tx_ring->next_to_use = i;
  4712. - return TRUE;
  4713. + return true;
  4714. }
  4715. - return FALSE;
  4716. + return false;
  4717. }
  4718. -#define E1000_MAX_TXD_PWR 12
  4719. -#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
  4720. +#define E1000_MAX_TXD_PWR 12
  4721. +#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
  4722. -static inline int
  4723. -iegbe_tx_map(struct iegbe_adapter *adapter, struct iegbe_tx_ring *tx_ring,
  4724. - struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
  4725. - unsigned int nr_frags, unsigned int mss)
  4726. +static int iegbe_tx_map(struct iegbe_adapter *adapter,
  4727. + struct iegbe_tx_ring *tx_ring,
  4728. + struct sk_buff *skb, unsigned int first,
  4729. + unsigned int max_per_txd, unsigned int nr_frags,
  4730. + unsigned int mss)
  4731. {
  4732. - struct iegbe_buffer *buffer_info;
  4733. - unsigned int len = skb->len;
  4734. + struct iegbe_hw *hw = &adapter->hw;
  4735. + struct iegbe_buffer *buffer_info;
  4736. + unsigned int len = skb->len;
  4737. unsigned int offset = 0, size, count = 0, i;
  4738. -#ifdef MAX_SKB_FRAGS
  4739. - unsigned int f;
  4740. - len -= skb->data_len;
  4741. -#endif
  4742. + unsigned int f;
  4743. + len -= skb->data_len;
  4744. - i = tx_ring->next_to_use;
  4745. + i = tx_ring->next_to_use;
  4746. +
  4747. + while(len) {
  4748. + buffer_info = &tx_ring->buffer_info[i];
  4749. + size = min(len, max_per_txd);
  4750. + /* Workaround for Controller erratum --
  4751. + * descriptor for non-tso packet in a linear SKB that follows a
  4752. + * tso gets written back prematurely before the data is fully
  4753. + * DMA'd to the controller */
  4754. + if (!skb->data_len && tx_ring->last_tx_tso &&
  4755. + !skb_is_gso(skb)) {
  4756. + tx_ring->last_tx_tso = 0;
  4757. + size -= 4;
  4758. + }
  4759. - while(len) {
  4760. - buffer_info = &tx_ring->buffer_info[i];
  4761. - size = min(len, max_per_txd);
  4762. -#ifdef NETIF_F_TSO
  4763. /* Workaround for premature desc write-backs
  4764. * in TSO mode. Append 4-byte sentinel desc */
  4765. - if(unlikely(mss && !nr_frags && size == len && size > 0x8)) {
  4766. - size -= 0x4;
  4767. + if (unlikely(mss && !nr_frags && size == len && size > 8))
  4768. + size -= 4;
  4769. + /* work-around for errata 10 and it applies
  4770. + * to all controllers in PCI-X mode
  4771. + * The fix is to make sure that the first descriptor of a
  4772. + * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
  4773. + */
  4774. + if (unlikely((hw->bus_type == iegbe_bus_type_pcix) &&
  4775. + (size > 2015) && count == 0))
  4776. + size = 2015;
  4777. +
  4778. + /* Workaround for potential 82544 hang in PCI-X. Avoid
  4779. + * terminating buffers within evenly-aligned dwords. */
  4780. + if(unlikely(adapter->pcix_82544 &&
  4781. + !((unsigned long)(skb->data + offset + size - 1) & 4) &&
  4782. + size > 4))
  4783. + size -= 4;
  4784. +
  4785. + buffer_info->length = size;
  4786. + buffer_info->dma =
  4787. + pci_map_single(adapter->pdev,
  4788. + skb->data + offset,
  4789. + size,
  4790. + PCI_DMA_TODEVICE);
  4791. + buffer_info->time_stamp = jiffies;
  4792. + buffer_info->next_to_watch = i;
  4793. +
  4794. + len -= size;
  4795. + offset += size;
  4796. + count++;
  4797. + if (unlikely(++i == tx_ring->count)) i = 0;
  4798. + }
  4799. +
  4800. + for (f = 0; f < nr_frags; f++) {
  4801. + struct skb_frag_struct *frag;
  4802. +
  4803. + frag = &skb_shinfo(skb)->frags[f];
  4804. + len = frag->size;
  4805. + offset = frag->page_offset;
  4806. +
  4807. + while(len) {
  4808. + buffer_info = &tx_ring->buffer_info[i];
  4809. + size = min(len, max_per_txd);
  4810. + /* Workaround for premature desc write-backs
  4811. + * in TSO mode. Append 4-byte sentinel desc */
  4812. + if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
  4813. + size -= 4;
  4814. + /* Workaround for potential 82544 hang in PCI-X.
  4815. + * Avoid terminating buffers within evenly-aligned
  4816. + * dwords. */
  4817. + if(unlikely(adapter->pcix_82544 &&
  4818. + !((unsigned long)(frag->page+offset+size-1) & 4) &&
  4819. + size > 4))
  4820. + size -= 4;
  4821. +
  4822. + buffer_info->length = size;
  4823. + buffer_info->dma =
  4824. + pci_map_page(adapter->pdev,
  4825. + frag->page,
  4826. + offset,
  4827. + size,
  4828. + PCI_DMA_TODEVICE);
  4829. + buffer_info->time_stamp = jiffies;
  4830. + buffer_info->next_to_watch = i;
  4831. +
  4832. + len -= size;
  4833. + offset += size;
  4834. + count++;
  4835. + if (unlikely(++i == tx_ring->count)) i = 0;
  4836. }
  4837. -#endif
  4838. - /* work-around for errata 10 and it applies
  4839. - * to all controllers in PCI-X mode
  4840. - * The fix is to make sure that the first descriptor of a
  4841. - * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
  4842. - */
  4843. - if(unlikely((adapter->hw.bus_type == iegbe_bus_type_pcix) &&
  4844. - (size > 0x7df) && count == 0)) {
  4845. - size = 0x7df;
  4846. - }
  4847. - /* Workaround for potential 82544 hang in PCI-X. Avoid
  4848. - * terminating buffers within evenly-aligned dwords. */
  4849. - if(unlikely(adapter->pcix_82544 &&
  4850. - !((unsigned long)(skb->data + offset + size - 0x8) & 0x4) &&
  4851. - size > 0x4)) {
  4852. - size -= 0x4;
  4853. - }
  4854. - buffer_info->length = size;
  4855. - buffer_info->dma =
  4856. - pci_map_single(adapter->pdev,
  4857. - skb->data + offset,
  4858. - size,
  4859. - PCI_DMA_TODEVICE);
  4860. - buffer_info->time_stamp = jiffies;
  4861. -
  4862. - len -= size;
  4863. - offset += size;
  4864. - count++;
  4865. - if(unlikely(++i == tx_ring->count)) { i = 0; }
  4866. - }
  4867. -
  4868. -#ifdef MAX_SKB_FRAGS
  4869. - for(f = 0; f < nr_frags; f++) {
  4870. - struct skb_frag_struct *frag;
  4871. -
  4872. - frag = &skb_shinfo(skb)->frags[f];
  4873. - len = frag->size;
  4874. - offset = frag->page_offset;
  4875. -
  4876. - while(len) {
  4877. - buffer_info = &tx_ring->buffer_info[i];
  4878. - size = min(len, max_per_txd);
  4879. -#ifdef NETIF_F_TSO
  4880. - /* Workaround for premature desc write-backs
  4881. - * in TSO mode. Append 4-byte sentinel desc */
  4882. - if(unlikely(mss && f == (nr_frags-0x1) &&
  4883. - size == len && size > 0x8)) {
  4884. - size -= 0x4;
  4885. - }
  4886. -#endif
  4887. - /* Workaround for potential 82544 hang in PCI-X.
  4888. - * Avoid terminating buffers within evenly-aligned
  4889. - * dwords. */
  4890. - if(unlikely(adapter->pcix_82544 &&
  4891. - !((unsigned long)(frag->page+offset+size-0x1) & 0x4) &&
  4892. - size > 0x4)) {
  4893. - size -= 0x4;
  4894. - }
  4895. - buffer_info->length = size;
  4896. - buffer_info->dma =
  4897. - pci_map_page(adapter->pdev,
  4898. - frag->page,
  4899. - offset,
  4900. - size,
  4901. - PCI_DMA_TODEVICE);
  4902. - buffer_info->time_stamp = jiffies;
  4903. -
  4904. - len -= size;
  4905. - offset += size;
  4906. - count++;
  4907. - if(unlikely(++i == tx_ring->count)) { i = 0; }
  4908. - }
  4909. - }
  4910. -#endif
  4911. + }
  4912. - i = (i == 0) ? tx_ring->count - 0x1 : i - 0x1;
  4913. - tx_ring->buffer_info[i].skb = skb;
  4914. - tx_ring->buffer_info[first].next_to_watch = i;
  4915. + i = (i == 0) ? tx_ring->count - 1 : i - 1;
  4916. + tx_ring->buffer_info[i].skb = skb;
  4917. + tx_ring->buffer_info[first].next_to_watch = i;
  4918. - return count;
  4919. + return count;
  4920. }
  4921. -static inline void
  4922. -iegbe_tx_queue(struct iegbe_adapter *adapter, struct iegbe_tx_ring *tx_ring,
  4923. - int tx_flags, int count)
  4924. +static void iegbe_tx_queue(struct iegbe_adapter *adapter,
  4925. + struct iegbe_tx_ring *tx_ring, int tx_flags,
  4926. + int count)
  4927. {
  4928. + struct iegbe_hw *hw = &adapter->hw;
  4929. struct iegbe_tx_desc *tx_desc = NULL;
  4930. struct iegbe_buffer *buffer_info;
  4931. - uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
  4932. + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
  4933. unsigned int i;
  4934. - if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
  4935. + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
  4936. txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
  4937. E1000_TXD_CMD_TSE;
  4938. - txd_upper |= E1000_TXD_POPTS_TXSM << 0x8;
  4939. + txd_upper |= E1000_TXD_POPTS_TXSM << 8;
  4940. - if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) {
  4941. - txd_upper |= E1000_TXD_POPTS_IXSM << 0x8;
  4942. - }
  4943. + if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
  4944. + txd_upper |= E1000_TXD_POPTS_IXSM << 8;
  4945. }
  4946. - if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
  4947. + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
  4948. txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
  4949. - txd_upper |= E1000_TXD_POPTS_TXSM << 0x8;
  4950. - }
  4951. -
  4952. - if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
  4953. - txd_lower |= E1000_TXD_CMD_VLE;
  4954. - txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
  4955. + txd_upper |= E1000_TXD_POPTS_TXSM << 8;
  4956. }
  4957. - i = tx_ring->next_to_use;
  4958. + if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
  4959. + txd_lower |= E1000_TXD_CMD_VLE;
  4960. + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
  4961. + }
  4962. - while(count--) {
  4963. - buffer_info = &tx_ring->buffer_info[i];
  4964. - tx_desc = E1000_TX_DESC(*tx_ring, i);
  4965. - tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
  4966. - tx_desc->lower.data =
  4967. - cpu_to_le32(txd_lower | buffer_info->length);
  4968. - tx_desc->upper.data = cpu_to_le32(txd_upper);
  4969. - if(unlikely(++i == tx_ring->count)) { i = 0; }
  4970. - }
  4971. - if(tx_desc != NULL) {
  4972. - tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
  4973. - }
  4974. - /* Force memory writes to complete before letting h/w
  4975. - * know there are new descriptors to fetch. (Only
  4976. - * applicable for weak-ordered memory model archs,
  4977. - * such as IA-64). */
  4978. - wmb();
  4979. + i = tx_ring->next_to_use;
  4980. - tx_ring->next_to_use = i;
  4981. - writel(i, adapter->hw.hw_addr + tx_ring->tdt);
  4982. + while(count--) {
  4983. + buffer_info = &tx_ring->buffer_info[i];
  4984. + tx_desc = E1000_TX_DESC(*tx_ring, i);
  4985. + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
  4986. + tx_desc->lower.data =
  4987. + cpu_to_le32(txd_lower | buffer_info->length);
  4988. + tx_desc->upper.data = cpu_to_le32(txd_upper);
  4989. + if (unlikely(++i == tx_ring->count)) i = 0;
  4990. + }
  4991. +
  4992. + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
  4993. +
  4994. + /* Force memory writes to complete before letting h/w
  4995. + * know there are new descriptors to fetch. (Only
  4996. + * applicable for weak-ordered memory model archs,
  4997. + * such as IA-64). */
  4998. + wmb();
  4999. +
  5000. + tx_ring->next_to_use = i;
  5001. + writel(i, hw->hw_addr + tx_ring->tdt);
  5002. + /* we need this if more than one processor can write to our tail
   5003. + * at a time, it synchronizes IO on IA64/Altix systems */
  5004. + mmiowb();
  5005. }
  5006. /**
  5007. @@ -2950,113 +2925,132 @@ iegbe_tx_queue(struct iegbe_adapter *ada
  5008. * to the beginning of the Tx FIFO.
  5009. **/
  5010. -static inline int
  5011. -iegbe_82547_fifo_workaround(struct iegbe_adapter *adapter, struct sk_buff *skb)
  5012. +#define E1000_FIFO_HDR 0x10
  5013. +#define E1000_82547_PAD_LEN 0x3E0
  5014. +static int iegbe_82547_fifo_workaround(struct iegbe_adapter *adapter,
  5015. + struct sk_buff *skb)
  5016. {
  5017. - uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
  5018. - uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
  5019. + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
  5020. + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
  5021. - E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
  5022. + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
  5023. - if(adapter->link_duplex != HALF_DUPLEX) {
  5024. - goto no_fifo_stall_required;
  5025. - }
  5026. - if(atomic_read(&adapter->tx_fifo_stall)) {
  5027. - return 1;
  5028. + if (adapter->link_duplex != HALF_DUPLEX)
  5029. + goto no_fifo_stall_required;
  5030. +
  5031. + if (atomic_read(&adapter->tx_fifo_stall))
  5032. + return 1;
  5033. +
  5034. + if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
  5035. + atomic_set(&adapter->tx_fifo_stall, 1);
  5036. + return 1;
  5037. }
  5038. - if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
  5039. - atomic_set(&adapter->tx_fifo_stall, 0x1);
  5040. - return 1;
  5041. - }
  5042. no_fifo_stall_required:
  5043. - adapter->tx_fifo_head += skb_fifo_len;
  5044. - if(adapter->tx_fifo_head >= adapter->tx_fifo_size) {
  5045. - adapter->tx_fifo_head -= adapter->tx_fifo_size;
  5046. - }
  5047. + adapter->tx_fifo_head += skb_fifo_len;
  5048. + if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
  5049. + adapter->tx_fifo_head -= adapter->tx_fifo_size;
  5050. return 0;
  5051. }
  5052. -static inline int
  5053. -iegbe_transfer_dhcp_info(struct iegbe_adapter *adapter, struct sk_buff *skb)
  5054. +#define MINIMUM_DHCP_PACKET_SIZE 282
  5055. +static int iegbe_transfer_dhcp_info(struct iegbe_adapter *adapter,
  5056. + struct sk_buff *skb)
  5057. {
  5058. struct iegbe_hw *hw = &adapter->hw;
  5059. - uint16_t length, offset;
  5060. -#ifdef NETIF_F_HW_VLAN_TX
  5061. - if(vlan_tx_tag_present(skb)) {
  5062. - if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
  5063. - ( adapter->hw.mng_cookie.status &
  5064. - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) {
  5065. + u16 length, offset;
  5066. + if (vlan_tx_tag_present(skb)) {
  5067. + if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
  5068. + ( hw->mng_cookie.status &
  5069. + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
  5070. return 0;
  5071. }
  5072. - }
  5073. -#endif
  5074. - if(htons(ETH_P_IP) == skb->protocol) {
  5075. - const struct iphdr *ip = skb->nh.iph;
  5076. - if(IPPROTO_UDP == ip->protocol) {
  5077. - struct udphdr *udp = (struct udphdr *)(skb->h.uh);
  5078. - if(ntohs(udp->dest) == 0x43) { /* 0x43 = 67 */
  5079. - offset = (uint8_t *)udp + 0x8 - skb->data;
  5080. - length = skb->len - offset;
  5081. -
  5082. - return iegbe_mng_write_dhcp_info(hw,
  5083. - (uint8_t *)udp + 0x8, length);
  5084. - }
  5085. - }
  5086. - } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
  5087. - struct ethhdr *eth = (struct ethhdr *) skb->data;
  5088. - if((htons(ETH_P_IP) == eth->h_proto)) {
  5089. + if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
  5090. + struct ethhdr *eth = (struct ethhdr *)skb->data;
  5091. + if ((htons(ETH_P_IP) == eth->h_proto)) {
  5092. const struct iphdr *ip =
  5093. - (struct iphdr *)((uint8_t *)skb->data+0xe);
  5094. - if(IPPROTO_UDP == ip->protocol) {
  5095. + (struct iphdr *)((u8 *)skb->data+14);
  5096. + if (IPPROTO_UDP == ip->protocol) {
  5097. struct udphdr *udp =
  5098. - (struct udphdr *)((uint8_t *)ip +
  5099. - (ip->ihl << 0x2));
  5100. - if(ntohs(udp->dest) == 0x43) {
  5101. - offset = (uint8_t *)udp + 0x8 - skb->data;
  5102. + (struct udphdr *)((u8 *)ip +
  5103. + (ip->ihl << 2));
  5104. + if (ntohs(udp->dest) == 67) {
  5105. + offset = (u8 *)udp + 8 - skb->data;
  5106. length = skb->len - offset;
  5107. return iegbe_mng_write_dhcp_info(hw,
  5108. - (uint8_t *)udp + 0x8,
  5109. + (u8 *)udp + 8,
  5110. length);
  5111. - }
  5112. + }
  5113. }
  5114. }
  5115. }
  5116. return 0;
  5117. }
  5118. -static int
  5119. -iegbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
  5120. +static int __iegbe_maybe_stop_tx(struct net_device *netdev, int size)
  5121. +{
  5122. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  5123. + struct iegbe_tx_ring *tx_ring = adapter->tx_ring;
  5124. +
  5125. + netif_stop_queue(netdev);
  5126. + /* Herbert's original patch had:
  5127. + * smp_mb__after_netif_stop_queue();
  5128. + * but since that doesn't exist yet, just open code it. */
  5129. + smp_mb();
  5130. +
  5131. + /* We need to check again in a case another CPU has just
  5132. + * made room available. */
  5133. + if (likely(E1000_DESC_UNUSED(tx_ring) < size))
  5134. + return -EBUSY;
  5135. +
  5136. + /* A reprieve! */
  5137. + netif_start_queue(netdev);
  5138. + ++adapter->restart_queue;
  5139. + return 0;
  5140. +}
  5141. +
  5142. +static int iegbe_maybe_stop_tx(struct net_device *netdev,
  5143. + struct iegbe_tx_ring *tx_ring, int size)
  5144. +{
  5145. + if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
  5146. + return 0;
  5147. + return __iegbe_maybe_stop_tx(netdev, size);
  5148. +}
  5149. +
  5150. +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
  5151. +static int iegbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
  5152. {
  5153. struct iegbe_adapter *adapter = netdev_priv(netdev);
  5154. + struct iegbe_hw *hw = &adapter->hw;
  5155. struct iegbe_tx_ring *tx_ring;
  5156. unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
  5157. unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
  5158. unsigned int tx_flags = 0;
  5159. - unsigned int len = skb->len;
  5160. + unsigned int len = skb->len - skb->data_len;
  5161. unsigned long flags = 0;
  5162. - unsigned int nr_frags = 0;
  5163. - unsigned int mss = 0;
  5164. + unsigned int nr_frags;
  5165. + unsigned int mss;
  5166. int count = 0;
  5167. - int tso;
  5168. -#ifdef MAX_SKB_FRAGS
  5169. + int tso;
  5170. unsigned int f;
  5171. - len -= skb->data_len;
  5172. -#endif
  5173. -#ifdef CONFIG_E1000_MQ
  5174. - tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
  5175. -#else
  5176. + /* This goes back to the question of how to logically map a tx queue
  5177. + * to a flow. Right now, performance is impacted slightly negatively
  5178. + * if using multiple tx queues. If the stack breaks away from a
  5179. + * single qdisc implementation, we can look at this again. */
  5180. tx_ring = adapter->tx_ring;
  5181. -#endif
  5182. if (unlikely(skb->len <= 0)) {
  5183. dev_kfree_skb_any(skb);
  5184. return NETDEV_TX_OK;
  5185. }
  5186. -#ifdef NETIF_F_TSO
  5187. + /* 82571 and newer doesn't need the workaround that limited descriptor
  5188. + * length to 4kB */
  5189. + if (hw->mac_type >= iegbe_82571)
  5190. + max_per_txd = 8192;
  5191. +
  5192. mss = skb_shinfo(skb)->gso_size;
  5193. /* The controller does a simple calculation to
  5194. * make sure there is enough room in the FIFO before
  5195. @@ -3064,164 +3058,150 @@ iegbe_xmit_frame(struct sk_buff *skb, st
  5196. * 4 = ceil(buffer len/mss). To make sure we don't
  5197. * overrun the FIFO, adjust the max buffer len if mss
  5198. * drops. */
  5199. - if(mss) {
  5200. - max_per_txd = min(mss << 0x2, max_per_txd);
  5201. - max_txd_pwr = fls(max_per_txd) - 0x1;
  5202. + if (mss) {
  5203. + u8 hdr_len;
  5204. + max_per_txd = min(mss << 2, max_per_txd);
  5205. + max_txd_pwr = fls(max_per_txd) - 1;
  5206. +
  5207. + /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
  5208. + * points to just header, pull a few bytes of payload from
  5209. + * frags into skb->data */
  5210. + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
  5211. + if (skb->data_len && hdr_len == len) {
  5212. + switch (hw->mac_type) {
  5213. + case iegbe_82544:
  5214. + /* Make sure we have room to chop off 4 bytes,
  5215. + * and that the end alignment will work out to
  5216. + * this hardware's requirements
  5217. + * NOTE: this is a TSO only workaround
  5218. + * if end byte alignment not correct move us
  5219. + * into the next dword */
  5220. + break;
5221. + /* no fall-through: the 82544 case breaks out above */
  5222. + case iegbe_82571:
  5223. + case iegbe_82572:
  5224. + case iegbe_82573:
  5225. + break;
  5226. + default:
  5227. + /* do nothing */
  5228. + break;
  5229. + }
  5230. + }
  5231. }
  5232. - if((mss) || (skb->ip_summed == CHECKSUM_HW)) {
  5233. + /* reserve a descriptor for the offload context */
  5234. + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
  5235. count++;
  5236. - }
  5237. count++;
  5238. -#else
  5239. - if(skb->ip_summed == CHECKSUM_HW) {
  5240. +
  5241. + /* Controller Erratum workaround */
  5242. + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
  5243. count++;
  5244. - {
  5245. -#endif
  5246. +
  5247. count += TXD_USE_COUNT(len, max_txd_pwr);
  5248. - if(adapter->pcix_82544) {
  5249. + if (adapter->pcix_82544)
  5250. count++;
  5251. - }
  5252. +
  5253. /* work-around for errata 10 and it applies to all controllers
  5254. * in PCI-X mode, so add one more descriptor to the count
  5255. */
  5256. - if(unlikely((adapter->hw.bus_type == iegbe_bus_type_pcix) &&
  5257. - (len > 0x7df))) {
  5258. + if (unlikely((hw->bus_type == iegbe_bus_type_pcix) &&
  5259. + (len > 2015)))
  5260. count++;
  5261. - }
  5262. -#ifdef MAX_SKB_FRAGS
  5263. +
  5264. nr_frags = skb_shinfo(skb)->nr_frags;
  5265. - for(f = 0; f < nr_frags; f++)
  5266. + for (f = 0; f < nr_frags; f++)
  5267. count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
  5268. max_txd_pwr);
  5269. - if(adapter->pcix_82544) {
  5270. + if (adapter->pcix_82544)
  5271. count += nr_frags;
  5272. - }
  5273. -#ifdef NETIF_F_TSO
  5274. - /* TSO Workaround for 82571/2 Controllers -- if skb->data
  5275. - * points to just header, pull a few bytes of payload from
  5276. - * frags into skb->data */
  5277. - if (skb_is_gso(skb)) {
  5278. - uint8_t hdr_len;
  5279. - hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 0x2));
  5280. - if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
  5281. - (adapter->hw.mac_type == iegbe_82571 ||
  5282. - adapter->hw.mac_type == iegbe_82572)) {
  5283. - unsigned int pull_size;
  5284. - pull_size = min((unsigned int)0x4, skb->data_len);
  5285. - if (!__pskb_pull_tail(skb, pull_size)) {
  5286. - printk(KERN_ERR "__pskb_pull_tail failed.\n");
  5287. - dev_kfree_skb_any(skb);
  5288. - return -EFAULT;
  5289. - }
  5290. - }
  5291. - }
  5292. -#endif
  5293. -#endif
  5294. - if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == iegbe_82573) ) {
  5295. +
  5296. + if (hw->tx_pkt_filtering &&
  5297. + (hw->mac_type == iegbe_82573))
  5298. iegbe_transfer_dhcp_info(adapter, skb);
  5299. - }
  5300. -#ifdef NETIF_F_LLTX
  5301. - local_irq_save(flags);
  5302. - if (!spin_trylock(&tx_ring->tx_lock)) {
  5303. +
  5304. + if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
  5305. /* Collision - tell upper layer to requeue */
  5306. - local_irq_restore(flags);
  5307. return NETDEV_TX_LOCKED;
  5308. - }
  5309. -#else
  5310. - spin_lock_irqsave(&tx_ring->tx_lock, flags);
  5311. -#endif
  5312. /* need: count + 2 desc gap to keep tail from touching
  5313. * head, otherwise try next time */
  5314. - if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 0x2)) {
  5315. - netif_stop_queue(netdev);
  5316. + if (unlikely(iegbe_maybe_stop_tx(netdev, tx_ring, count + 2))) {
  5317. spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  5318. return NETDEV_TX_BUSY;
  5319. }
  5320. - if(unlikely(adapter->hw.mac_type == iegbe_82547)) {
  5321. - if(unlikely(iegbe_82547_fifo_workaround(adapter, skb))) {
  5322. + if (unlikely(hw->mac_type == iegbe_82547)) {
  5323. + if (unlikely(iegbe_82547_fifo_workaround(adapter, skb))) {
  5324. netif_stop_queue(netdev);
  5325. - mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
  5326. + mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
  5327. spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  5328. return NETDEV_TX_BUSY;
  5329. }
  5330. }
  5331. -#ifndef NETIF_F_LLTX
  5332. - spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  5333. -#endif
  5334. -
  5335. -#ifdef NETIF_F_HW_VLAN_TX
  5336. - if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
  5337. + if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
  5338. tx_flags |= E1000_TX_FLAGS_VLAN;
  5339. tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
  5340. }
  5341. -#endif
  5342. first = tx_ring->next_to_use;
  5343. tso = iegbe_tso(adapter, tx_ring, skb);
  5344. if (tso < 0) {
  5345. dev_kfree_skb_any(skb);
  5346. -#ifdef NETIF_F_LLTX
  5347. spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  5348. -#endif
  5349. return NETDEV_TX_OK;
  5350. }
  5351. - if (likely(tso)) {
  5352. + if (likely(tso)) {
  5353. + tx_ring->last_tx_tso = 1;
  5354. tx_flags |= E1000_TX_FLAGS_TSO;
  5355. - } else if (likely(iegbe_tx_csum(adapter, tx_ring, skb))) {
  5356. + } else if (likely(iegbe_tx_csum(adapter, tx_ring, skb)))
  5357. tx_flags |= E1000_TX_FLAGS_CSUM;
  5358. - }
  5359. +
  5360. /* Old method was to assume IPv4 packet by default if TSO was enabled.
  5361. * 82571 hardware supports TSO capabilities for IPv6 as well...
  5362. * no longer assume, we must. */
  5363. - if (likely(skb->protocol == ntohs(ETH_P_IP))) {
  5364. + if (likely(skb->protocol == htons(ETH_P_IP)))
  5365. tx_flags |= E1000_TX_FLAGS_IPV4;
  5366. - }
  5367. +
  5368. iegbe_tx_queue(adapter, tx_ring, tx_flags,
  5369. iegbe_tx_map(adapter, tx_ring, skb, first,
  5370. max_per_txd, nr_frags, mss));
  5371. netdev->trans_start = jiffies;
  5372. -#ifdef NETIF_F_LLTX
  5373. /* Make sure there is space in the ring for the next send. */
  5374. - if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 0x2)) {
  5375. - netif_stop_queue(netdev);
  5376. - }
  5377. - spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  5378. -#endif
  5379. + iegbe_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
  5380. + spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  5381. return NETDEV_TX_OK;
  5382. }
  5383. +
  5384. /**
  5385. * iegbe_tx_timeout - Respond to a Tx Hang
  5386. * @netdev: network interface device structure
  5387. **/
  5388. -static void
  5389. -iegbe_tx_timeout(struct net_device *netdev)
  5390. +static void iegbe_tx_timeout(struct net_device *netdev)
  5391. {
  5392. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  5393. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  5394. - /* Do the reset outside of interrupt context */
  5395. - schedule_work(&adapter->tx_timeout_task);
  5396. + /* Do the reset outside of interrupt context */
  5397. + adapter->tx_timeout_count++;
  5398. + schedule_work(&adapter->reset_task);
  5399. }
  5400. -static void
  5401. -iegbe_tx_timeout_task(struct net_device *netdev)
  5402. +static void iegbe_reset_task(struct work_struct *work)
  5403. {
  5404. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  5405. + struct iegbe_adapter *adapter =
  5406. + container_of(work, struct iegbe_adapter, reset_task);
  5407. - iegbe_down(adapter);
  5408. - iegbe_up(adapter);
  5409. + iegbe_reinit_locked(adapter);
  5410. }
  5411. /**
  5412. @@ -3232,13 +3212,12 @@ iegbe_tx_timeout_task(struct net_device
  5413. * The statistics are actually updated from the timer callback.
  5414. **/
  5415. -static struct net_device_stats *
  5416. -iegbe_get_stats(struct net_device *netdev)
  5417. +static struct net_device_stats *iegbe_get_stats(struct net_device *netdev)
  5418. {
  5419. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  5420. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  5421. - iegbe_update_stats(adapter);
  5422. - return &adapter->net_stats;
  5423. + /* only return the current stats */
  5424. + return &adapter->net_stats;
  5425. }
  5426. /**
  5427. @@ -3249,67 +3228,55 @@ iegbe_get_stats(struct net_device *netde
  5428. * Returns 0 on success, negative on failure
  5429. **/
  5430. -static int
  5431. -iegbe_change_mtu(struct net_device *netdev, int new_mtu)
  5432. +static int iegbe_change_mtu(struct net_device *netdev, int new_mtu)
  5433. {
  5434. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  5435. - int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
  5436. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  5437. + struct iegbe_hw *hw = &adapter->hw;
  5438. + int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
  5439. - if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
  5440. - (max_frame > MAX_JUMBO_FRAME_SIZE)) {
  5441. - DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
  5442. - return -EINVAL;
  5443. - }
  5444. + if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
  5445. + (max_frame > MAX_JUMBO_FRAME_SIZE)) {
  5446. + DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
  5447. + return -EINVAL;
  5448. + }
  5449. + /* Adapter-specific max frame size limits. */
  5450. + switch (hw->mac_type) {
  5451. + case iegbe_undefined ... iegbe_82542_rev2_1:
  5452. + if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
  5453. + DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
  5454. + return -EINVAL;
  5455. + }
  5456. + break;
  5457. + case iegbe_82571:
  5458. + case iegbe_82572:
  5459. #define MAX_STD_JUMBO_FRAME_SIZE 9234
  5460. - /* might want this to be bigger enum check... */
  5461. - /* 82571 controllers limit jumbo frame size to 10500 bytes */
  5462. - if ((adapter->hw.mac_type == iegbe_82571 ||
  5463. - adapter->hw.mac_type == iegbe_82572) &&
  5464. - max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
  5465. - DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
  5466. - "on 82571 and 82572 controllers.\n");
  5467. - return -EINVAL;
  5468. - }
  5469. -
  5470. - if(adapter->hw.mac_type == iegbe_82573 &&
  5471. - max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
  5472. - DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
  5473. - "on 82573\n");
  5474. - return -EINVAL;
  5475. - }
  5476. -
  5477. - if(adapter->hw.mac_type > iegbe_82547_rev_2) {
  5478. - adapter->rx_buffer_len = max_frame;
  5479. - E1000_ROUNDUP(adapter->rx_buffer_len, 0x1024);
  5480. - } else {
  5481. - if(unlikely((adapter->hw.mac_type < iegbe_82543) &&
  5482. - (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
  5483. - DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
  5484. - "on 82542\n");
  5485. - return -EINVAL;
  5486. -
  5487. - } else {
  5488. - if(max_frame <= E1000_RXBUFFER_2048) {
  5489. - adapter->rx_buffer_len = E1000_RXBUFFER_2048;
  5490. - } else if(max_frame <= E1000_RXBUFFER_4096) {
  5491. - adapter->rx_buffer_len = E1000_RXBUFFER_4096;
  5492. - } else if(max_frame <= E1000_RXBUFFER_8192) {
  5493. - adapter->rx_buffer_len = E1000_RXBUFFER_8192;
  5494. - } else if(max_frame <= E1000_RXBUFFER_16384) {
  5495. - adapter->rx_buffer_len = E1000_RXBUFFER_16384;
  5496. - }
  5497. + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
  5498. + DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
  5499. + return -EINVAL;
  5500. }
  5501. + break;
  5502. + default:
  5503. + break;
  5504. }
  5505. + if (max_frame <= E1000_RXBUFFER_256)
  5506. + adapter->rx_buffer_len = E1000_RXBUFFER_256;
  5507. + else if (max_frame <= E1000_RXBUFFER_2048)
  5508. + adapter->rx_buffer_len = E1000_RXBUFFER_2048;
  5509. + else if (max_frame <= E1000_RXBUFFER_4096)
  5510. + adapter->rx_buffer_len = E1000_RXBUFFER_4096;
  5511. + else if (max_frame <= E1000_RXBUFFER_8192)
  5512. + adapter->rx_buffer_len = E1000_RXBUFFER_8192;
  5513. + else if (max_frame <= E1000_RXBUFFER_16384)
  5514. + adapter->rx_buffer_len = E1000_RXBUFFER_16384;
  5515. - netdev->mtu = new_mtu;
  5516. + /* adjust allocation if LPE protects us, and we aren't using SBP */
  5517. - if(netif_running(netdev)) {
  5518. - iegbe_down(adapter);
  5519. - iegbe_up(adapter);
  5520. - }
  5521. + netdev->mtu = new_mtu;
  5522. + hw->max_frame_size = max_frame;
  5523. - adapter->hw.max_frame_size = max_frame;
  5524. + if (netif_running(netdev))
  5525. + iegbe_reinit_locked(adapter);
  5526. return 0;
  5527. }
  5528. @@ -3319,224 +3286,189 @@ iegbe_change_mtu(struct net_device *netd
  5529. * @adapter: board private structure
  5530. **/
  5531. -void
  5532. -iegbe_update_stats(struct iegbe_adapter *adapter)
  5533. +void iegbe_update_stats(struct iegbe_adapter *adapter)
  5534. {
  5535. - struct iegbe_hw *hw = &adapter->hw;
  5536. - unsigned long flags = 0;
  5537. - uint16_t phy_tmp;
  5538. + struct iegbe_hw *hw = &adapter->hw;
  5539. + unsigned long flags = 0x0;
  5540. + uint16_t phy_tmp;
  5541. #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
  5542. - spin_lock_irqsave(&adapter->stats_lock, flags);
  5543. + spin_lock_irqsave(&adapter->stats_lock, flags);
  5544. - /* these counters are modified from iegbe_adjust_tbi_stats,
  5545. - * called from the interrupt context, so they must only
  5546. - * be written while holding adapter->stats_lock
  5547. - */
  5548. + /* these counters are modified from iegbe_adjust_tbi_stats,
  5549. + * called from the interrupt context, so they must only
  5550. + * be written while holding adapter->stats_lock
  5551. + */
  5552. - adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
  5553. - adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
  5554. - adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
  5555. - adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
  5556. - adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
  5557. - adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
  5558. - adapter->stats.roc += E1000_READ_REG(hw, ROC);
  5559. - adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
  5560. - adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
  5561. - adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
  5562. - adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
  5563. - adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
  5564. - adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
  5565. -
  5566. - adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
  5567. - adapter->stats.mpc += E1000_READ_REG(hw, MPC);
  5568. - adapter->stats.scc += E1000_READ_REG(hw, SCC);
  5569. - adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
  5570. - adapter->stats.mcc += E1000_READ_REG(hw, MCC);
  5571. - adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
  5572. - adapter->stats.dc += E1000_READ_REG(hw, DC);
  5573. - adapter->stats.sec += E1000_READ_REG(hw, SEC);
  5574. - adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
  5575. - adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
  5576. - adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
  5577. - adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
  5578. - adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
  5579. - adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
  5580. - adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
  5581. - adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
  5582. - adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
  5583. - adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
  5584. - adapter->stats.ruc += E1000_READ_REG(hw, RUC);
  5585. - adapter->stats.rfc += E1000_READ_REG(hw, RFC);
  5586. - adapter->stats.rjc += E1000_READ_REG(hw, RJC);
  5587. - adapter->stats.torl += E1000_READ_REG(hw, TORL);
  5588. - adapter->stats.torh += E1000_READ_REG(hw, TORH);
  5589. - adapter->stats.totl += E1000_READ_REG(hw, TOTL);
  5590. - adapter->stats.toth += E1000_READ_REG(hw, TOTH);
  5591. - adapter->stats.tpr += E1000_READ_REG(hw, TPR);
  5592. - adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
  5593. - adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
  5594. - adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
  5595. - adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
  5596. - adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
  5597. - adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
  5598. - adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
  5599. - adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
  5600. -
  5601. - /* used for adaptive IFS */
  5602. -
  5603. - hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
  5604. - adapter->stats.tpt += hw->tx_packet_delta;
  5605. - hw->collision_delta = E1000_READ_REG(hw, COLC);
  5606. - adapter->stats.colc += hw->collision_delta;
  5607. -
  5608. - if(hw->mac_type >= iegbe_82543) {
  5609. - adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
  5610. - adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
  5611. - adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
  5612. - adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
  5613. - adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
  5614. - adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
  5615. - }
  5616. - if(hw->mac_type > iegbe_82547_rev_2) {
  5617. - adapter->stats.iac += E1000_READ_REG(hw, IAC);
  5618. - adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
  5619. - adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
  5620. - adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
  5621. - adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
  5622. - adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
  5623. - adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
  5624. - adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
  5625. - adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
  5626. - }
  5627. -
  5628. - /* Fill out the OS statistics structure */
  5629. -
  5630. - adapter->net_stats.rx_packets = adapter->stats.gprc;
  5631. - adapter->net_stats.tx_packets = adapter->stats.gptc;
  5632. - adapter->net_stats.rx_bytes = adapter->stats.gorcl;
  5633. - adapter->net_stats.tx_bytes = adapter->stats.gotcl;
  5634. - adapter->net_stats.multicast = adapter->stats.mprc;
  5635. - adapter->net_stats.collisions = adapter->stats.colc;
  5636. -
  5637. - /* Rx Errors */
  5638. -
  5639. - adapter->net_stats.rx_errors = adapter->stats.rxerrc +
  5640. - adapter->stats.crcerrs + adapter->stats.algnerrc +
  5641. - adapter->stats.rlec + adapter->stats.mpc +
  5642. - adapter->stats.cexterr;
  5643. - adapter->net_stats.rx_dropped = adapter->stats.mpc;
  5644. - adapter->net_stats.rx_length_errors = adapter->stats.rlec;
  5645. - adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
  5646. - adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
  5647. - adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
  5648. - adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
  5649. -
  5650. - /* Tx Errors */
  5651. -
  5652. - adapter->net_stats.tx_errors = adapter->stats.ecol +
  5653. - adapter->stats.latecol;
  5654. - adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
  5655. - adapter->net_stats.tx_window_errors = adapter->stats.latecol;
  5656. - adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
  5657. + adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
  5658. + adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
  5659. + adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
  5660. + adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
  5661. + adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
  5662. + adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
  5663. + adapter->stats.roc += E1000_READ_REG(hw, ROC);
  5664. + adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
  5665. + adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
  5666. + adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
  5667. + adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
  5668. + adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
  5669. + adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
  5670. +
  5671. + adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
  5672. + adapter->stats.mpc += E1000_READ_REG(hw, MPC);
  5673. + adapter->stats.scc += E1000_READ_REG(hw, SCC);
  5674. + adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
  5675. + adapter->stats.mcc += E1000_READ_REG(hw, MCC);
  5676. + adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
  5677. + adapter->stats.dc += E1000_READ_REG(hw, DC);
  5678. + adapter->stats.sec += E1000_READ_REG(hw, SEC);
  5679. + adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
  5680. + adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
  5681. + adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
  5682. + adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
  5683. + adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
  5684. + adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
  5685. + adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
  5686. + adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
  5687. + adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
  5688. + adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
  5689. + adapter->stats.ruc += E1000_READ_REG(hw, RUC);
  5690. + adapter->stats.rfc += E1000_READ_REG(hw, RFC);
  5691. + adapter->stats.rjc += E1000_READ_REG(hw, RJC);
  5692. + adapter->stats.torl += E1000_READ_REG(hw, TORL);
  5693. + adapter->stats.torh += E1000_READ_REG(hw, TORH);
  5694. + adapter->stats.totl += E1000_READ_REG(hw, TOTL);
  5695. + adapter->stats.toth += E1000_READ_REG(hw, TOTH);
  5696. + adapter->stats.tpr += E1000_READ_REG(hw, TPR);
  5697. + adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
  5698. + adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
  5699. + adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
  5700. + adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
  5701. + adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
  5702. + adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
  5703. + adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
  5704. + adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
  5705. +
  5706. + /* used for adaptive IFS */
  5707. +
  5708. + hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
  5709. + adapter->stats.tpt += hw->tx_packet_delta;
  5710. + hw->collision_delta = E1000_READ_REG(hw, COLC);
  5711. + adapter->stats.colc += hw->collision_delta;
  5712. +
  5713. + if(hw->mac_type >= iegbe_82543) {
  5714. + adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
  5715. + adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
  5716. + adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
  5717. + adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
  5718. + adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
  5719. + adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
  5720. + }
  5721. + if(hw->mac_type > iegbe_82547_rev_2) {
  5722. + adapter->stats.iac += E1000_READ_REG(hw, IAC);
  5723. + adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
  5724. + adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
  5725. + adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
  5726. + adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
  5727. + adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
  5728. + adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
  5729. + adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
  5730. + adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
  5731. + }
  5732. +
  5733. + /* Fill out the OS statistics structure */
  5734. +
  5735. + adapter->net_stats.rx_packets = adapter->stats.gprc;
  5736. + adapter->net_stats.tx_packets = adapter->stats.gptc;
  5737. + adapter->net_stats.rx_bytes = adapter->stats.gorcl;
  5738. + adapter->net_stats.tx_bytes = adapter->stats.gotcl;
  5739. + adapter->net_stats.multicast = adapter->stats.mprc;
  5740. + adapter->net_stats.collisions = adapter->stats.colc;
  5741. +
  5742. + /* Rx Errors */
  5743. +
  5744. + adapter->net_stats.rx_errors = adapter->stats.rxerrc +
  5745. + adapter->stats.crcerrs + adapter->stats.algnerrc +
  5746. + adapter->stats.rlec + adapter->stats.mpc +
  5747. + adapter->stats.cexterr;
  5748. + adapter->net_stats.rx_dropped = adapter->stats.mpc;
  5749. + adapter->net_stats.rx_length_errors = adapter->stats.rlec;
  5750. + adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
  5751. + adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
  5752. + adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
  5753. + adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
  5754. +
  5755. + /* Tx Errors */
  5756. +
  5757. + adapter->net_stats.tx_errors = adapter->stats.ecol +
  5758. + adapter->stats.latecol;
  5759. + adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
  5760. + adapter->net_stats.tx_window_errors = adapter->stats.latecol;
  5761. + adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
  5762. - /* Tx Dropped needs to be maintained elsewhere */
  5763. + /* Tx Dropped needs to be maintained elsewhere */
  5764. - /* Phy Stats */
  5765. + /* Phy Stats */
  5766. - if(hw->media_type == iegbe_media_type_copper
  5767. + if(hw->media_type == iegbe_media_type_copper
  5768. || (hw->media_type == iegbe_media_type_oem
  5769. && iegbe_oem_phy_is_copper(&adapter->hw))) {
  5770. - if((adapter->link_speed == SPEED_1000) &&
  5771. - (!iegbe_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
  5772. - phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
  5773. - adapter->phy_stats.idle_errors += phy_tmp;
  5774. - }
  5775. + if((adapter->link_speed == SPEED_1000) &&
  5776. + (!iegbe_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
  5777. + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
  5778. + adapter->phy_stats.idle_errors += phy_tmp;
  5779. + }
  5780. - if((hw->mac_type <= iegbe_82546) &&
  5781. - (hw->phy_type == iegbe_phy_m88) &&
  5782. + if((hw->mac_type <= iegbe_82546) &&
  5783. + (hw->phy_type == iegbe_phy_m88) &&
  5784. !iegbe_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) {
  5785. - adapter->phy_stats.receive_errors += phy_tmp;
  5786. - }
  5787. + adapter->phy_stats.receive_errors += phy_tmp;
  5788. + }
  5789. }
  5790. - spin_unlock_irqrestore(&adapter->stats_lock, flags);
  5791. + spin_unlock_irqrestore(&adapter->stats_lock, flags);
  5792. }
  5793. -#ifdef CONFIG_E1000_MQ
  5794. -void
  5795. -iegbe_rx_schedule(void *data)
  5796. +/**
  5797. + * iegbe_intr_msi - Interrupt Handler
  5798. + * @irq: interrupt number
  5799. + * @data: pointer to a network interface device structure
  5800. + **/
  5801. +
  5802. +static irqreturn_t iegbe_intr_msi(int irq, void *data)
  5803. {
  5804. - struct net_device *poll_dev, *netdev = data;
  5805. - struct iegbe_adapter *adapter = netdev->priv;
  5806. - int this_cpu = get_cpu();
  5807. -
  5808. - poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
  5809. - if (poll_dev == NULL) {
  5810. - put_cpu();
  5811. - return;
  5812. + struct net_device *netdev = data;
  5813. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  5814. + struct iegbe_hw *hw = &adapter->hw;
  5815. + u32 icr = E1000_READ_REG(&adapter->hw, ICR);
  5816. + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
  5817. + hw->get_link_status = 1;
  5818. + if (!test_bit(__E1000_DOWN, &adapter->flags))
  5819. + mod_timer(&adapter->watchdog_timer, jiffies + 1);
  5820. }
  5821. - if (likely(netif_rx_schedule_prep(poll_dev))) {
  5822. - __netif_rx_schedule(poll_dev);
  5823. - } else {
  5824. - iegbe_irq_enable(adapter);
  5825. - }
  5826. - put_cpu();
  5827. -}
  5828. -#endif
  5829. -
  5830. -#ifdef IEGBE_GBE_WORKAROUND
  5831. -/*
  5832. - * Check for tx hang condition. This is the condition where a
  5833. - * decsriptor is in the hardware and hasn't been processed for a
  5834. - * while. This code is similar to the check in iegbe_clean_rx_irq()
  5835. - */
  5836. -static void
  5837. -iegbe_tx_hang_check(struct iegbe_adapter *adapter,
  5838. - struct iegbe_tx_ring *tx_ring)
  5839. -{
  5840. - struct net_device *netdev = adapter->netdev;
  5841. - unsigned int i;
  5842. + if(unlikely(icr & (E1000_ICR_RX_DESC_FIFO_PAR
  5843. + | E1000_ICR_TX_DESC_FIFO_PAR
  5844. + | E1000_ICR_PB
  5845. + | E1000_ICR_CPP_TARGET
  5846. + | E1000_ICR_CPP_MASTER ))) {
  5847. - /* Check for a hang condition using the buffer currently at the Tx
  5848. - head pointer */
  5849. - i = readl(adapter->hw.hw_addr + tx_ring->tdh);
  5850. -
  5851. - if (adapter->detect_tx_hung) {
  5852. - /* Detect a transmit hang in hardware, this serializes the
  5853. - * check with the clearing of time_stamp and movement of i */
  5854. - adapter->detect_tx_hung = FALSE;
  5855. -
  5856. - if (tx_ring->buffer_info[i].dma &&
  5857. - time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
  5858. - && !(E1000_READ_REG(&adapter->hw, STATUS) &
  5859. - E1000_STATUS_TXOFF)) {
  5860. -
  5861. - /* detected Tx unit hang */
  5862. - DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
  5863. - " TDH <%x>\n"
  5864. - " TDT <%x>\n"
  5865. - " next_to_use <%x>\n"
  5866. - " next_to_clean <%x>\n"
  5867. - "buffer_info[tdh]\n"
  5868. - " dma <%zx>\n"
  5869. - " time_stamp <%lx>\n"
  5870. - " jiffies <%lx>\n",
  5871. - readl(adapter->hw.hw_addr + tx_ring->tdh),
  5872. - readl(adapter->hw.hw_addr + tx_ring->tdt),
  5873. - tx_ring->next_to_use,
  5874. - tx_ring->next_to_clean,
  5875. - (size_t)tx_ring->buffer_info[i].dma,
  5876. - tx_ring->buffer_info[i].time_stamp,
  5877. - jiffies);
  5878. - netif_stop_queue(netdev);
  5879. - }
  5880. + iegbe_irq_disable(adapter);
  5881. + printk("Critical error! ICR = 0x%x\n", icr);
  5882. + return IRQ_HANDLED;
  5883. }
  5884. -}
  5885. + if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
  5886. + adapter->total_tx_bytes = 0;
  5887. + adapter->total_tx_packets = 0;
  5888. + adapter->total_rx_bytes = 0;
  5889. + adapter->total_rx_packets = 0;
  5890. + __netif_rx_schedule(netdev, &adapter->napi);
  5891. + } else
  5892. + iegbe_irq_enable(adapter);
  5893. -#endif
  5894. + return IRQ_HANDLED;
  5895. +}
  5896. /**
  5897. * iegbe_intr - Interrupt Handler
  5898. @@ -3546,364 +3478,208 @@ iegbe_tx_hang_check(struct iegbe_adapter
  5899. **/
  5900. static irqreturn_t
  5901. -iegbe_intr(int irq, void *data, struct pt_regs *regs)
  5902. +iegbe_intr(int irq, void *data)
  5903. {
  5904. - struct net_device *netdev = data;
  5905. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  5906. - struct iegbe_hw *hw = &adapter->hw;
  5907. - uint32_t rctl, tctl;
  5908. - uint32_t icr = E1000_READ_REG(hw, ICR);
  5909. -#ifndef CONFIG_E1000_NAPI
  5910. - uint32_t i;
  5911. -#ifdef IEGBE_GBE_WORKAROUND
  5912. - int rx_cleaned;
  5913. -#endif
  5914. -#endif
  5915. + struct net_device *netdev = data;
  5916. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  5917. + struct iegbe_hw *hw = &adapter->hw;
  5918. + u32 icr = E1000_READ_REG(&adapter->hw, ICR);
  5919. - if(unlikely(!icr)) {
  5920. + if (unlikely(!icr))
  5921. return IRQ_NONE; /* Not our interrupt */
  5922. - }
  5923. +
  5924. + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
  5925. + * not set, then the adapter didn't send an interrupt */
  5926. + if (unlikely(hw->mac_type >= iegbe_82571 &&
  5927. + !(icr & E1000_ICR_INT_ASSERTED)))
  5928. + return IRQ_NONE;
  5929. +
  5930. +
  5931. if(unlikely(icr & (E1000_ICR_RX_DESC_FIFO_PAR
  5932. - | E1000_ICR_TX_DESC_FIFO_PAR
  5933. - | E1000_ICR_PB
  5934. - | E1000_ICR_CPP_TARGET
  5935. - | E1000_ICR_CPP_MASTER ))) {
  5936. + | E1000_ICR_TX_DESC_FIFO_PAR
  5937. + | E1000_ICR_PB
  5938. + | E1000_ICR_CPP_TARGET
  5939. + | E1000_ICR_CPP_MASTER ))) {
  5940. iegbe_irq_disable(adapter);
  5941. - tctl = E1000_READ_REG(&adapter->hw, TCTL);
  5942. - rctl = E1000_READ_REG(&adapter->hw, RCTL);
  5943. - E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_TCTL_EN);
  5944. - E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
  5945. -
  5946. - tasklet_data = (unsigned long) (icr + adapter->bd_number);
  5947. - tasklet_schedule(&iegbe_reset_tasklet);
  5948. -
  5949. - return IRQ_HANDLED;
  5950. - }
  5951. -
  5952. -#ifdef CONFIG_E1000_NAPI
  5953. - atomic_inc(&adapter->irq_sem);
  5954. -#ifdef IEGBE_GBE_WORKAROUND
  5955. - /* Ensure that the TXQE interrupt is enabled in NAPI mode */
  5956. - E1000_WRITE_REG(hw, IMC, ~E1000_IMS_TXQE);
  5957. -#else
  5958. - E1000_WRITE_REG(hw, IMC, ~0);
  5959. -#endif
  5960. - E1000_WRITE_FLUSH(hw);
  5961. -#ifdef CONFIG_E1000_MQ
  5962. - if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
  5963. - cpu_set(adapter->cpu_for_queue[0],
  5964. - adapter->rx_sched_call_data.cpumask);
  5965. - for (i = 1; i < adapter->num_queues; i++) {
  5966. - cpu_set(adapter->cpu_for_queue[i],
  5967. - adapter->rx_sched_call_data.cpumask);
  5968. - atomic_inc(&adapter->irq_sem);
  5969. - }
  5970. - atomic_set(&adapter->rx_sched_call_data.count, i);
  5971. - smp_call_async_mask(&adapter->rx_sched_call_data);
  5972. - } else {
  5973. - DEBUGOUT("call_data.count == %u\n",
  5974. - atomic_read(&adapter->rx_sched_call_data.count));
  5975. + printk("Critical error! ICR = 0x%x\n", icr);
  5976. + return IRQ_HANDLED;
  5977. }
  5978. -#else
  5979. - if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) {
  5980. - __netif_rx_schedule(&adapter->polling_netdev[0]);
  5981. - } else {
  5982. - iegbe_irq_enable(adapter);
  5983. - }
  5984. -#endif
  5985. -
  5986. -#ifdef IEGBE_GBE_WORKAROUND
  5987. - /* Clean the Tx ring */
  5988. - for (i = 0; i < E1000_MAX_INTR; i++) {
  5989. - adapter->stats.rx_next_to_clean = adapter->rx_ring->next_to_clean;
  5990. - adapter->stats.rx_next_to_use = adapter->rx_ring->next_to_use;
  5991. -
  5992. - adapter->stats.tx_next_to_clean = adapter->tx_ring->next_to_clean;
  5993. - adapter->stats.tx_next_to_use = adapter->tx_ring->next_to_use;
  5994. -
  5995. - /* Only clean Tx descriptors for a TXQE interrupt */
  5996. - if(icr & E1000_ICR_TXQE) {
  5997. - adapter->stats.txqec++;
  5998. - iegbe_clean_tx_ring_partial(adapter, adapter->tx_ring);
  5999. - }
  6000. - else {
  6001. - iegbe_tx_hang_check(adapter, adapter->tx_ring);
  6002. - }
  6003. - }
  6004. -#endif /*IEGBE_GBE_WORKAROUND */
  6005. -
  6006. -#else
  6007. - /* Writing IMC and IMS is needed for 82547.
  6008. - * Due to Hub Link bus being occupied, an interrupt
  6009. - * de-assertion message is not able to be sent.
  6010. - * When an interrupt assertion message is generated later,
  6011. - * two messages are re-ordered and sent out.
  6012. - * That causes APIC to think 82547 is in de-assertion
  6013. - * state, while 82547 is in assertion state, resulting
  6014. - * in dead lock. Writing IMC forces 82547 into
  6015. - * de-assertion state.
  6016. - */
  6017. - if (hw->mac_type == iegbe_82547 || hw->mac_type == iegbe_82547_rev_2) {
  6018. - atomic_inc(&adapter->irq_sem);
  6019. - E1000_WRITE_REG(hw, IMC, ~0);
  6020. - }
  6021. -
  6022. -#ifdef IEGBE_GBE_WORKAROUND
  6023. -
  6024. - for (i = 0; i < E1000_MAX_INTR; i++) {
  6025. - rx_cleaned = adapter->clean_rx(adapter, adapter->rx_ring);
  6026. - adapter->stats.rx_next_to_clean = adapter->rx_ring->next_to_clean;
  6027. - adapter->stats.rx_next_to_use = adapter->rx_ring->next_to_use;
  6028. -
  6029. - adapter->stats.tx_next_to_clean = adapter->tx_ring->next_to_clean;
  6030. - adapter->stats.tx_next_to_use = adapter->tx_ring->next_to_use;
  6031. -
  6032. - /* Only clean Tx descriptors for a TXQE interrupt */
  6033. - if(icr & E1000_ICR_TXQE) {
  6034. - adapter->stats.txqec++;
  6035. - iegbe_clean_tx_ring_partial(adapter, adapter->tx_ring);
  6036. - }
  6037. - else {
  6038. - iegbe_tx_hang_check(adapter, adapter->tx_ring);
  6039. - }
  6040. - if(!rx_cleaned) {
  6041. - break;
  6042. - }
  6043. + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
  6044. + * need for the IMC write */
  6045. + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
  6046. + hw->get_link_status = 1;
  6047. + /* guard against interrupt when we're going down */
  6048. + if (!test_bit(__E1000_DOWN, &adapter->flags))
  6049. + mod_timer(&adapter->watchdog_timer, jiffies + 1);
  6050. +
  6051. }
  6052. -#else
  6053. - for (i = 0; i < E1000_MAX_INTR; i++)
  6054. - if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
  6055. - !iegbe_clean_tx_irq(adapter, adapter->tx_ring))) {
  6056. - break;
  6057. - }
  6058. -#endif
  6059. -
  6060. - if (hw->mac_type == iegbe_82547 || hw->mac_type == iegbe_82547_rev_2) {
  6061. - iegbe_irq_enable(adapter);
  6062. - }
  6063. -#endif
  6064. -#ifdef E1000_COUNT_ICR
  6065. - adapter->icr_txdw += icr & 0x01UL;
  6066. - icr >>= 0x1;
  6067. - adapter->icr_txqe += icr & 0x01UL;
  6068. - icr >>= 0x1;
  6069. - adapter->icr_lsc += icr & 0x01UL;
  6070. - icr >>= 0x1;
  6071. - adapter->icr_rxseq += icr & 0x01UL;
  6072. - icr >>= 0x1;
  6073. - adapter->icr_rxdmt += icr & 0x01UL;
  6074. - icr >>= 0x1;
  6075. - adapter->icr_rxo += icr & 0x01UL;
  6076. - icr >>= 0x1;
  6077. - adapter->icr_rxt += icr & 0x01UL;
  6078. - if(hw->mac_type != iegbe_icp_xxxx) {
  6079. - icr >>= 0x2;
  6080. - adapter->icr_mdac += icr & 0x01UL;
  6081. - icr >>= 0x1;
  6082. - adapter->icr_rxcfg += icr & 0x01UL;
  6083. - icr >>= 0x1;
  6084. - adapter->icr_gpi += icr & 0x01UL;
  6085. - } else {
  6086. - icr >>= 0x4;
  6087. - }
  6088. - if(hw->mac_type == iegbe_icp_xxxx) {
  6089. - icr >>= 0xc;
  6090. - adapter->icr_pb += icr & 0x01UL;
  6091. - icr >>= 0x3;
  6092. - adapter->icr_intmem_icp_xxxx += icr & 0x01UL;
  6093. - icr >>= 0x1;
  6094. - adapter->icr_cpp_target += icr & 0x01UL;
  6095. - icr >>= 0x1;
  6096. - adapter->icr_cpp_master += icr & 0x01UL;
  6097. - icr >>= 0x1;
  6098. - adapter->icr_stat += icr & 0x01UL;
  6099. + if (unlikely(hw->mac_type < iegbe_82571)) {
  6100. + E1000_WRITE_REG(&adapter->hw, IMC, ~0);
  6101. + E1000_WRITE_FLUSH(&adapter->hw);
  6102. }
  6103. -#endif
  6104. + if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
  6105. + adapter->total_tx_bytes = 0;
  6106. + adapter->total_tx_packets = 0;
  6107. + adapter->total_rx_bytes = 0;
  6108. + adapter->total_rx_packets = 0;
  6109. + __netif_rx_schedule(netdev, &adapter->napi);
  6110. + } else
  6111. + /* this really should not happen! if it does it is basically a
  6112. + * bug, but not a hard error, so enable ints and continue */
  6113. + iegbe_irq_enable(adapter);
  6114. return IRQ_HANDLED;
  6115. }
  6116. -#ifdef CONFIG_E1000_NAPI
  6117. /**
  6118. * iegbe_clean - NAPI Rx polling callback
  6119. * @adapter: board private structure
  6120. **/
  6121. -
  6122. -static int
  6123. -iegbe_clean(struct net_device *poll_dev, int *budget)
  6124. +static int iegbe_clean(struct napi_struct *napi, int budget)
  6125. {
  6126. - struct iegbe_adapter *adapter;
  6127. - int work_to_do = min(*budget, poll_dev->quota);
  6128. - int tx_cleaned, i = 0, work_done = 0;
  6129. + struct iegbe_adapter *adapter = container_of(napi, struct iegbe_adapter, napi);
  6130. + struct net_device *poll_dev = adapter->netdev;
  6131. + int tx_cleaned = 0, work_done = 0;
  6132. /* Must NOT use netdev_priv macro here. */
  6133. adapter = poll_dev->priv;
  6134. - /* Keep link state information with original netdev */
  6135. - if (!netif_carrier_ok(adapter->netdev)) {
  6136. - goto quit_polling;
  6137. - }
  6138. - while (poll_dev != &adapter->polling_netdev[i]) {
  6139. - i++;
  6140. - if (unlikely(i == adapter->num_queues)) {
  6141. - BUG();
  6142. - }
  6143. - }
  6144. -
  6145. -#ifdef IEGBE_GBE_WORKAROUND
  6146. - /* Tx descriptors are cleaned in iegbe_intr(). No need to clean
  6147. - them here */
  6148. - tx_cleaned = FALSE;
  6149. -#else
  6150. - tx_cleaned = iegbe_clean_tx_irq(adapter, &adapter->tx_ring[i]);
  6151. -#endif
  6152. - adapter->clean_rx(adapter, &adapter->rx_ring[i],
  6153. - &work_done, work_to_do);
  6154. -
  6155. - *budget -= work_done;
  6156. - poll_dev->quota -= work_done;
  6157. -
  6158. - /* If no Tx and not enough Rx work done, exit the polling mode */
  6159. - if((!tx_cleaned && (work_done == 0)) ||
  6160. - !netif_running(adapter->netdev)) {
  6161. -quit_polling:
  6162. - netif_rx_complete(poll_dev);
  6163. + /* iegbe_clean is called per-cpu. This lock protects
  6164. + * tx_ring[0] from being cleaned by multiple cpus
  6165. + * simultaneously. A failure obtaining the lock means
  6166. + * tx_ring[0] is currently being cleaned anyway. */
  6167. + if (spin_trylock(&adapter->tx_queue_lock)) {
  6168. + tx_cleaned = iegbe_clean_tx_irq(adapter,
  6169. + &adapter->tx_ring[0]);
  6170. + spin_unlock(&adapter->tx_queue_lock);
  6171. + }
  6172. +
  6173. + adapter->clean_rx(adapter, &adapter->rx_ring[0],
  6174. + &work_done, budget);
  6175. +
  6176. + if (tx_cleaned)
  6177. + work_done = budget;
  6178. +
  6179. + /* If budget not fully consumed, exit the polling mode */
  6180. + if (work_done < budget) {
  6181. + if (likely(adapter->itr_setting & 3))
  6182. + iegbe_set_itr(adapter);
  6183. + netif_rx_complete(poll_dev, napi);
  6184. iegbe_irq_enable(adapter);
  6185. - return 0;
  6186. }
  6187. - return 1;
  6188. + return work_done;
  6189. }
  6190. -#endif
  6191. -
  6192. -
  6193. -#ifndef IEGBE_GBE_WORKAROUND
  6194. /**
  6195. * iegbe_clean_tx_irq - Reclaim resources after transmit completes
  6196. * @adapter: board private structure
  6197. **/
  6198. -
  6199. -static boolean_t
  6200. -iegbe_clean_tx_irq(struct iegbe_adapter *adapter,
  6201. +static bool iegbe_clean_tx_irq(struct iegbe_adapter *adapter,
  6202. struct iegbe_tx_ring *tx_ring)
  6203. {
  6204. - struct net_device *netdev = adapter->netdev;
  6205. - struct iegbe_tx_desc *tx_desc, *eop_desc;
  6206. - struct iegbe_buffer *buffer_info;
  6207. - unsigned int i, eop;
  6208. - boolean_t cleaned = FALSE;
  6209. + struct iegbe_hw *hw = &adapter->hw;
  6210. + struct net_device *netdev = adapter->netdev;
  6211. + struct iegbe_tx_desc *tx_desc, *eop_desc;
  6212. + struct iegbe_buffer *buffer_info;
  6213. + unsigned int i, eop;
  6214. + unsigned int count = 0;
  6215. + bool cleaned = false;
  6216. + unsigned int total_tx_bytes=0, total_tx_packets=0;
  6217. - i = tx_ring->next_to_clean;
  6218. - eop = tx_ring->buffer_info[i].next_to_watch;
  6219. - eop_desc = E1000_TX_DESC(*tx_ring, eop);
  6220. + i = tx_ring->next_to_clean;
  6221. + eop = tx_ring->buffer_info[i].next_to_watch;
  6222. + eop_desc = E1000_TX_DESC(*tx_ring, eop);
  6223. while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
  6224. - /* Premature writeback of Tx descriptors clear (free buffers
  6225. - * and unmap pci_mapping) previous_buffer_info */
  6226. - if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
  6227. - iegbe_unmap_and_free_tx_resource(adapter,
  6228. - &tx_ring->previous_buffer_info);
  6229. - }
  6230. -
  6231. - for (cleaned = FALSE; !cleaned; ) {
  6232. - tx_desc = E1000_TX_DESC(*tx_ring, i);
  6233. - buffer_info = &tx_ring->buffer_info[i];
  6234. - cleaned = (i == eop);
  6235. -
  6236. -#ifdef NETIF_F_TSO
  6237. - if (!(netdev->features & NETIF_F_TSO)) {
  6238. -#endif
  6239. - iegbe_unmap_and_free_tx_resource(adapter,
  6240. - buffer_info);
  6241. -#ifdef NETIF_F_TSO
  6242. - } else {
  6243. - if (cleaned) {
  6244. - memcpy(&tx_ring->previous_buffer_info,
  6245. - buffer_info,
  6246. - sizeof(struct iegbe_buffer));
  6247. - memset(buffer_info, 0,
  6248. - sizeof(struct iegbe_buffer));
  6249. - } else {
  6250. - iegbe_unmap_and_free_tx_resource(
  6251. - adapter, buffer_info);
  6252. - }
  6253. - }
  6254. -#endif
  6255. -
  6256. - tx_desc->buffer_addr = 0;
  6257. - tx_desc->lower.data = 0;
  6258. + for (cleaned = false; !cleaned; ) {
  6259. + tx_desc = E1000_TX_DESC(*tx_ring, i);
  6260. + buffer_info = &tx_ring->buffer_info[i];
  6261. + cleaned = (i == eop);
  6262. +
  6263. + if (cleaned) {
  6264. + struct sk_buff *skb = buffer_info->skb;
  6265. + unsigned int segs = 0, bytecount;
  6266. + segs = skb_shinfo(skb)->gso_segs ?: 1;
  6267. + bytecount = ((segs - 1) * skb_headlen(skb)) +
  6268. + skb->len;
  6269. + total_tx_packets += segs;
  6270. + total_tx_bytes += bytecount;
  6271. + }
  6272. + iegbe_unmap_and_free_tx_resource(adapter, buffer_info);
  6273. tx_desc->upper.data = 0;
  6274. - if (unlikely(++i == tx_ring->count)) { i = 0; }
  6275. - }
  6276. -
  6277. - tx_ring->pkt++;
  6278. + if (unlikely(++i == tx_ring->count)) i = 0;
  6279. + }
  6280. - eop = tx_ring->buffer_info[i].next_to_watch;
  6281. - eop_desc = E1000_TX_DESC(*tx_ring, eop);
  6282. - }
  6283. + eop = tx_ring->buffer_info[i].next_to_watch;
  6284. + eop_desc = E1000_TX_DESC(*tx_ring, eop);
  6285. +#define E1000_TX_WEIGHT 64
  6286. + /* weight of a sort for tx, to avoid endless transmit cleanup */
  6287. + if (count++ == E1000_TX_WEIGHT)
  6288. + break;
  6289. + }
  6290. tx_ring->next_to_clean = i;
  6291. - spin_lock(&tx_ring->tx_lock);
  6292. +#define TX_WAKE_THRESHOLD 32
  6293. - if (unlikely(cleaned && netif_queue_stopped(netdev) &&
  6294. - netif_carrier_ok(netdev))) {
  6295. - netif_wake_queue(netdev);
  6296. - }
  6297. - spin_unlock(&tx_ring->tx_lock);
  6298. -
  6299. - if (adapter->detect_tx_hung) {
  6300. - /* Detect a transmit hang in hardware, this serializes the
  6301. - * check with the clearing of time_stamp and movement of i */
  6302. - adapter->detect_tx_hung = FALSE;
  6303. -
  6304. - if (tx_ring->buffer_info[i].dma &&
  6305. - time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
  6306. - && !(E1000_READ_REG(&adapter->hw, STATUS) &
  6307. - E1000_STATUS_TXOFF)) {
  6308. -
  6309. - /* detected Tx unit hang */
  6310. - i = tx_ring->next_to_clean;
  6311. - eop = tx_ring->buffer_info[i].next_to_watch;
  6312. - eop_desc = E1000_TX_DESC(*tx_ring, eop);
  6313. - DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
  6314. - " TDH <%x>\n"
  6315. - " TDT <%x>\n"
  6316. - " next_to_use <%x>\n"
  6317. - " next_to_clean <%x>\n"
  6318. - "buffer_info[next_to_clean]\n"
  6319. - " dma <%zx>\n"
  6320. - " time_stamp <%lx>\n"
  6321. - " next_to_watch <%x>\n"
  6322. - " jiffies <%lx>\n"
  6323. - " next_to_watch.status <%x>\n",
  6324. - readl(adapter->hw.hw_addr + tx_ring->tdh),
  6325. - readl(adapter->hw.hw_addr + tx_ring->tdt),
  6326. - tx_ring->next_to_use,
  6327. - i,
  6328. - (size_t)tx_ring->buffer_info[i].dma,
  6329. - tx_ring->buffer_info[i].time_stamp,
  6330. - eop,
  6331. - jiffies,
  6332. - eop_desc->upper.fields.status);
  6333. - netif_stop_queue(netdev);
  6334. + if (unlikely(cleaned && netif_carrier_ok(netdev) &&
  6335. + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
  6336. + /* Make sure that anybody stopping the queue after this
  6337. + * sees the new next_to_clean.
  6338. + */
  6339. + smp_mb();
  6340. + if (netif_queue_stopped(netdev)) {
  6341. + netif_wake_queue(netdev);
  6342. + ++adapter->restart_queue;
  6343. }
  6344. }
  6345. -#ifdef NETIF_F_TSO
  6346. - if (unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
  6347. - time_after(jiffies, tx_ring->previous_buffer_info.time_stamp + HZ))) {
  6348. - iegbe_unmap_and_free_tx_resource(
  6349. - adapter, &tx_ring->previous_buffer_info);
  6350. +
  6351. + if (adapter->detect_tx_hung) {
  6352. + /* Detect a transmit hang in hardware, this serializes the
  6353. + * check with the clearing of time_stamp and movement of i */
  6354. + adapter->detect_tx_hung = false;
  6355. +
  6356. + if (tx_ring->buffer_info[eop].dma &&
  6357. + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
  6358. + (adapter->tx_timeout_factor * HZ))
  6359. + && !(E1000_READ_REG(hw, STATUS) & E1000_STATUS_TXOFF)) {
  6360. +
  6361. + /* detected Tx unit hang */
  6362. + DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
  6363. + " Tx Queue <%lu>\n"
  6364. + " TDH <%x>\n"
  6365. + " TDT <%x>\n"
  6366. + " next_to_use <%x>\n"
  6367. + " next_to_clean <%x>\n"
  6368. + "buffer_info[next_to_clean]\n"
  6369. + " time_stamp <%lx>\n"
  6370. + " next_to_watch <%x>\n"
  6371. + " jiffies <%lx>\n"
  6372. + " next_to_watch.status <%x>\n",
  6373. + (unsigned long)((tx_ring - adapter->tx_ring) /
  6374. + sizeof(struct iegbe_tx_ring)),
  6375. + readl(hw->hw_addr + tx_ring->tdh),
  6376. + readl(hw->hw_addr + tx_ring->tdt),
  6377. + tx_ring->next_to_use,
  6378. + tx_ring->next_to_clean,
  6379. + tx_ring->buffer_info[eop].time_stamp,
  6380. + eop,
  6381. + jiffies,
  6382. + eop_desc->upper.fields.status);
  6383. + netif_stop_queue(netdev);
  6384. + }
  6385. }
  6386. -#endif
  6387. - return cleaned;
  6388. + adapter->total_tx_bytes += total_tx_bytes;
  6389. + adapter->total_tx_packets += total_tx_packets;
  6390. + adapter->net_stats.tx_bytes += total_tx_bytes;
  6391. + adapter->net_stats.tx_packets += total_tx_packets;
  6392. + return cleaned;
  6393. }
  6394. -#endif
  6395. /**
  6396. * iegbe_rx_checksum - Receive Checksum Offload for 82543
  6397. @@ -3913,192 +3689,193 @@ iegbe_clean_tx_irq(struct iegbe_adapter
  6398. * @sk_buff: socket buffer with received data
  6399. **/
  6400. -static inline void
  6401. -iegbe_rx_checksum(struct iegbe_adapter *adapter,
  6402. - uint32_t status_err, uint32_t csum,
  6403. - struct sk_buff *skb)
  6404. +static void iegbe_rx_checksum(struct iegbe_adapter *adapter, u32 status_err,
  6405. + u32 csum, struct sk_buff *skb)
  6406. {
  6407. - uint16_t status = (uint16_t)status_err;
  6408. - uint8_t errors = (uint8_t)(status_err >> 0x18);
  6409. + struct iegbe_hw *hw = &adapter->hw;
  6410. + u16 status = (u16)status_err;
  6411. + u8 errors = (u8)(status_err >> 24);
  6412. skb->ip_summed = CHECKSUM_NONE;
  6413. - /* 82543 or newer only */
  6414. - if(unlikely(adapter->hw.mac_type < iegbe_82543)) { return; }
  6415. - /* Ignore Checksum bit is set */
  6416. - if(unlikely(status & E1000_RXD_STAT_IXSM)) { return; }
  6417. - /* TCP/UDP checksum error bit is set */
  6418. - if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
  6419. - /* let the stack verify checksum errors */
  6420. - adapter->hw_csum_err++;
  6421. - return;
  6422. - }
  6423. - /* TCP/UDP Checksum has not been calculated */
  6424. - if(adapter->hw.mac_type <= iegbe_82547_rev_2) {
  6425. - if(!(status & E1000_RXD_STAT_TCPCS)) {
  6426. - return;
  6427. + /* 82543 or newer only */
  6428. + if (unlikely(hw->mac_type < iegbe_82543)) return;
  6429. + /* Ignore Checksum bit is set */
  6430. + if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
  6431. + /* TCP/UDP checksum error bit is set */
  6432. + if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
  6433. + /* let the stack verify checksum errors */
  6434. + adapter->hw_csum_err++;
  6435. + return;
  6436. + }
  6437. + /* TCP/UDP Checksum has not been calculated */
  6438. + if (hw->mac_type <= iegbe_82547_rev_2) {
  6439. + if (!(status & E1000_RXD_STAT_TCPCS))
  6440. + return;
  6441. + } else {
  6442. + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
  6443. + return;
  6444. }
  6445. - } else {
  6446. - if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) {
  6447. - return;
  6448. - }
  6449. + /* It must be a TCP or UDP packet with a valid checksum */
  6450. + if(likely(status & E1000_RXD_STAT_TCPCS)) {
  6451. + /* TCP checksum is good */
  6452. + skb->ip_summed = CHECKSUM_UNNECESSARY;
  6453. + } else if (hw->mac_type > iegbe_82547_rev_2) {
  6454. + /* IP fragment with UDP payload */
  6455. + /* Hardware complements the payload checksum, so we undo it
  6456. + * and then put the value in host order for further stack use.
  6457. + */
  6458. + __sum16 sum = (__force __sum16)htons(csum);
  6459. + skb->csum = csum_unfold(~sum);
  6460. + skb->ip_summed = CHECKSUM_COMPLETE;
  6461. }
  6462. - /* It must be a TCP or UDP packet with a valid checksum */
  6463. - if(likely(status & E1000_RXD_STAT_TCPCS)) {
  6464. - /* TCP checksum is good */
  6465. - skb->ip_summed = CHECKSUM_UNNECESSARY;
  6466. - } else if(adapter->hw.mac_type > iegbe_82547_rev_2) {
  6467. - /* IP fragment with UDP payload */
  6468. - /* Hardware complements the payload checksum, so we undo it
  6469. - * and then put the value in host order for further stack use.
  6470. - */
  6471. - csum = ntohl(csum ^ 0xFFFF);
  6472. - skb->csum = csum;
  6473. - skb->ip_summed = CHECKSUM_HW;
  6474. - }
  6475. - adapter->hw_csum_good++;
  6476. + adapter->hw_csum_good++;
  6477. }
  6478. /**
  6479. * iegbe_clean_rx_irq - Send received data up the network stack; legacy
  6480. * @adapter: board private structure
  6481. **/
  6482. -
  6483. -static boolean_t
  6484. -#ifdef CONFIG_E1000_NAPI
  6485. -iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
  6486. +static bool iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
  6487. struct iegbe_rx_ring *rx_ring,
  6488. int *work_done, int work_to_do)
  6489. -#else
  6490. -iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
  6491. - struct iegbe_rx_ring *rx_ring)
  6492. -#endif
  6493. {
  6494. - struct net_device *netdev = adapter->netdev;
  6495. - struct pci_dev *pdev = adapter->pdev;
  6496. - struct iegbe_rx_desc *rx_desc;
  6497. - struct iegbe_buffer *buffer_info;
  6498. - struct sk_buff *skb;
  6499. - unsigned long flags = 0;
  6500. - uint32_t length;
  6501. - uint8_t last_byte;
  6502. - unsigned int i;
  6503. - boolean_t cleaned = FALSE;
  6504. -
  6505. -#ifdef IEGBE_GBE_WORKAROUND
  6506. - /* Need to keep track of the amount of Rx descriptors that we
  6507. - cleaned to ensure that we don't supply too many back to the
  6508. - hardware */
  6509. - int cleaned_count = 0;
  6510. -#endif
  6511. -
  6512. - i = rx_ring->next_to_clean;
  6513. - rx_desc = E1000_RX_DESC(*rx_ring, i);
  6514. -
  6515. - while(rx_desc->status & E1000_RXD_STAT_DD) {
  6516. - buffer_info = &rx_ring->buffer_info[i];
  6517. -#ifdef CONFIG_E1000_NAPI
  6518. - if(*work_done >= work_to_do) {
  6519. - break;
  6520. - }
  6521. - (*work_done)++;
  6522. -#endif
  6523. - cleaned = TRUE;
  6524. + struct iegbe_hw *hw = &adapter->hw;
  6525. + struct net_device *netdev = adapter->netdev;
  6526. + struct pci_dev *pdev = adapter->pdev;
  6527. + struct iegbe_rx_desc *rx_desc, *next_rxd;
  6528. + struct iegbe_buffer *buffer_info, *next_buffer;
  6529. + unsigned long flags;
  6530. + u32 length;
  6531. + u8 last_byte;
  6532. + unsigned int i;
  6533. + int cleaned_count = 0;
  6534. + bool cleaned = false;
  6535. + unsigned int total_rx_bytes=0, total_rx_packets=0;
  6536. -#ifdef IEGBE_GBE_WORKAROUND
  6537. - cleaned_count++;
  6538. -#endif
  6539. + i = rx_ring->next_to_clean;
  6540. + rx_desc = E1000_RX_DESC(*rx_ring, i);
  6541. + buffer_info = &rx_ring->buffer_info[i];
  6542. - pci_unmap_single(pdev,
  6543. - buffer_info->dma,
  6544. - buffer_info->length,
  6545. - PCI_DMA_FROMDEVICE);
  6546. + while(rx_desc->status & E1000_RXD_STAT_DD) {
  6547. + struct sk_buff *skb;
  6548. + u8 status;
  6549. + if (*work_done >= work_to_do)
  6550. + break;
  6551. + (*work_done)++;
  6552. + status = rx_desc->status;
  6553. skb = buffer_info->skb;
  6554. - length = le16_to_cpu(rx_desc->length);
  6555. + buffer_info->skb = NULL;
  6556. + prefetch(skb->data - NET_IP_ALIGN);
  6557. + if (++i == rx_ring->count) i = 0;
  6558. + next_rxd = E1000_RX_DESC(*rx_ring, i);
  6559. + prefetch(next_rxd);
  6560. + next_buffer = &rx_ring->buffer_info[i];
  6561. + cleaned = true;
  6562. + cleaned_count++;
  6563. + pci_unmap_single(pdev,
  6564. + buffer_info->dma,
  6565. + buffer_info->length,
  6566. + PCI_DMA_FROMDEVICE);
  6567. +
  6568. + length = le16_to_cpu(rx_desc->length);
  6569. +
  6570. + if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
  6571. + /* All receives must fit into a single buffer */
  6572. + E1000_DBG("%s: Receive packet consumed multiple"
  6573. + " buffers\n", netdev->name);
  6574. + buffer_info->skb = skb;
  6575. + goto next_desc;
  6576. + }
  6577. - if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
  6578. - /* All receives must fit into a single buffer */
  6579. - E1000_DBG("%s: Receive packet consumed multiple"
  6580. - " buffers\n", netdev->name);
  6581. - dev_kfree_skb_irq(skb);
  6582. - goto next_desc;
  6583. - }
  6584. + if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
  6585. + last_byte = *(skb->data + length - 1);
  6586. + if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
  6587. + last_byte)) {
  6588. + spin_lock_irqsave(&adapter->stats_lock, flags);
  6589. + iegbe_tbi_adjust_stats(hw, &adapter->stats,
  6590. + length, skb->data);
  6591. + spin_unlock_irqrestore(&adapter->stats_lock,
  6592. + flags);
  6593. + length--;
  6594. + } else {
  6595. + buffer_info->skb = skb;
  6596. + goto next_desc;
  6597. + }
  6598. + }
  6599. - if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
  6600. - last_byte = *(skb->data + length - 0x1);
  6601. - if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
  6602. - rx_desc->errors, length, last_byte)) {
  6603. - spin_lock_irqsave(&adapter->stats_lock, flags);
  6604. - iegbe_tbi_adjust_stats(&adapter->hw,
  6605. - &adapter->stats,
  6606. - length, skb->data);
  6607. - spin_unlock_irqrestore(&adapter->stats_lock,
  6608. - flags);
  6609. - length--;
  6610. - } else {
  6611. - dev_kfree_skb_irq(skb);
  6612. - goto next_desc;
  6613. + /* adjust length to remove Ethernet CRC, this must be
  6614. + * done after the TBI_ACCEPT workaround above */
  6615. + length -= 4;
  6616. +
  6617. + /* probably a little skewed due to removing CRC */
  6618. + total_rx_bytes += length;
  6619. + total_rx_packets++;
  6620. +
  6621. + /* code added for copybreak, this should improve
  6622. + * performance for small packets with large amounts
  6623. + * of reassembly being done in the stack */
  6624. + if (length < copybreak) {
  6625. + struct sk_buff *new_skb =
  6626. + netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
  6627. + if (new_skb) {
  6628. + skb_reserve(new_skb, NET_IP_ALIGN);
  6629. + skb_copy_to_linear_data_offset(new_skb,
  6630. + -NET_IP_ALIGN,
  6631. + (skb->data -
  6632. + NET_IP_ALIGN),
  6633. + (length +
  6634. + NET_IP_ALIGN));
  6635. + /* save the skb in buffer_info as good */
  6636. + buffer_info->skb = skb;
  6637. + skb = new_skb;
  6638. }
  6639. + /* else just continue with the old one */
  6640. }
  6641. -
  6642. - /* Good Receive */
  6643. - skb_put(skb, length - ETHERNET_FCS_SIZE);
  6644. + /* Good Receive */
  6645. + skb_put(skb, length);
  6646. /* Receive Checksum Offload */
  6647. iegbe_rx_checksum(adapter,
  6648. - (uint32_t)(rx_desc->status) |
  6649. - ((uint32_t)(rx_desc->errors) << 0x18),
  6650. - rx_desc->csum, skb);
  6651. + (u32)(status) |
  6652. + ((u32)(rx_desc->errors) << 24),
  6653. + le16_to_cpu(rx_desc->csum), skb);
  6654. +
  6655. skb->protocol = eth_type_trans(skb, netdev);
  6656. -#ifdef CONFIG_E1000_NAPI
  6657. -#ifdef NETIF_F_HW_VLAN_TX
  6658. - if(unlikely(adapter->vlgrp &&
  6659. - (rx_desc->status & E1000_RXD_STAT_VP))) {
  6660. +
  6661. + if (unlikely(adapter->vlgrp &&
  6662. + (status & E1000_RXD_STAT_VP))) {
  6663. vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
  6664. - le16_to_cpu(rx_desc->special) &
  6665. - E1000_RXD_SPC_VLAN_MASK);
  6666. + le16_to_cpu(rx_desc->special));
  6667. } else {
  6668. netif_receive_skb(skb);
  6669. }
  6670. -#else
  6671. - netif_receive_skb(skb);
  6672. -#endif
  6673. -#else /* CONFIG_E1000_NAPI */
  6674. -#ifdef NETIF_F_HW_VLAN_TX
  6675. - if(unlikely(adapter->vlgrp &&
  6676. - (rx_desc->status & E1000_RXD_STAT_VP))) {
  6677. - vlan_hwaccel_rx(skb, adapter->vlgrp,
  6678. - le16_to_cpu(rx_desc->special) &
  6679. - E1000_RXD_SPC_VLAN_MASK);
  6680. - } else {
  6681. - netif_rx(skb);
  6682. - }
  6683. -#else
  6684. - netif_rx(skb);
  6685. -#endif
  6686. -#endif /* CONFIG_E1000_NAPI */
  6687. +
  6688. netdev->last_rx = jiffies;
  6689. - rx_ring->pkt++;
  6690. next_desc:
  6691. rx_desc->status = 0;
  6692. - buffer_info->skb = NULL;
  6693. - if(unlikely(++i == rx_ring->count)) { i = 0; }
  6694. - rx_desc = E1000_RX_DESC(*rx_ring, i);
  6695. + /* return some buffers to hardware, one at a time is too slow */
  6696. + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
  6697. + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
  6698. + cleaned_count = 0;
  6699. + }
  6700. +
  6701. + /* use prefetched values */
  6702. + rx_desc = next_rxd;
  6703. + buffer_info = next_buffer;
  6704. }
  6705. rx_ring->next_to_clean = i;
  6706. -#ifdef IEGBE_GBE_WORKAROUND
  6707. - /* Only allocate the number of buffers that we have actually
  6708. - cleaned! */
  6709. - if (cleaned_count) {
  6710. - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
  6711. - }
  6712. -#else
  6713. - adapter->alloc_rx_buf(adapter, rx_ring);
  6714. -#endif
  6715. -
  6716. + cleaned_count = E1000_DESC_UNUSED(rx_ring);
  6717. + if (cleaned_count)
  6718. + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
  6719. +
  6720. + adapter->total_rx_packets += total_rx_packets;
  6721. + adapter->total_rx_bytes += total_rx_bytes;
  6722. + adapter->net_stats.rx_bytes += total_rx_bytes;
  6723. + adapter->net_stats.rx_packets += total_rx_packets;
  6724. return cleaned;
  6725. }
  6726. @@ -4107,161 +3884,153 @@ next_desc:
  6727. * @adapter: board private structure
  6728. **/
  6729. -static boolean_t
  6730. -#ifdef CONFIG_E1000_NAPI
  6731. -iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
  6732. +static bool iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
  6733. struct iegbe_rx_ring *rx_ring,
  6734. int *work_done, int work_to_do)
  6735. -#else
  6736. -iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
  6737. - struct iegbe_rx_ring *rx_ring)
  6738. -#endif
  6739. {
  6740. - union iegbe_rx_desc_packet_split *rx_desc;
  6741. - struct net_device *netdev = adapter->netdev;
  6742. - struct pci_dev *pdev = adapter->pdev;
  6743. - struct iegbe_buffer *buffer_info;
  6744. - struct iegbe_ps_page *ps_page;
  6745. - struct iegbe_ps_page_dma *ps_page_dma;
  6746. - struct sk_buff *skb;
  6747. - unsigned int i, j;
  6748. - uint32_t length, staterr;
  6749. - boolean_t cleaned = FALSE;
  6750. -
  6751. -#ifdef IEGBE_GBE_WORKAROUND
  6752. - /* Need to keep track of the amount of Rx descriptors that we
  6753. - cleaned to ensure that we don't supply too many back to the
  6754. - hardware */
  6755. - int cleaned_count = 0;
  6756. -#endif
  6757. -
  6758. - i = rx_ring->next_to_clean;
  6759. - rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
  6760. - staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
  6761. -
  6762. - while(staterr & E1000_RXD_STAT_DD) {
  6763. - buffer_info = &rx_ring->buffer_info[i];
  6764. - ps_page = &rx_ring->ps_page[i];
  6765. - ps_page_dma = &rx_ring->ps_page_dma[i];
  6766. -#ifdef CONFIG_E1000_NAPI
  6767. - if(unlikely(*work_done >= work_to_do)) {
  6768. - break;
  6769. - }
  6770. - (*work_done)++;
  6771. -#endif
  6772. - cleaned = TRUE;
  6773. -
  6774. -#ifdef IEGBE_GBE_WORKAROUND
  6775. - cleaned_count++;
  6776. -#endif
  6777. + union iegbe_rx_desc_packet_split *rx_desc, *next_rxd;
  6778. + struct net_device *netdev = adapter->netdev;
  6779. + struct pci_dev *pdev = adapter->pdev;
  6780. + struct iegbe_buffer *buffer_info, *next_buffer;
  6781. + struct iegbe_ps_page *ps_page;
  6782. + struct iegbe_ps_page_dma *ps_page_dma;
  6783. + struct sk_buff *skb;
  6784. + unsigned int i, j;
  6785. + u32 length, staterr;
  6786. + int cleaned_count = 0;
  6787. + bool cleaned = false;
  6788. + unsigned int total_rx_bytes=0, total_rx_packets=0;
  6789. +
  6790. + i = rx_ring->next_to_clean;
  6791. + rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
  6792. + staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
  6793. + buffer_info = &rx_ring->buffer_info[i];
  6794. - pci_unmap_single(pdev, buffer_info->dma,
  6795. - buffer_info->length,
  6796. - PCI_DMA_FROMDEVICE);
  6797. + while(staterr & E1000_RXD_STAT_DD) {
  6798. + ps_page = &rx_ring->ps_page[i];
  6799. + ps_page_dma = &rx_ring->ps_page_dma[i];
  6800. +
  6801. + if (unlikely(*work_done >= work_to_do))
  6802. + break;
  6803. + (*work_done)++;
  6804. skb = buffer_info->skb;
  6805. + prefetch(skb->data - NET_IP_ALIGN);
  6806. + if (++i == rx_ring->count) i = 0;
  6807. + next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
  6808. + prefetch(next_rxd);
  6809. + next_buffer = &rx_ring->buffer_info[i];
  6810. + cleaned = true;
  6811. + cleaned_count++;
  6812. + pci_unmap_single(pdev, buffer_info->dma,
  6813. + buffer_info->length,
  6814. + PCI_DMA_FROMDEVICE);
  6815. +
  6816. + if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
  6817. + E1000_DBG("%s: Packet Split buffers didn't pick up"
  6818. + " the full packet\n", netdev->name);
  6819. + dev_kfree_skb_irq(skb);
  6820. + goto next_desc;
  6821. + }
  6822. - if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
  6823. - E1000_DBG("%s: Packet Split buffers didn't pick up"
  6824. - " the full packet\n", netdev->name);
  6825. - dev_kfree_skb_irq(skb);
  6826. - goto next_desc;
  6827. - }
  6828. -
  6829. - if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
  6830. - dev_kfree_skb_irq(skb);
  6831. - goto next_desc;
  6832. - }
  6833. -
  6834. - length = le16_to_cpu(rx_desc->wb.middle.length0);
  6835. + if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
  6836. + dev_kfree_skb_irq(skb);
  6837. + goto next_desc;
  6838. + }
  6839. - if(unlikely(!length)) {
  6840. - E1000_DBG("%s: Last part of the packet spanning"
  6841. - " multiple descriptors\n", netdev->name);
  6842. - dev_kfree_skb_irq(skb);
  6843. - goto next_desc;
  6844. - }
  6845. + length = le16_to_cpu(rx_desc->wb.middle.length0);
  6846. - /* Good Receive */
  6847. - skb_put(skb, length);
  6848. -
  6849. - for(j = 0; j < adapter->rx_ps_pages; j++) {
  6850. - if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) {
  6851. - break;
  6852. - }
  6853. - pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
  6854. - PAGE_SIZE, PCI_DMA_FROMDEVICE);
  6855. - ps_page_dma->ps_page_dma[j] = 0;
  6856. - skb_shinfo(skb)->frags[j].page =
  6857. - ps_page->ps_page[j];
  6858. - ps_page->ps_page[j] = NULL;
  6859. - skb_shinfo(skb)->frags[j].page_offset = 0;
  6860. - skb_shinfo(skb)->frags[j].size = length;
  6861. - skb_shinfo(skb)->nr_frags++;
  6862. - skb->len += length;
  6863. - skb->data_len += length;
  6864. - }
  6865. + if(unlikely(!length)) {
  6866. + E1000_DBG("%s: Last part of the packet spanning"
  6867. + " multiple descriptors\n", netdev->name);
  6868. + dev_kfree_skb_irq(skb);
  6869. + goto next_desc;
  6870. + }
  6871. - iegbe_rx_checksum(adapter, staterr,
  6872. - rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
  6873. - skb->protocol = eth_type_trans(skb, netdev);
  6874. + /* Good Receive */
  6875. + skb_put(skb, length);
  6876. - if(likely(rx_desc->wb.upper.header_status &
  6877. - E1000_RXDPS_HDRSTAT_HDRSP)) {
  6878. - adapter->rx_hdr_split++;
  6879. -#ifdef HAVE_RX_ZERO_COPY
  6880. - skb_shinfo(skb)->zero_copy = TRUE;
  6881. -#endif
  6882. - }
  6883. -#ifdef CONFIG_E1000_NAPI
  6884. -#ifdef NETIF_F_HW_VLAN_TX
  6885. - if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
  6886. - vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
  6887. - le16_to_cpu(rx_desc->wb.middle.vlan) &
  6888. - E1000_RXD_SPC_VLAN_MASK);
  6889. - } else {
  6890. - netif_receive_skb(skb);
  6891. - }
  6892. -#else
  6893. - netif_receive_skb(skb);
  6894. -#endif
  6895. -#else /* CONFIG_E1000_NAPI */
  6896. -#ifdef NETIF_F_HW_VLAN_TX
  6897. - if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
  6898. - vlan_hwaccel_rx(skb, adapter->vlgrp,
  6899. - le16_to_cpu(rx_desc->wb.middle.vlan) &
  6900. - E1000_RXD_SPC_VLAN_MASK);
  6901. - } else {
  6902. - netif_rx(skb);
  6903. - }
  6904. -#else
  6905. - netif_rx(skb);
  6906. -#endif
  6907. -#endif /* CONFIG_E1000_NAPI */
  6908. - netdev->last_rx = jiffies;
  6909. - rx_ring->pkt++;
  6910. + {
  6911. + int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
  6912. + if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
  6913. + u8 *vaddr;
  6914. + pci_dma_sync_single_for_cpu(pdev,
  6915. + ps_page_dma->ps_page_dma[0],
  6916. + PAGE_SIZE,
  6917. + PCI_DMA_FROMDEVICE);
  6918. + vaddr = kmap_atomic(ps_page->ps_page[0],
  6919. + KM_SKB_DATA_SOFTIRQ);
  6920. + memcpy(skb_tail_pointer(skb), vaddr, l1);
  6921. + kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
  6922. + pci_dma_sync_single_for_device(pdev,
  6923. + ps_page_dma->ps_page_dma[0],
  6924. + PAGE_SIZE, PCI_DMA_FROMDEVICE);
  6925. + l1 -= 4;
  6926. + skb_put(skb, l1);
  6927. + goto copydone;
  6928. + } /* if */
  6929. + }
  6930. + for (j = 0; j < adapter->rx_ps_pages; j++) {
  6931. + length = le16_to_cpu(rx_desc->wb.upper.length[j]);
  6932. + if (!length)
  6933. + break;
  6934. + pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
  6935. + PAGE_SIZE, PCI_DMA_FROMDEVICE);
  6936. + ps_page_dma->ps_page_dma[j] = 0;
  6937. + skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
  6938. + length);
  6939. + ps_page->ps_page[j] = NULL;
  6940. + skb->len += length;
  6941. + skb->data_len += length;
  6942. + skb->truesize += length;
  6943. + }
  6944. -next_desc:
  6945. - rx_desc->wb.middle.status_error &= ~0xFF;
  6946. - buffer_info->skb = NULL;
  6947. - if(unlikely(++i == rx_ring->count)) { i = 0; }
  6948. + pskb_trim(skb, skb->len - 4);
  6949. +copydone:
  6950. + total_rx_bytes += skb->len;
  6951. + total_rx_packets++;
  6952. + iegbe_rx_checksum(adapter, staterr,
  6953. + le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
  6954. + skb->protocol = eth_type_trans(skb, netdev);
  6955. +
  6956. + if(likely(rx_desc->wb.upper.header_status &
  6957. + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
  6958. + adapter->rx_hdr_split++;
  6959. +
  6960. + if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
  6961. + vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
  6962. + le16_to_cpu(rx_desc->wb.middle.vlan));
  6963. + } else {
  6964. + netif_receive_skb(skb);
  6965. + }
  6966. - rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
  6967. - staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
  6968. - }
  6969. - rx_ring->next_to_clean = i;
  6970. + netdev->last_rx = jiffies;
  6971. -#ifdef IEGBE_GBE_WORKAROUND
  6972. - /* Only allocate the number of buffers that we have actually
  6973. - cleaned! */
  6974. - if (cleaned_count) {
  6975. - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
  6976. - }
  6977. -#else
  6978. - adapter->alloc_rx_buf(adapter, rx_ring);
  6979. -#endif
  6980. +next_desc:
  6981. + rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
  6982. + buffer_info->skb = NULL;
  6983. - return cleaned;
  6984. + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
  6985. + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
  6986. + cleaned_count = 0;
  6987. + }
  6988. +
  6989. + /* use prefetched values */
  6990. + rx_desc = next_rxd;
  6991. + buffer_info = next_buffer;
  6992. + staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
  6993. + }
  6994. + rx_ring->next_to_clean = i;
  6995. +
  6996. + cleaned_count = E1000_DESC_UNUSED(rx_ring);
  6997. + if (cleaned_count)
  6998. + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
  6999. +
  7000. + adapter->total_rx_packets += total_rx_packets;
  7001. + adapter->total_rx_bytes += total_rx_bytes;
  7002. + adapter->net_stats.rx_bytes += total_rx_bytes;
  7003. + adapter->net_stats.rx_packets += total_rx_packets;
  7004. + return cleaned;
  7005. }
  7006. /**
  7007. @@ -4269,142 +4038,115 @@ next_desc:
  7008. * @adapter: address of board private structure
  7009. **/
  7010. -#ifdef IEGBE_GBE_WORKAROUND
  7011. -static void
  7012. -iegbe_alloc_rx_buffers(struct iegbe_adapter *adapter,
  7013. +
  7014. +static void iegbe_alloc_rx_buffers(struct iegbe_adapter *adapter,
  7015. struct iegbe_rx_ring *rx_ring,
  7016. int cleaned_count)
  7017. -#else
  7018. -static void
  7019. -iegbe_alloc_rx_buffers(struct iegbe_adapter *adapter,
  7020. - struct iegbe_rx_ring *rx_ring)
  7021. -#endif
  7022. {
  7023. - struct net_device *netdev = adapter->netdev;
  7024. - struct pci_dev *pdev = adapter->pdev;
  7025. - struct iegbe_rx_desc *rx_desc;
  7026. - struct iegbe_buffer *buffer_info;
  7027. - struct sk_buff *skb;
  7028. - unsigned int i;
  7029. - unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
  7030. -
  7031. - i = rx_ring->next_to_use;
  7032. - buffer_info = &rx_ring->buffer_info[i];
  7033. + struct iegbe_hw *hw = &adapter->hw;
  7034. + struct net_device *netdev = adapter->netdev;
  7035. + struct pci_dev *pdev = adapter->pdev;
  7036. + struct iegbe_rx_desc *rx_desc;
  7037. + struct iegbe_buffer *buffer_info;
  7038. + struct sk_buff *skb;
  7039. + unsigned int i;
  7040. + unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
  7041. -#ifdef IEGBE_GBE_WORKAROUND
  7042. - if (cleaned_count > IEGBE_GBE_WORKAROUND_NUM_RX_DESCRIPTORS) {
  7043. - adapter->stats.cc_gt_num_rx++;
  7044. - }
  7045. - while(cleaned_count-- && !buffer_info->skb) {
  7046. -#else
  7047. - while(!buffer_info->skb) {
  7048. -#endif
  7049. - skb = dev_alloc_skb(bufsz);
  7050. + i = rx_ring->next_to_use;
  7051. + buffer_info = &rx_ring->buffer_info[i];
  7052. - if(unlikely(!skb)) {
  7053. - /* Better luck next round */
  7054. - break;
  7055. - }
  7056. + while (cleaned_count--) {
  7057. + skb = buffer_info->skb;
  7058. + if (skb) {
  7059. + skb_trim(skb, 0);
  7060. + goto map_skb;
  7061. + }
  7062. + skb = netdev_alloc_skb(netdev, bufsz);
  7063. +
  7064. + if(unlikely(!skb)) {
  7065. + /* Better luck next round */
  7066. + adapter->alloc_rx_buff_failed++;
  7067. + break;
  7068. + }
  7069. - /* Fix for errata 23, can't cross 64kB boundary */
  7070. - if(!iegbe_check_64k_bound(adapter, skb->data, bufsz)) {
  7071. - struct sk_buff *oldskb = skb;
  7072. - DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
  7073. - "at %p\n", bufsz, skb->data);
  7074. - /* Try again, without freeing the previous */
  7075. - skb = dev_alloc_skb(bufsz);
  7076. - /* Failed allocation, critical failure */
  7077. - if(!skb) {
  7078. - dev_kfree_skb(oldskb);
  7079. - break;
  7080. - }
  7081. + /* Fix for errata 23, can't cross 64kB boundary */
  7082. + if(!iegbe_check_64k_bound(adapter, skb->data, bufsz)) {
  7083. + struct sk_buff *oldskb = skb;
  7084. + DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
  7085. + "at %p\n", bufsz, skb->data);
  7086. + /* Try again, without freeing the previous */
  7087. + skb = netdev_alloc_skb(netdev, bufsz);
  7088. + /* Failed allocation, critical failure */
  7089. + if(!skb) {
  7090. + dev_kfree_skb(oldskb);
  7091. + break;
  7092. + }
  7093. - if(!iegbe_check_64k_bound(adapter, skb->data, bufsz)) {
  7094. - /* give up */
  7095. - dev_kfree_skb(skb);
  7096. - dev_kfree_skb(oldskb);
  7097. - break; /* while !buffer_info->skb */
  7098. - } else {
  7099. - /* Use new allocation */
  7100. - dev_kfree_skb(oldskb);
  7101. + if(!iegbe_check_64k_bound(adapter, skb->data, bufsz)) {
  7102. + /* give up */
  7103. + dev_kfree_skb(skb);
  7104. + dev_kfree_skb(oldskb);
  7105. + break; /* while !buffer_info->skb */
  7106. }
  7107. - }
  7108. - /* Make buffer alignment 2 beyond a 16 byte boundary
  7109. - * this will result in a 16 byte aligned IP header after
  7110. - * the 14 byte MAC header is removed
  7111. - */
  7112. - skb_reserve(skb, NET_IP_ALIGN);
  7113. -
  7114. - skb->dev = netdev;
  7115. -
  7116. - buffer_info->skb = skb;
  7117. - buffer_info->length = adapter->rx_buffer_len;
  7118. - buffer_info->dma = pci_map_single(pdev,
  7119. - skb->data,
  7120. - adapter->rx_buffer_len,
  7121. - PCI_DMA_FROMDEVICE);
  7122. -
  7123. - /* Fix for errata 23, can't cross 64kB boundary */
  7124. - if(!iegbe_check_64k_bound(adapter,
  7125. - (void *)(unsigned long)buffer_info->dma,
  7126. - adapter->rx_buffer_len)) {
  7127. - DPRINTK(RX_ERR, ERR,
  7128. - "dma align check failed: %u bytes at %p\n",
  7129. - adapter->rx_buffer_len,
  7130. - (void *)(unsigned long)buffer_info->dma);
  7131. - dev_kfree_skb(skb);
  7132. - buffer_info->skb = NULL;
  7133. -
  7134. - pci_unmap_single(pdev, buffer_info->dma,
  7135. - adapter->rx_buffer_len,
  7136. - PCI_DMA_FROMDEVICE);
  7137. -
  7138. - break; /* while !buffer_info->skb */
  7139. - }
  7140. - rx_desc = E1000_RX_DESC(*rx_ring, i);
  7141. - rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
  7142. -
  7143. -#ifdef IEGBE_GBE_WORKAROUND_DISABLED
  7144. - adapter->stats.num_rx_buf_alloc++;
  7145. + /* Use new allocation */
  7146. + dev_kfree_skb(oldskb);
  7147. + }
  7148. + /* Make buffer alignment 2 beyond a 16 byte boundary
  7149. + * this will result in a 16 byte aligned IP header after
  7150. + * the 14 byte MAC header is removed
  7151. + */
  7152. + skb_reserve(skb, NET_IP_ALIGN);
  7153. +
  7154. +
  7155. + buffer_info->skb = skb;
  7156. + buffer_info->length = adapter->rx_buffer_len;
  7157. +map_skb:
  7158. + buffer_info->dma = pci_map_single(pdev,
  7159. + skb->data,
  7160. + adapter->rx_buffer_len,
  7161. + PCI_DMA_FROMDEVICE);
  7162. +
  7163. + /* Fix for errata 23, can't cross 64kB boundary */
  7164. + if(!iegbe_check_64k_bound(adapter,
  7165. + (void *)(unsigned long)buffer_info->dma,
  7166. + adapter->rx_buffer_len)) {
  7167. + DPRINTK(RX_ERR, ERR,
  7168. + "dma align check failed: %u bytes at %p\n",
  7169. + adapter->rx_buffer_len,
  7170. + (void *)(unsigned long)buffer_info->dma);
  7171. + dev_kfree_skb(skb);
  7172. + buffer_info->skb = NULL;
  7173. +
  7174. + pci_unmap_single(pdev, buffer_info->dma,
  7175. + adapter->rx_buffer_len,
  7176. + PCI_DMA_FROMDEVICE);
  7177. - /* Force memory writes to complete before letting h/w
  7178. - * know there are new descriptors to fetch. (Only
  7179. - * applicable for weak-ordered memory model archs,
  7180. - * such as IA-64). */
  7181. - wmb();
  7182. - writel(i, adapter->hw.hw_addr + rx_ring->rdt);
  7183. + break; /* while !buffer_info->skb */
  7184. + }
  7185. + rx_desc = E1000_RX_DESC(*rx_ring, i);
  7186. + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
  7187. -#endif
  7188. -#ifndef IEGBE_GBE_WORKAROUND
  7189. - if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 0x1)) == i)) {
  7190. - /* Force memory writes to complete before letting h/w
  7191. - * know there are new descriptors to fetch. (Only
  7192. - * applicable for weak-ordered memory model archs,
  7193. - * such as IA-64). */
  7194. - wmb();
  7195. - writel(i, adapter->hw.hw_addr + rx_ring->rdt);
  7196. - }
  7197. -#endif
  7198. - if(unlikely(++i == rx_ring->count)) { i = 0; }
  7199. - buffer_info = &rx_ring->buffer_info[i];
  7200. - }
  7201. + /* Force memory writes to complete before letting h/w
  7202. + * know there are new descriptors to fetch. (Only
  7203. + * applicable for weak-ordered memory model archs,
  7204. + * such as IA-64). */
  7205. + if (unlikely(++i == rx_ring->count))
  7206. + i = 0;
  7207. + buffer_info = &rx_ring->buffer_info[i];
  7208. + }
  7209. -#ifdef IEGBE_GBE_WORKAROUND
  7210. if (likely(rx_ring->next_to_use != i)) {
  7211. - rx_ring->next_to_use = i;
  7212. - if (unlikely(i-- == 0)) {
  7213. - i = (rx_ring->count - 0x1);
  7214. - }
  7215. + rx_ring->next_to_use = i;
  7216. + if (unlikely(i-- == 0))
  7217. + i = (rx_ring->count - 1);
  7218. +
  7219. /* Force memory writes to complete before letting h/w
  7220. * know there are new descriptors to fetch. (Only
  7221. * applicable for weak-ordered memory model archs,
  7222. * such as IA-64). */
  7223. wmb();
  7224. - writel(i, adapter->hw.hw_addr + rx_ring->rdt);
  7225. + writel(i, hw->hw_addr + rx_ring->rdt);
  7226. }
  7227. -#else
  7228. - rx_ring->next_to_use = i;
  7229. -#endif
  7230. }
  7231. /**
  7232. @@ -4412,49 +4154,41 @@ iegbe_alloc_rx_buffers(struct iegbe_adap
  7233. * @adapter: address of board private structure
  7234. **/
  7235. -#ifdef IEGBE_GBE_WORKAROUND
  7236. -static void
  7237. -iegbe_alloc_rx_buffers_ps(struct iegbe_adapter *adapter,
  7238. +
  7239. +static void iegbe_alloc_rx_buffers_ps(struct iegbe_adapter *adapter,
  7240. struct iegbe_rx_ring *rx_ring,
  7241. int cleaned_count)
  7242. -#else
  7243. -static void
  7244. -iegbe_alloc_rx_buffers_ps(struct iegbe_adapter *adapter,
  7245. - struct iegbe_rx_ring *rx_ring)
  7246. -#endif
  7247. {
  7248. - struct net_device *netdev = adapter->netdev;
  7249. - struct pci_dev *pdev = adapter->pdev;
  7250. - union iegbe_rx_desc_packet_split *rx_desc;
  7251. - struct iegbe_buffer *buffer_info;
  7252. - struct iegbe_ps_page *ps_page;
  7253. - struct iegbe_ps_page_dma *ps_page_dma;
  7254. - struct sk_buff *skb;
  7255. - unsigned int i, j;
  7256. -
  7257. - i = rx_ring->next_to_use;
  7258. - buffer_info = &rx_ring->buffer_info[i];
  7259. - ps_page = &rx_ring->ps_page[i];
  7260. - ps_page_dma = &rx_ring->ps_page_dma[i];
  7261. + struct iegbe_hw *hw = &adapter->hw;
  7262. + struct net_device *netdev = adapter->netdev;
  7263. + struct pci_dev *pdev = adapter->pdev;
  7264. + union iegbe_rx_desc_packet_split *rx_desc;
  7265. + struct iegbe_buffer *buffer_info;
  7266. + struct iegbe_ps_page *ps_page;
  7267. + struct iegbe_ps_page_dma *ps_page_dma;
  7268. + struct sk_buff *skb;
  7269. + unsigned int i, j;
  7270. +
  7271. + i = rx_ring->next_to_use;
  7272. + buffer_info = &rx_ring->buffer_info[i];
  7273. + ps_page = &rx_ring->ps_page[i];
  7274. + ps_page_dma = &rx_ring->ps_page_dma[i];
  7275. -#ifdef IEGBE_GBE_WORKAROUND
  7276. - while(cleaned_count-- && !buffer_info->skb) {
  7277. -#else
  7278. - while(!buffer_info->skb) {
  7279. -#endif
  7280. - rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
  7281. + while (cleaned_count--) {
  7282. + rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
  7283. for (j = 0; j < PS_PAGE_BUFFERS; j++) {
  7284. - if (j < adapter->rx_ps_pages) {
  7285. - if (likely(!ps_page->ps_page[j])) {
  7286. - ps_page->ps_page[j] =
  7287. - alloc_page(GFP_ATOMIC);
  7288. + if (j < adapter->rx_ps_pages) {
  7289. + if (likely(!ps_page->ps_page[j])) {
  7290. + ps_page->ps_page[j] =
  7291. + alloc_page(GFP_ATOMIC);
  7292. if (unlikely(!ps_page->ps_page[j])) {
  7293. - goto no_buffers;
  7294. + adapter->alloc_rx_buff_failed++;
  7295. + goto no_buffers;
  7296. }
  7297. - ps_page_dma->ps_page_dma[j] =
  7298. - pci_map_page(pdev,
  7299. - ps_page->ps_page[j],
  7300. + ps_page_dma->ps_page_dma[j] =
  7301. + pci_map_page(pdev,
  7302. + ps_page->ps_page[j],
  7303. 0, PAGE_SIZE,
  7304. PCI_DMA_FROMDEVICE);
  7305. }
  7306. @@ -4462,26 +4196,26 @@ iegbe_alloc_rx_buffers_ps(struct iegbe_a
  7307. * change because each write-back erases
  7308. * this info.
  7309. */
  7310. - rx_desc->read.buffer_addr[j+0x1] =
  7311. + rx_desc->read.buffer_addr[j+1] =
  7312. cpu_to_le64(ps_page_dma->ps_page_dma[j]);
  7313. - } else {
  7314. - rx_desc->read.buffer_addr[j+0x1] = ~0;
  7315. - }
  7316. + } else
  7317. + rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
  7318. }
  7319. - skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
  7320. + skb = netdev_alloc_skb(netdev,
  7321. + adapter->rx_ps_bsize0 + NET_IP_ALIGN);
  7322. - if (unlikely(!skb)) {
  7323. + if (unlikely(!skb)) {
  7324. + adapter->alloc_rx_buff_failed++;
  7325. break;
  7326. - }
  7327. + }
  7328. +
  7329. /* Make buffer alignment 2 beyond a 16 byte boundary
  7330. * this will result in a 16 byte aligned IP header after
  7331. * the 14 byte MAC header is removed
  7332. */
  7333. skb_reserve(skb, NET_IP_ALIGN);
  7334. - skb->dev = netdev;
  7335. -
  7336. buffer_info->skb = skb;
  7337. buffer_info->length = adapter->rx_ps_bsize0;
  7338. buffer_info->dma = pci_map_single(pdev, skb->data,
  7339. @@ -4490,27 +4224,28 @@ iegbe_alloc_rx_buffers_ps(struct iegbe_a
  7340. rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
  7341. - if (unlikely((i & ~(E1000_RX_BUFFER_WRITE - 0x1)) == i)) {
  7342. - /* Force memory writes to complete before letting h/w
  7343. - * know there are new descriptors to fetch. (Only
  7344. - * applicable for weak-ordered memory model archs,
  7345. - * such as IA-64). */
  7346. - wmb();
  7347. - /* Hardware increments by 16 bytes, but packet split
  7348. - * descriptors are 32 bytes...so we increment tail
  7349. - * twice as much.
  7350. - */
  7351. - writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
  7352. - }
  7353. -
  7354. - if (unlikely(++i == rx_ring->count)) { i = 0; }
  7355. + if (unlikely(++i == rx_ring->count)) i = 0;
  7356. buffer_info = &rx_ring->buffer_info[i];
  7357. ps_page = &rx_ring->ps_page[i];
  7358. ps_page_dma = &rx_ring->ps_page_dma[i];
  7359. }
  7360. no_buffers:
  7361. - rx_ring->next_to_use = i;
  7362. + if (likely(rx_ring->next_to_use != i)) {
  7363. + rx_ring->next_to_use = i;
  7364. + if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
  7365. +
  7366. + /* Force memory writes to complete before letting h/w
  7367. + * know there are new descriptors to fetch. (Only
  7368. + * applicable for weak-ordered memory model archs,
  7369. + * such as IA-64). */
  7370. + wmb();
  7371. + /* Hardware increments by 16 bytes, but packet split
  7372. + * descriptors are 32 bytes...so we increment tail
  7373. + * twice as much.
  7374. + */
  7375. + writel(i<<1, hw->hw_addr + rx_ring->rdt);
  7376. + }
  7377. }
  7378. /**
  7379. @@ -4521,52 +4256,52 @@ no_buffers:
  7380. static void
  7381. iegbe_smartspeed(struct iegbe_adapter *adapter)
  7382. {
  7383. - uint16_t phy_status;
  7384. - uint16_t phy_ctrl;
  7385. + uint16_t phy_status;
  7386. + uint16_t phy_ctrl;
  7387. - if((adapter->hw.phy_type != iegbe_phy_igp) || !adapter->hw.autoneg ||
  7388. + if((adapter->hw.phy_type != iegbe_phy_igp) || !adapter->hw.autoneg ||
  7389. !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) {
  7390. - return;
  7391. + return;
  7392. }
  7393. - if(adapter->smartspeed == 0) {
  7394. - /* If Master/Slave config fault is asserted twice,
  7395. - * we assume back-to-back */
  7396. - iegbe_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
  7397. + if(adapter->smartspeed == 0x0) {
  7398. + /* If Master/Slave config fault is asserted twice,
  7399. + * we assume back-to-back */
  7400. + iegbe_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
  7401. if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) { return; }
  7402. - iegbe_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
  7403. + iegbe_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
  7404. if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) { return; }
  7405. - iegbe_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
  7406. - if(phy_ctrl & CR_1000T_MS_ENABLE) {
  7407. - phy_ctrl &= ~CR_1000T_MS_ENABLE;
  7408. - iegbe_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
  7409. - phy_ctrl);
  7410. - adapter->smartspeed++;
  7411. - if(!iegbe_phy_setup_autoneg(&adapter->hw) &&
  7412. - !iegbe_read_phy_reg(&adapter->hw, PHY_CTRL,
  7413. - &phy_ctrl)) {
  7414. - phy_ctrl |= (MII_CR_AUTO_NEG_EN |
  7415. - MII_CR_RESTART_AUTO_NEG);
  7416. - iegbe_write_phy_reg(&adapter->hw, PHY_CTRL,
  7417. - phy_ctrl);
  7418. - }
  7419. - }
  7420. - return;
  7421. - } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
  7422. - /* If still no link, perhaps using 2/3 pair cable */
  7423. - iegbe_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
  7424. - phy_ctrl |= CR_1000T_MS_ENABLE;
  7425. - iegbe_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
  7426. - if(!iegbe_phy_setup_autoneg(&adapter->hw) &&
  7427. - !iegbe_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
  7428. - phy_ctrl |= (MII_CR_AUTO_NEG_EN |
  7429. - MII_CR_RESTART_AUTO_NEG);
  7430. - iegbe_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
  7431. - }
  7432. - }
  7433. - /* Restart process after E1000_SMARTSPEED_MAX iterations */
  7434. + iegbe_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
  7435. + if(phy_ctrl & CR_1000T_MS_ENABLE) {
  7436. + phy_ctrl &= ~CR_1000T_MS_ENABLE;
  7437. + iegbe_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
  7438. + phy_ctrl);
  7439. + adapter->smartspeed++;
  7440. + if(!iegbe_phy_setup_autoneg(&adapter->hw) &&
  7441. + !iegbe_read_phy_reg(&adapter->hw, PHY_CTRL,
  7442. + &phy_ctrl)) {
  7443. + phy_ctrl |= (MII_CR_AUTO_NEG_EN |
  7444. + MII_CR_RESTART_AUTO_NEG);
  7445. + iegbe_write_phy_reg(&adapter->hw, PHY_CTRL,
  7446. + phy_ctrl);
  7447. + }
  7448. + }
  7449. + return;
  7450. + } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
  7451. + /* If still no link, perhaps using 2/3 pair cable */
  7452. + iegbe_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
  7453. + phy_ctrl |= CR_1000T_MS_ENABLE;
  7454. + iegbe_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
  7455. + if(!iegbe_phy_setup_autoneg(&adapter->hw) &&
  7456. + !iegbe_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
  7457. + phy_ctrl |= (MII_CR_AUTO_NEG_EN |
  7458. + MII_CR_RESTART_AUTO_NEG);
  7459. + iegbe_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
  7460. + }
  7461. + }
  7462. + /* Restart process after E1000_SMARTSPEED_MAX iterations */
  7463. if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX) {
  7464. - adapter->smartspeed = 0;
  7465. -}
  7466. + adapter->smartspeed = 0x0;
  7467. + }
  7468. }
  7469. /**
  7470. @@ -4576,23 +4311,22 @@ iegbe_smartspeed(struct iegbe_adapter *a
  7471. * @cmd:
  7472. **/
  7473. -static int
  7474. -iegbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  7475. +static int iegbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  7476. {
  7477. - switch (cmd) {
  7478. + switch (cmd) {
  7479. #ifdef SIOCGMIIPHY
  7480. - case SIOCGMIIPHY:
  7481. - case SIOCGMIIREG:
  7482. - case SIOCSMIIREG:
  7483. - return iegbe_mii_ioctl(netdev, ifr, cmd);
  7484. + case SIOCGMIIPHY:
  7485. + case SIOCGMIIREG:
  7486. + case SIOCSMIIREG:
  7487. + return iegbe_mii_ioctl(netdev, ifr, cmd);
  7488. #endif
  7489. #ifdef ETHTOOL_OPS_COMPAT
  7490. - case SIOCETHTOOL:
  7491. - return ethtool_ioctl(ifr);
  7492. + case SIOCETHTOOL:
  7493. + return ethtool_ioctl(ifr);
  7494. #endif
  7495. - default:
  7496. - return -EOPNOTSUPP;
  7497. - }
  7498. + default:
  7499. + return -EOPNOTSUPP;
  7500. + }
  7501. }
  7502. #ifdef SIOCGMIIPHY
  7503. @@ -4603,534 +4337,510 @@ iegbe_ioctl(struct net_device *netdev, s
  7504. * @cmd:
  7505. **/
  7506. -static int
  7507. -iegbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  7508. +static int iegbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
  7509. + int cmd)
  7510. {
  7511. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  7512. - struct mii_ioctl_data *data = if_mii(ifr);
  7513. - int retval;
  7514. - uint16_t mii_reg;
  7515. - uint16_t spddplx;
  7516. - unsigned long flags;
  7517. -
  7518. - if((adapter->hw.media_type == iegbe_media_type_oem &&
  7519. - !iegbe_oem_phy_is_copper(&adapter->hw)) ||
  7520. - adapter->hw.media_type == iegbe_media_type_fiber ||
  7521. - adapter->hw.media_type == iegbe_media_type_internal_serdes ) {
  7522. - return -EOPNOTSUPP;
  7523. - }
  7524. - switch (cmd) {
  7525. - case SIOCGMIIPHY:
  7526. - data->phy_id = adapter->hw.phy_addr;
  7527. - break;
  7528. - case SIOCGMIIREG:
  7529. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  7530. + struct mii_ioctl_data *data = if_mii(ifr);
  7531. + int retval;
  7532. + uint16_t mii_reg;
  7533. + uint16_t spddplx;
  7534. + unsigned long flags = 0;
  7535. +
  7536. + if((adapter->hw.media_type == iegbe_media_type_oem
  7537. + && !iegbe_oem_phy_is_copper(&adapter->hw))
  7538. + ||adapter->hw.media_type != iegbe_media_type_copper) {
  7539. + return -EOPNOTSUPP;
  7540. + }
  7541. + switch (cmd) {
  7542. + case SIOCGMIIPHY:
  7543. + data->phy_id = adapter->hw.phy_addr;
  7544. + break;
  7545. + case SIOCGMIIREG:
  7546. if(!capable(CAP_NET_ADMIN)) {
  7547. - return -EPERM;
  7548. + return -EPERM;
  7549. }
  7550. - spin_lock_irqsave(&adapter->stats_lock, flags);
  7551. - if(iegbe_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
  7552. - &data->val_out)) {
  7553. - spin_unlock_irqrestore(&adapter->stats_lock, flags);
  7554. - return -EIO;
  7555. - }
  7556. - spin_unlock_irqrestore(&adapter->stats_lock, flags);
  7557. - break;
  7558. - case SIOCSMIIREG:
  7559. + spin_lock_irqsave(&adapter->stats_lock, flags);
  7560. + if(iegbe_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
  7561. + &data->val_out)) {
  7562. + spin_unlock_irqrestore(&adapter->stats_lock, flags);
  7563. + return -EIO;
  7564. + }
  7565. + spin_unlock_irqrestore(&adapter->stats_lock, flags);
  7566. + break;
  7567. + case SIOCSMIIREG:
  7568. if(!capable(CAP_NET_ADMIN)){
  7569. - return -EPERM;
  7570. + return -EPERM;
  7571. }
  7572. if(data->reg_num & ~(0x1F)) {
  7573. - return -EFAULT;
  7574. + return -EFAULT;
  7575. }
  7576. - mii_reg = data->val_in;
  7577. - spin_lock_irqsave(&adapter->stats_lock, flags);
  7578. - if(iegbe_write_phy_reg(&adapter->hw, data->reg_num,
  7579. - mii_reg)) {
  7580. - spin_unlock_irqrestore(&adapter->stats_lock, flags);
  7581. - return -EIO;
  7582. - }
  7583. - switch(adapter->hw.phy_type) {
  7584. - case iegbe_phy_m88:
  7585. - switch (data->reg_num) {
  7586. - case PHY_CTRL:
  7587. + mii_reg = data->val_in;
  7588. + spin_lock_irqsave(&adapter->stats_lock, flags);
  7589. + if(iegbe_write_phy_reg(&adapter->hw, data->reg_num,
  7590. + mii_reg)) {
  7591. + spin_unlock_irqrestore(&adapter->stats_lock, flags);
  7592. + return -EIO;
  7593. + }
  7594. + switch(adapter->hw.phy_type) {
  7595. + case iegbe_phy_m88:
  7596. + switch (data->reg_num) {
  7597. + case PHY_CTRL:
  7598. if(mii_reg & MII_CR_POWER_DOWN) {
  7599. - break;
  7600. + break;
  7601. }
  7602. - if(mii_reg & MII_CR_AUTO_NEG_EN) {
  7603. - adapter->hw.autoneg = 1;
  7604. - adapter->hw.autoneg_advertised = 0x2F;
  7605. - } else {
  7606. + if(mii_reg & MII_CR_AUTO_NEG_EN) {
  7607. + adapter->hw.autoneg = 1;
  7608. + adapter->hw.autoneg_advertised = 0x2F;
  7609. + } else {
  7610. if(mii_reg & 0x40){
  7611. - spddplx = SPEED_1000;
  7612. + spddplx = SPEED_1000;
  7613. } else if(mii_reg & 0x2000) {
  7614. - spddplx = SPEED_100;
  7615. + spddplx = SPEED_100;
  7616. } else {
  7617. - spddplx = SPEED_10;
  7618. + spddplx = SPEED_10;
  7619. }
  7620. - spddplx += (mii_reg & 0x100)
  7621. - ? FULL_DUPLEX :
  7622. - HALF_DUPLEX;
  7623. - retval = iegbe_set_spd_dplx(adapter,
  7624. - spddplx);
  7625. - if(retval) {
  7626. - spin_unlock_irqrestore(
  7627. - &adapter->stats_lock,
  7628. - flags);
  7629. - return retval;
  7630. - }
  7631. - }
  7632. - if(netif_running(adapter->netdev)) {
  7633. - iegbe_down(adapter);
  7634. - iegbe_up(adapter);
  7635. + spddplx += (mii_reg & 0x100)
  7636. + ? FULL_DUPLEX :
  7637. + HALF_DUPLEX;
  7638. + retval = iegbe_set_spd_dplx(adapter,
  7639. + spddplx);
  7640. + if(retval) {
  7641. + spin_unlock_irqrestore(
  7642. + &adapter->stats_lock,
  7643. + flags);
  7644. + return retval;
  7645. + }
  7646. + }
  7647. + if(netif_running(adapter->netdev)) {
  7648. + iegbe_down(adapter);
  7649. + iegbe_up(adapter);
  7650. } else {
  7651. - iegbe_reset(adapter);
  7652. + iegbe_reset(adapter);
  7653. }
  7654. - break;
  7655. - case M88E1000_PHY_SPEC_CTRL:
  7656. - case M88E1000_EXT_PHY_SPEC_CTRL:
  7657. - if(iegbe_phy_reset(&adapter->hw)) {
  7658. - spin_unlock_irqrestore(
  7659. - &adapter->stats_lock, flags);
  7660. - return -EIO;
  7661. - }
  7662. - break;
  7663. - }
  7664. - break;
  7665. + break;
  7666. + case M88E1000_PHY_SPEC_CTRL:
  7667. + case M88E1000_EXT_PHY_SPEC_CTRL:
  7668. + if(iegbe_phy_reset(&adapter->hw)) {
  7669. + spin_unlock_irqrestore(
  7670. + &adapter->stats_lock, flags);
  7671. + return -EIO;
  7672. + }
  7673. + break;
  7674. + }
  7675. + break;
  7676. - case iegbe_phy_oem:
  7677. - retval = iegbe_oem_mii_ioctl(adapter, flags, ifr, cmd);
  7678. - if(retval) {
  7679. - spin_unlock_irqrestore(
  7680. - &adapter->stats_lock, flags);
  7681. - return retval;
  7682. - }
  7683. - break;
  7684. + case iegbe_phy_oem:
  7685. + retval = iegbe_oem_mii_ioctl(adapter, flags, ifr, cmd);
  7686. + if(retval) {
  7687. + spin_unlock_irqrestore(
  7688. + &adapter->stats_lock, flags);
  7689. + return retval;
  7690. + }
  7691. + break;
  7692. - default:
  7693. - switch (data->reg_num) {
  7694. - case PHY_CTRL:
  7695. + default:
  7696. + switch (data->reg_num) {
  7697. + case PHY_CTRL:
  7698. if(mii_reg & MII_CR_POWER_DOWN) {
  7699. - break;
  7700. + break;
  7701. }
  7702. - if(netif_running(adapter->netdev)) {
  7703. - iegbe_down(adapter);
  7704. - iegbe_up(adapter);
  7705. + if(netif_running(adapter->netdev)) {
  7706. + iegbe_down(adapter);
  7707. + iegbe_up(adapter);
  7708. } else {
  7709. - iegbe_reset(adapter);
  7710. + iegbe_reset(adapter);
  7711. }
  7712. - break;
  7713. - }
  7714. - }
  7715. - spin_unlock_irqrestore(&adapter->stats_lock, flags);
  7716. - break;
  7717. - default:
  7718. - return -EOPNOTSUPP;
  7719. - }
  7720. - return E1000_SUCCESS;
  7721. + break;
  7722. + }
  7723. + }
  7724. + spin_unlock_irqrestore(&adapter->stats_lock, flags);
  7725. + break;
  7726. + default:
  7727. + return -EOPNOTSUPP;
  7728. + }
  7729. + return E1000_SUCCESS;
  7730. }
  7731. #endif
  7732. -void
  7733. -iegbe_pci_set_mwi(struct iegbe_hw *hw)
  7734. +void iegbe_pci_set_mwi(struct iegbe_hw *hw)
  7735. {
  7736. - struct iegbe_adapter *adapter = hw->back;
  7737. -#ifdef HAVE_PCI_SET_MWI
  7738. - int ret_val = pci_set_mwi(adapter->pdev);
  7739. -
  7740. - if(ret_val) {
  7741. - DPRINTK(PROBE, ERR, "Error in setting MWI\n");
  7742. - }
  7743. -#else
  7744. - pci_write_config_word(adapter->pdev, PCI_COMMAND,
  7745. - adapter->hw.pci_cmd_word |
  7746. - PCI_COMMAND_INVALIDATE);
  7747. -#endif
  7748. + struct iegbe_adapter *adapter = hw->back;
  7749. + int ret_val = pci_set_mwi(adapter->pdev);
  7750. +
  7751. + if (ret_val)
  7752. + DPRINTK(PROBE, ERR, "Error in setting MWI\n");
  7753. }
  7754. -void
  7755. -iegbe_pci_clear_mwi(struct iegbe_hw *hw)
  7756. +void iegbe_pci_clear_mwi(struct iegbe_hw *hw)
  7757. {
  7758. - struct iegbe_adapter *adapter = hw->back;
  7759. + struct iegbe_adapter *adapter = hw->back;
  7760. -#ifdef HAVE_PCI_SET_MWI
  7761. - pci_clear_mwi(adapter->pdev);
  7762. -#else
  7763. - pci_write_config_word(adapter->pdev, PCI_COMMAND,
  7764. - adapter->hw.pci_cmd_word &
  7765. - ~PCI_COMMAND_INVALIDATE);
  7766. -#endif
  7767. + pci_clear_mwi(adapter->pdev);
  7768. }
  7769. void
  7770. iegbe_read_pci_cfg(struct iegbe_hw *hw, uint32_t reg, uint16_t *value)
  7771. {
  7772. - struct iegbe_adapter *adapter = hw->back;
  7773. + struct iegbe_adapter *adapter = hw->back;
  7774. - pci_read_config_word(adapter->pdev, reg, value);
  7775. + pci_read_config_word(adapter->pdev, reg, value);
  7776. }
  7777. void
  7778. iegbe_write_pci_cfg(struct iegbe_hw *hw, uint32_t reg, uint16_t *value)
  7779. {
  7780. - struct iegbe_adapter *adapter = hw->back;
  7781. + struct iegbe_adapter *adapter = hw->back;
  7782. - pci_write_config_word(adapter->pdev, reg, *value);
  7783. + pci_write_config_word(adapter->pdev, reg, *value);
  7784. }
  7785. uint32_t
  7786. iegbe_io_read(struct iegbe_hw *hw, unsigned long port)
  7787. {
  7788. - return inl(port);
  7789. + return inl(port);
  7790. }
  7791. void
  7792. iegbe_io_write(struct iegbe_hw *hw, unsigned long port, uint32_t value)
  7793. {
  7794. - outl(value, port);
  7795. + outl(value, port);
  7796. }
  7797. -#ifdef NETIF_F_HW_VLAN_TX
  7798. -static void
  7799. -iegbe_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
  7800. +static void iegbe_vlan_rx_register(struct net_device *netdev,
  7801. + struct vlan_group *grp)
  7802. {
  7803. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  7804. - uint32_t ctrl, rctl;
  7805. -
  7806. - iegbe_irq_disable(adapter);
  7807. - adapter->vlgrp = grp;
  7808. -
  7809. - if(grp) {
  7810. - /* enable VLAN tag insert/strip */
  7811. - ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  7812. - ctrl |= E1000_CTRL_VME;
  7813. - E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  7814. -
  7815. - /* enable VLAN receive filtering */
  7816. - rctl = E1000_READ_REG(&adapter->hw, RCTL);
  7817. - rctl |= E1000_RCTL_VFE;
  7818. - rctl &= ~E1000_RCTL_CFIEN;
  7819. - E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  7820. - iegbe_update_mng_vlan(adapter);
  7821. - } else {
  7822. - /* disable VLAN tag insert/strip */
  7823. - ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  7824. - ctrl &= ~E1000_CTRL_VME;
  7825. - E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  7826. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  7827. + uint32_t ctrl, rctl;
  7828. - /* disable VLAN filtering */
  7829. - rctl = E1000_READ_REG(&adapter->hw, RCTL);
  7830. - rctl &= ~E1000_RCTL_VFE;
  7831. - E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  7832. - if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
  7833. - iegbe_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
  7834. - adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
  7835. - }
  7836. - }
  7837. + if (!test_bit(__E1000_DOWN, &adapter->flags))
  7838. + iegbe_irq_disable(adapter);
  7839. + adapter->vlgrp = grp;
  7840. +
  7841. + if(grp) {
  7842. + /* enable VLAN tag insert/strip */
  7843. + ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  7844. + ctrl |= E1000_CTRL_VME;
  7845. + E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  7846. +
  7847. + /* enable VLAN receive filtering */
  7848. + rctl = E1000_READ_REG(&adapter->hw, RCTL);
  7849. + rctl |= E1000_RCTL_VFE;
  7850. + rctl &= ~E1000_RCTL_CFIEN;
  7851. + E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  7852. + iegbe_update_mng_vlan(adapter);
  7853. + } else {
  7854. + /* disable VLAN tag insert/strip */
  7855. + ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  7856. + ctrl &= ~E1000_CTRL_VME;
  7857. + E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  7858. +
  7859. + /* disable VLAN filtering */
  7860. + rctl = E1000_READ_REG(&adapter->hw, RCTL);
  7861. + rctl &= ~E1000_RCTL_VFE;
  7862. + E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  7863. + if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
  7864. + iegbe_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
  7865. + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
  7866. + }
  7867. + }
  7868. - iegbe_irq_enable(adapter);
  7869. + if (!test_bit(__E1000_DOWN, &adapter->flags))
  7870. + iegbe_irq_enable(adapter);
  7871. }
  7872. -static void
  7873. -iegbe_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
  7874. +static void iegbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
  7875. {
  7876. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  7877. - uint32_t vfta, index;
  7878. - if((adapter->hw.mng_cookie.status &
  7879. - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
  7880. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  7881. + uint32_t vfta, index;
  7882. + if((adapter->hw.mng_cookie.status &
  7883. + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
  7884. (vid == adapter->mng_vlan_id)) {
  7885. - return;
  7886. + return;
  7887. }
  7888. - /* add VID to filter table */
  7889. + /* add VID to filter table */
  7890. index = (vid >> 0x5) & 0x7F;
  7891. - vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
  7892. + vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
  7893. vfta |= (0x1 << (vid & 0x1F));
  7894. - iegbe_write_vfta(&adapter->hw, index, vfta);
  7895. + iegbe_write_vfta(&adapter->hw, index, vfta);
  7896. }
  7897. -static void
  7898. -iegbe_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
  7899. +static void iegbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
  7900. {
  7901. struct iegbe_adapter *adapter = netdev_priv(netdev);
  7902. - uint32_t vfta, index;
  7903. + u32 vfta, index;
  7904. + if (!test_bit(__E1000_DOWN, &adapter->flags))
  7905. iegbe_irq_disable(adapter);
  7906. -
  7907. - if(adapter->vlgrp) {
  7908. - adapter->vlgrp->vlan_devices[vid] = NULL;
  7909. - }
  7910. + vlan_group_set_device(adapter->vlgrp, vid, NULL);
  7911. + if (!test_bit(__E1000_DOWN, &adapter->flags))
  7912. iegbe_irq_enable(adapter);
  7913. - if((adapter->hw.mng_cookie.status &
  7914. - E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
  7915. - (vid == adapter->mng_vlan_id)) {
  7916. - return;
  7917. - }
  7918. /* remove VID from filter table */
  7919. - index = (vid >> 0x5) & 0x7F;
  7920. + index = (vid >> 0x5) & 0x7F;
  7921. vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
  7922. - vfta &= ~(0x1 << (vid & 0x1F));
  7923. + vfta &= ~(0x1 << (vid & 0x1F));
  7924. iegbe_write_vfta(&adapter->hw, index, vfta);
  7925. }
  7926. -static void
  7927. -iegbe_restore_vlan(struct iegbe_adapter *adapter)
  7928. +static void iegbe_restore_vlan(struct iegbe_adapter *adapter)
  7929. {
  7930. iegbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
  7931. - if(adapter->vlgrp) {
  7932. - uint16_t vid;
  7933. - for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
  7934. - if(!adapter->vlgrp->vlan_devices[vid]) {
  7935. + if (adapter->vlgrp) {
  7936. + u16 vid;
  7937. + for (vid = 0x0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
  7938. + if (!vlan_group_get_device(adapter->vlgrp, vid))
  7939. continue;
  7940. - }
  7941. iegbe_vlan_rx_add_vid(adapter->netdev, vid);
  7942. }
  7943. }
  7944. }
  7945. -#endif
  7946. -int
  7947. -iegbe_set_spd_dplx(struct iegbe_adapter *adapter, uint16_t spddplx)
  7948. +
  7949. +int iegbe_set_spd_dplx(struct iegbe_adapter *adapter, u16 spddplx)
  7950. {
  7951. - adapter->hw.autoneg = 0;
  7952. + adapter->hw.autoneg = 0x0;
  7953. - /* Fiber NICs only allow 1000 gbps Full duplex */
  7954. - if((adapter->hw.media_type == iegbe_media_type_fiber
  7955. + /* Fiber NICs only allow 1000 gbps Full duplex */
  7956. + if((adapter->hw.media_type == iegbe_media_type_fiber
  7957. || (adapter->hw.media_type == iegbe_media_type_oem
  7958. && !iegbe_oem_phy_is_copper(&adapter->hw)))
  7959. - && spddplx != (SPEED_1000 + FULL_DUPLEX)) {
  7960. - DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
  7961. - return -EINVAL;
  7962. - }
  7963. -
  7964. - switch(spddplx) {
  7965. - case SPEED_10 + HALF_DUPLEX:
  7966. - adapter->hw.forced_speed_duplex = iegbe_10_half;
  7967. - break;
  7968. - case SPEED_10 + FULL_DUPLEX:
  7969. - adapter->hw.forced_speed_duplex = iegbe_10_full;
  7970. - break;
  7971. - case SPEED_100 + HALF_DUPLEX:
  7972. - adapter->hw.forced_speed_duplex = iegbe_100_half;
  7973. - break;
  7974. - case SPEED_100 + FULL_DUPLEX:
  7975. - adapter->hw.forced_speed_duplex = iegbe_100_full;
  7976. - break;
  7977. - case SPEED_1000 + FULL_DUPLEX:
  7978. + && spddplx != (SPEED_1000 + DUPLEX_FULL)) {
  7979. + DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
  7980. + return -EINVAL;
  7981. + }
  7982. +
  7983. + switch(spddplx) {
  7984. + case SPEED_10 + DUPLEX_HALF:
  7985. + adapter->hw.forced_speed_duplex = iegbe_10_half;
  7986. + break;
  7987. + case SPEED_10 + DUPLEX_FULL:
  7988. + adapter->hw.forced_speed_duplex = iegbe_10_full;
  7989. + break;
  7990. + case SPEED_100 + DUPLEX_HALF:
  7991. + adapter->hw.forced_speed_duplex = iegbe_100_half;
  7992. + break;
  7993. + case SPEED_100 + DUPLEX_FULL:
  7994. + adapter->hw.forced_speed_duplex = iegbe_100_full;
  7995. + break;
  7996. + case SPEED_1000 + DUPLEX_FULL:
  7997. adapter->hw.autoneg = 0x1;
  7998. - adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
  7999. - break;
  8000. - case SPEED_1000 + HALF_DUPLEX: /* not supported */
  8001. - default:
  8002. - DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
  8003. - return -EINVAL;
  8004. - }
  8005. - return 0;
  8006. + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
  8007. + break;
  8008. + case SPEED_1000 + DUPLEX_HALF: /* not supported */
  8009. + default:
  8010. + DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
  8011. + return -EINVAL;
  8012. + }
  8013. + return 0x0;
  8014. }
  8015. static int
  8016. iegbe_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
  8017. {
  8018. - struct pci_dev *pdev = NULL;
  8019. + struct pci_dev *pdev = NULL;
  8020. pm_message_t state = {0x3};
  8021. - switch(event) {
  8022. - case SYS_DOWN:
  8023. - case SYS_HALT:
  8024. - case SYS_POWER_OFF:
  8025. - while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
  8026. + switch(event) {
  8027. + case SYS_DOWN:
  8028. + case SYS_HALT:
  8029. + case SYS_POWER_OFF:
  8030. + while((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
  8031. if(pci_dev_driver(pdev) == &iegbe_driver) {
  8032. - iegbe_suspend(pdev, state);
  8033. - }
  8034. - }
  8035. + iegbe_suspend(pdev, state);
  8036. + }
  8037. + }
  8038. }
  8039. - return NOTIFY_DONE;
  8040. + return NOTIFY_DONE;
  8041. }
  8042. static int
  8043. iegbe_suspend(struct pci_dev *pdev, pm_message_t state)
  8044. {
  8045. - struct net_device *netdev = pci_get_drvdata(pdev);
  8046. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  8047. - uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
  8048. - uint32_t wufc = adapter->wol;
  8049. - uint16_t cmd_word;
  8050. + struct net_device *netdev = pci_get_drvdata(pdev);
  8051. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  8052. + uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
  8053. + uint32_t wufc = adapter->wol;
  8054. + uint16_t cmd_word;
  8055. - netif_device_detach(netdev);
  8056. + netif_device_detach(netdev);
  8057. if(netif_running(netdev)) {
  8058. - iegbe_down(adapter);
  8059. + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
  8060. + iegbe_down(adapter);
  8061. }
  8062. - /*
  8063. - * ICP_XXXX style MACs do not have a link up bit in
  8064. - * the STATUS register, query the PHY directly
  8065. - */
  8066. - if(adapter->hw.mac_type != iegbe_icp_xxxx) {
  8067. - status = E1000_READ_REG(&adapter->hw, STATUS);
  8068. + /*
  8069. + * ICP_XXXX style MACs do not have a link up bit in
  8070. + * the STATUS register, query the PHY directly
  8071. + */
  8072. + if(adapter->hw.mac_type != iegbe_icp_xxxx) {
  8073. + status = E1000_READ_REG(&adapter->hw, STATUS);
  8074. if(status & E1000_STATUS_LU) {
  8075. - wufc &= ~E1000_WUFC_LNKC;
  8076. + wufc &= ~E1000_WUFC_LNKC;
  8077. }
  8078. - } else {
  8079. - int isUp = 0;
  8080. + } else {
  8081. + int isUp = 0x0;
  8082. if(iegbe_oem_phy_is_link_up(&adapter->hw, &isUp) != E1000_SUCCESS) {
  8083. - isUp = 0;
  8084. + isUp = 0x0;
  8085. }
  8086. if(isUp) {
  8087. - wufc &= ~E1000_WUFC_LNKC;
  8088. - }
  8089. + wufc &= ~E1000_WUFC_LNKC;
  8090. + }
  8091. }
  8092. - if(wufc) {
  8093. - iegbe_setup_rctl(adapter);
  8094. - iegbe_set_multi(netdev);
  8095. -
  8096. - /* turn on all-multi mode if wake on multicast is enabled */
  8097. - if(adapter->wol & E1000_WUFC_MC) {
  8098. - rctl = E1000_READ_REG(&adapter->hw, RCTL);
  8099. - rctl |= E1000_RCTL_MPE;
  8100. - E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  8101. - }
  8102. + if(wufc) {
  8103. + iegbe_setup_rctl(adapter);
  8104. + iegbe_set_rx_mode(netdev);
  8105. +
  8106. + /* turn on all-multi mode if wake on multicast is enabled */
  8107. + if(adapter->wol & E1000_WUFC_MC) {
  8108. + rctl = E1000_READ_REG(&adapter->hw, RCTL);
  8109. + rctl |= E1000_RCTL_MPE;
  8110. + E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
  8111. + }
  8112. - if(adapter->hw.mac_type >= iegbe_82540) {
  8113. - ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  8114. - /* advertise wake from D3Cold */
  8115. - #define E1000_CTRL_ADVD3WUC 0x00100000
  8116. - /* phy power management enable */
  8117. - ctrl |= E1000_CTRL_ADVD3WUC |
  8118. - (adapter->hw.mac_type != iegbe_icp_xxxx
  8119. - ? E1000_CTRL_EN_PHY_PWR_MGMT : 0);
  8120. + if(adapter->hw.mac_type >= iegbe_82540) {
  8121. + ctrl = E1000_READ_REG(&adapter->hw, CTRL);
  8122. + /* advertise wake from D3Cold */
  8123. + #define E1000_CTRL_ADVD3WUC 0x00100000
  8124. + /* phy power management enable */
  8125. + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
  8126. + ctrl |= E1000_CTRL_ADVD3WUC |
  8127. + (adapter->hw.mac_type != iegbe_icp_xxxx
  8128. + ? E1000_CTRL_EN_PHY_PWR_MGMT : 0x0);
  8129. - E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  8130. - }
  8131. + E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  8132. + }
  8133. - if(adapter->hw.media_type == iegbe_media_type_fiber ||
  8134. - adapter->hw.media_type == iegbe_media_type_internal_serdes) {
  8135. - /* keep the laser running in D3 */
  8136. - ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  8137. - ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
  8138. - E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
  8139. - }
  8140. + if(adapter->hw.media_type == iegbe_media_type_fiber ||
  8141. + adapter->hw.media_type == iegbe_media_type_internal_serdes) {
  8142. + /* keep the laser running in D3 */
  8143. + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  8144. + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
  8145. + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
  8146. + }
  8147. /* Allow OEM PHYs (if any exist) to keep the laser
  8148. *running in D3 */
  8149. iegbe_oem_fiber_live_in_suspend(&adapter->hw);
  8150. - /* Allow time for pending master requests to run */
  8151. - iegbe_disable_pciex_master(&adapter->hw);
  8152. + /* Allow time for pending master requests to run */
  8153. + iegbe_disable_pciex_master(&adapter->hw);
  8154. - E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
  8155. - E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
  8156. + E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
  8157. + E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
  8158. pci_enable_wake(pdev, 0x3, 0x1);
  8159. pci_enable_wake(pdev, 0x4, 0x1); /* 4 == D3 cold */
  8160. - } else {
  8161. - E1000_WRITE_REG(&adapter->hw, WUC, 0);
  8162. - E1000_WRITE_REG(&adapter->hw, WUFC, 0);
  8163. - pci_enable_wake(pdev, 0x3, 0);
  8164. - pci_enable_wake(pdev, 0x4, 0); /* 4 == D3 cold */
  8165. - }
  8166. + } else {
  8167. + E1000_WRITE_REG(&adapter->hw, WUC, 0x0);
  8168. + E1000_WRITE_REG(&adapter->hw, WUFC, 0x0);
  8169. + pci_enable_wake(pdev, 0x3, 0x0);
  8170. + pci_enable_wake(pdev, 0x4, 0x0); /* 4 == D3 cold */
  8171. + }
  8172. - pci_save_state(pdev);
  8173. -
  8174. - if(adapter->hw.mac_type >= iegbe_82540
  8175. - && adapter->hw.mac_type != iegbe_icp_xxxx
  8176. - && adapter->hw.media_type == iegbe_media_type_copper) {
  8177. - manc = E1000_READ_REG(&adapter->hw, MANC);
  8178. - if(manc & E1000_MANC_SMBUS_EN) {
  8179. - manc |= E1000_MANC_ARP_EN;
  8180. - E1000_WRITE_REG(&adapter->hw, MANC, manc);
  8181. + pci_save_state(pdev);
  8182. +
  8183. + if(adapter->hw.mac_type >= iegbe_82540
  8184. + && adapter->hw.mac_type != iegbe_icp_xxxx
  8185. + && adapter->hw.media_type == iegbe_media_type_copper) {
  8186. + manc = E1000_READ_REG(&adapter->hw, MANC);
  8187. + if(manc & E1000_MANC_SMBUS_EN) {
  8188. + manc |= E1000_MANC_ARP_EN;
  8189. + E1000_WRITE_REG(&adapter->hw, MANC, manc);
  8190. pci_enable_wake(pdev, 0x3, 0x1);
  8191. pci_enable_wake(pdev, 0x4, 0x1); /* 4 == D3 cold */
  8192. - }
  8193. - }
  8194. + }
  8195. + }
  8196. - switch(adapter->hw.mac_type) {
  8197. - case iegbe_82571:
  8198. - case iegbe_82572:
  8199. - ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  8200. - E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  8201. - ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
  8202. - break;
  8203. - case iegbe_82573:
  8204. - swsm = E1000_READ_REG(&adapter->hw, SWSM);
  8205. - E1000_WRITE_REG(&adapter->hw, SWSM,
  8206. - swsm & ~E1000_SWSM_DRV_LOAD);
  8207. - break;
  8208. - default:
  8209. - break;
  8210. - }
  8211. + switch(adapter->hw.mac_type) {
  8212. + case iegbe_82571:
  8213. + case iegbe_82572:
  8214. + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  8215. + E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  8216. + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
  8217. + break;
  8218. + case iegbe_82573:
  8219. + swsm = E1000_READ_REG(&adapter->hw, SWSM);
  8220. + E1000_WRITE_REG(&adapter->hw, SWSM,
  8221. + swsm & ~E1000_SWSM_DRV_LOAD);
  8222. + break;
  8223. + default:
  8224. + break;
  8225. + }
  8226. - pci_disable_device(pdev);
  8227. - if(adapter->hw.mac_type == iegbe_icp_xxxx) {
  8228. - /*
  8229. - * ICP xxxx devices are not true PCI devices, in the context
  8230. - * of power management, disabling the bus mastership is not
  8231. - * sufficient to disable the device, it is also necessary to
  8232. - * disable IO, Memory, and Interrupts if they are enabled.
  8233. - */
  8234. - pci_read_config_word(pdev, PCI_COMMAND, &cmd_word);
  8235. + pci_disable_device(pdev);
  8236. + if(adapter->hw.mac_type == iegbe_icp_xxxx) {
  8237. + /*
  8238. + * ICP xxxx devices are not true PCI devices, in the context
  8239. + * of power management, disabling the bus mastership is not
  8240. + * sufficient to disable the device, it is also necessary to
  8241. + * disable IO, Memory, and Interrupts if they are enabled.
  8242. + */
  8243. + pci_read_config_word(pdev, PCI_COMMAND, &cmd_word);
  8244. if(cmd_word & PCI_COMMAND_IO) {
  8245. - cmd_word &= ~PCI_COMMAND_IO;
  8246. + cmd_word &= ~PCI_COMMAND_IO;
  8247. }
  8248. if(cmd_word & PCI_COMMAND_MEMORY) {
  8249. - cmd_word &= ~PCI_COMMAND_MEMORY;
  8250. + cmd_word &= ~PCI_COMMAND_MEMORY;
  8251. }
  8252. if(cmd_word & PCI_COMMAND_INTX_DISABLE) {
  8253. - cmd_word &= ~PCI_COMMAND_INTX_DISABLE;
  8254. + cmd_word &= ~PCI_COMMAND_INTX_DISABLE;
  8255. }
  8256. - pci_write_config_word(pdev, PCI_COMMAND, cmd_word);
  8257. - }
  8258. + pci_write_config_word(pdev, PCI_COMMAND, cmd_word);
  8259. + }
  8260. - state.event = (state.event > 0) ? 0x3 : 0;
  8261. - pci_set_power_state(pdev, state.event);
  8262. - if(gcu_suspend == 0)
  8263. + state.event = (state.event > 0x0) ? 0x3 : 0x0;
  8264. + pci_set_power_state(pdev, state.event);
  8265. + if(gcu_suspend == 0x0)
  8266. {
  8267. if(gcu == NULL) {
  8268. - gcu = pci_find_device(PCI_VENDOR_ID_INTEL, GCU_DEVID, NULL);
  8269. - }
  8270. + gcu = pci_get_device(PCI_VENDOR_ID_INTEL, GCU_DEVID, NULL);
  8271. + }
  8272. gcu_iegbe_suspend(gcu, 0x3);
  8273. - gcu_suspend = 1;
  8274. - gcu_resume = 0;
  8275. + gcu_suspend = 0x1;
  8276. + gcu_resume = 0x0;
  8277. }
  8278. - return 0;
  8279. + return 0x0;
  8280. }
  8281. #ifdef CONFIG_PM
  8282. static int
  8283. iegbe_resume(struct pci_dev *pdev)
  8284. {
  8285. - struct net_device *netdev = pci_get_drvdata(pdev);
  8286. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  8287. - uint32_t manc, ret_val, swsm;
  8288. - uint32_t ctrl_ext;
  8289. + struct net_device *netdev = pci_get_drvdata(pdev);
  8290. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  8291. + uint32_t manc, ret_val, swsm;
  8292. + uint32_t ctrl_ext;
  8293. int offset;
  8294. uint32_t vdid;
  8295. - if(gcu_resume == 0)
  8296. + if(gcu_resume == 0x0)
  8297. {
  8298. if(gcu == NULL) {
  8299. - gcu = pci_find_device(PCI_VENDOR_ID_INTEL, GCU_DEVID, NULL);
  8300. + gcu = pci_get_device(PCI_VENDOR_ID_INTEL, GCU_DEVID, NULL);
  8301. pci_read_config_dword(gcu, 0x00, &vdid);
  8302. - }
  8303. -
  8304. + }
  8305. +
  8306. if(gcu) {
  8307. gcu_iegbe_resume(gcu);
  8308. - gcu_resume = 1;
  8309. - gcu_suspend = 0;
  8310. + gcu_resume = 0x1;
  8311. + gcu_suspend = 0x0;
  8312. } else {
  8313. printk("Unable to resume GCU!\n");
  8314. - }
  8315. + }
  8316. }
  8317. pci_set_power_state(pdev, 0x0);
  8318. - pci_restore_state(pdev);
  8319. - ret_val = pci_enable_device(pdev);
  8320. - pci_set_master(pdev);
  8321. + pci_restore_state(pdev);
  8322. + ret_val = pci_enable_device(pdev);
  8323. + pci_set_master(pdev);
  8324. pci_enable_wake(pdev, 0x3, 0x0);
  8325. pci_enable_wake(pdev, 0x4, 0x0); /* 4 == D3 cold */
  8326. - iegbe_reset(adapter);
  8327. - E1000_WRITE_REG(&adapter->hw, WUS, ~0);
  8328. + iegbe_reset(adapter);
  8329. + E1000_WRITE_REG(&adapter->hw, WUS, ~0);
  8330. offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_ST)
  8331. + PCI_ST_SMIA_OFFSET;
  8332. pci_write_config_dword(adapter->pdev, offset, 0x00000006);
  8333. @@ -5138,51 +4848,52 @@ iegbe_resume(struct pci_dev *pdev)
  8334. E1000_WRITE_REG(&adapter->hw, IMC2, ~0UL);
  8335. if(netif_running(netdev)) {
  8336. - iegbe_up(adapter);
  8337. + iegbe_up(adapter);
  8338. }
  8339. - netif_device_attach(netdev);
  8340. -
  8341. - if(adapter->hw.mac_type >= iegbe_82540
  8342. - && adapter->hw.mac_type != iegbe_icp_xxxx
  8343. - && adapter->hw.media_type == iegbe_media_type_copper) {
  8344. - manc = E1000_READ_REG(&adapter->hw, MANC);
  8345. - manc &= ~(E1000_MANC_ARP_EN);
  8346. - E1000_WRITE_REG(&adapter->hw, MANC, manc);
  8347. - }
  8348. + netif_device_attach(netdev);
  8349. - switch(adapter->hw.mac_type) {
  8350. - case iegbe_82571:
  8351. - case iegbe_82572:
  8352. - ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  8353. - E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  8354. - ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
  8355. - break;
  8356. - case iegbe_82573:
  8357. - swsm = E1000_READ_REG(&adapter->hw, SWSM);
  8358. - E1000_WRITE_REG(&adapter->hw, SWSM,
  8359. - swsm | E1000_SWSM_DRV_LOAD);
  8360. - break;
  8361. - default:
  8362. - break;
  8363. - }
  8364. + if(adapter->hw.mac_type >= iegbe_82540
  8365. + && adapter->hw.mac_type != iegbe_icp_xxxx
  8366. + && adapter->hw.media_type == iegbe_media_type_copper) {
  8367. + manc = E1000_READ_REG(&adapter->hw, MANC);
  8368. + manc &= ~(E1000_MANC_ARP_EN);
  8369. + E1000_WRITE_REG(&adapter->hw, MANC, manc);
  8370. + }
  8371. +
  8372. + switch(adapter->hw.mac_type) {
  8373. + case iegbe_82571:
  8374. + case iegbe_82572:
  8375. + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
  8376. + E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
  8377. + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
  8378. + break;
  8379. + case iegbe_82573:
  8380. + swsm = E1000_READ_REG(&adapter->hw, SWSM);
  8381. + E1000_WRITE_REG(&adapter->hw, SWSM,
  8382. + swsm | E1000_SWSM_DRV_LOAD);
  8383. + break;
  8384. + default:
  8385. + break;
  8386. + }
  8387. +#endif
  8388. - return 0;
  8389. + return 0x0;
  8390. }
  8391. -#endif
  8392. +
  8393. #ifdef CONFIG_NET_POLL_CONTROLLER
  8394. /*
  8395. * Polling 'interrupt' - used by things like netconsole to send skbs
  8396. * without having to re-enable interrupts. It's not called while
  8397. * the interrupt routine is executing.
  8398. */
  8399. -static void
  8400. -iegbe_netpoll(struct net_device *netdev)
  8401. +static void iegbe_netpoll(struct net_device *netdev)
  8402. {
  8403. - struct iegbe_adapter *adapter = netdev_priv(netdev);
  8404. - disable_irq(adapter->pdev->irq);
  8405. - iegbe_intr(adapter->pdev->irq, netdev, NULL);
  8406. - enable_irq(adapter->pdev->irq);
  8407. + struct iegbe_adapter *adapter = netdev_priv(netdev);
  8408. + disable_irq(adapter->pdev->irq);
  8409. + iegbe_intr(adapter->pdev->irq, netdev);
  8410. + enable_irq(adapter->pdev->irq);
  8411. }
  8412. #endif
  8413. +
  8414. /* iegbe_main.c */
  8415. --- a/Embedded/src/GbE/iegbe_oem_phy.c
  8416. +++ b/Embedded/src/GbE/iegbe_oem_phy.c
  8417. @@ -2,31 +2,31 @@
  8418. GPL LICENSE SUMMARY
  8419. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  8420. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  8421. - This program is free software; you can redistribute it and/or modify
  8422. + This program is free software; you can redistribute it and/or modify
  8423. it under the terms of version 2 of the GNU General Public License as
  8424. published by the Free Software Foundation.
  8425. - This program is distributed in the hope that it will be useful, but
  8426. - WITHOUT ANY WARRANTY; without even the implied warranty of
  8427. - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  8428. + This program is distributed in the hope that it will be useful, but
  8429. + WITHOUT ANY WARRANTY; without even the implied warranty of
  8430. + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  8431. General Public License for more details.
  8432. - You should have received a copy of the GNU General Public License
  8433. - along with this program; if not, write to the Free Software
  8434. + You should have received a copy of the GNU General Public License
  8435. + along with this program; if not, write to the Free Software
  8436. Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  8437. - The full GNU General Public License is included in this distribution
  8438. + The full GNU General Public License is included in this distribution
  8439. in the file called LICENSE.GPL.
  8440. Contact Information:
  8441. Intel Corporation
  8442. - version: Embedded.L.1.0.34
  8443. + version: Embedded.Release.Patch.L.1.0.7-5
  8444. Contact Information:
  8445. - Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
  8446. + Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
  8447. *****************************************************************************/
  8448. /**************************************************************************
  8449. @@ -65,11 +65,6 @@ static int32_t iegbe_oem_link_m88_setup(
  8450. static int32_t iegbe_oem_set_phy_mode(struct iegbe_hw *hw);
  8451. static int32_t iegbe_oem_detect_phy(struct iegbe_hw *hw);
  8452. -/* Define specific BCM functions */
  8453. -static int32_t iegbe_oem_link_bcm5481_setup(struct iegbe_hw *hw);
  8454. -static int32_t bcm5481_read_18sv (struct iegbe_hw *hw, int sv, uint16_t *data);
  8455. -static int32_t oi_phy_setup (struct iegbe_hw *hw);
  8456. -
  8457. /**
  8458. * iegbe_oem_setup_link
  8459. * @hw: iegbe_hw struct containing device specific information
  8460. @@ -84,7 +79,7 @@ iegbe_oem_setup_link(struct iegbe_hw *hw
  8461. {
  8462. #ifdef EXTERNAL_MDIO
  8463. - /*
  8464. + /*
  8465. * see iegbe_setup_copper_link() as the primary example. Look at both
  8466. * the M88 and IGP functions that are called for ideas, possibly for
  8467. * power management.
  8468. @@ -102,14 +97,14 @@ iegbe_oem_setup_link(struct iegbe_hw *hw
  8469. }
  8470. /* AFU: add test to exit out if improper phy type
  8471. */
  8472. - /* relevent parts of iegbe_copper_link_preconfig */
  8473. - ctrl = E1000_READ_REG(hw, CTRL);
  8474. - ctrl |= E1000_CTRL_SLU;
  8475. - ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
  8476. - E1000_WRITE_REG(hw, CTRL, ctrl);
  8477. -
  8478. + /* relevent parts of iegbe_copper_link_preconfig */
  8479. + ctrl = E1000_READ_REG(hw, CTRL);
  8480. + ctrl |= E1000_CTRL_SLU;
  8481. + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
  8482. + E1000_WRITE_REG(hw, CTRL, ctrl);
  8483. +
  8484. /* this is required for *hw init */
  8485. - ret_val = iegbe_oem_detect_phy(hw);
  8486. + ret_val = iegbe_oem_detect_phy(hw);
  8487. if(ret_val) {
  8488. return ret_val;
  8489. }
  8490. @@ -119,23 +114,13 @@ iegbe_oem_setup_link(struct iegbe_hw *hw
  8491. }
  8492. switch (hw->phy_id) {
  8493. - case BCM5395S_PHY_ID:
  8494. - return E1000_SUCCESS;
  8495. - break;
  8496. -
  8497. case M88E1000_I_PHY_ID:
  8498. case M88E1141_E_PHY_ID:
  8499. ret_val = iegbe_oem_link_m88_setup(hw);
  8500. - if(ret_val) {
  8501. - return ret_val;
  8502. - }
  8503. - break;
  8504. - case BCM5481_PHY_ID:
  8505. - ret_val = iegbe_oem_link_bcm5481_setup(hw);
  8506. - if(ret_val) {
  8507. - return ret_val;
  8508. + if(ret_val) {
  8509. + return ret_val;
  8510. }
  8511. - break;
  8512. + break;
  8513. default:
  8514. DEBUGOUT("Invalid PHY ID\n");
  8515. return -E1000_ERR_PHY_TYPE;
  8516. @@ -143,16 +128,16 @@ iegbe_oem_setup_link(struct iegbe_hw *hw
  8517. if(hw->autoneg) {
  8518. ret_val = iegbe_copper_link_autoneg(hw);
  8519. - if(ret_val) {
  8520. - return ret_val;
  8521. - }
  8522. + if(ret_val) {
  8523. + return ret_val;
  8524. }
  8525. + }
  8526. else {
  8527. DEBUGOUT("Forcing speed and duplex\n");
  8528. ret_val = iegbe_phy_force_speed_duplex(hw);
  8529. }
  8530. -
  8531. - /*
  8532. +
  8533. + /*
  8534. * Check link status. Wait up to 100 microseconds for link to become
  8535. * valid.
  8536. */
  8537. @@ -194,51 +179,6 @@ iegbe_oem_setup_link(struct iegbe_hw *hw
  8538. #endif /* ifdef EXTERNAL_MDIO */
  8539. }
  8540. -/**
  8541. - * iegbe_oem_link_bcm5481_setup
  8542. - * @hw: iegbe_hw struct containing device specific information
  8543. - *
  8544. - * Returns E1000_SUCCESS, negative E1000 error code on failure
  8545. - *
  8546. - * copied verbatim from iegbe_oem_link_m88_setup
  8547. - **/
  8548. -static int32_t
  8549. -iegbe_oem_link_bcm5481_setup(struct iegbe_hw *hw)
  8550. -{
  8551. - int32_t ret_val;
  8552. - uint16_t phy_data;
  8553. -
  8554. - //DEBUGFUNC(__func__);
  8555. -
  8556. - if(!hw)
  8557. - return -1;
  8558. -
  8559. - /* phy_reset_disable is set in iegbe_oem_set_phy_mode */
  8560. - if(hw->phy_reset_disable)
  8561. - return E1000_SUCCESS;
  8562. -
  8563. - // Enable MDIX in extended control reg.
  8564. - ret_val = iegbe_oem_read_phy_reg_ex(hw, BCM5481_ECTRL, &phy_data);
  8565. - if(ret_val)
  8566. - {
  8567. - DEBUGOUT("Unable to read BCM5481_ECTRL register\n");
  8568. - return ret_val;
  8569. - }
  8570. -
  8571. - phy_data &= ~BCM5481_ECTRL_DISMDIX;
  8572. - ret_val = iegbe_oem_write_phy_reg_ex(hw, BCM5481_ECTRL, phy_data);
  8573. - if(ret_val)
  8574. - {
  8575. - DEBUGOUT("Unable to write BCM5481_ECTRL register\n");
  8576. - return ret_val;
  8577. - }
  8578. -
  8579. - ret_val = oi_phy_setup (hw);
  8580. - if (ret_val)
  8581. - return ret_val;
  8582. -
  8583. - return E1000_SUCCESS;
  8584. -}
  8585. /**
  8586. * iegbe_oem_link_m88_setup
  8587. @@ -253,7 +193,7 @@ static int32_t
  8588. iegbe_oem_link_m88_setup(struct iegbe_hw *hw)
  8589. {
  8590. int32_t ret_val;
  8591. - uint16_t phy_data;
  8592. + uint16_t phy_data = 0;
  8593. DEBUGFUNC1("%s",__func__);
  8594. @@ -261,7 +201,7 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
  8595. return -1;
  8596. }
  8597. - ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
  8598. + ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
  8599. &phy_data);
  8600. phy_data |= 0x00000008;
  8601. ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
  8602. @@ -279,7 +219,7 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
  8603. phy_data &= ~M88E1000_PSCR_ASSERT_CRS_ON_TX;
  8604. - /*
  8605. + /*
  8606. * Options:
  8607. * MDI/MDI-X = 0 (default)
  8608. * 0 - Auto for all speeds
  8609. @@ -305,7 +245,7 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
  8610. break;
  8611. }
  8612. - /*
  8613. + /*
  8614. * Options:
  8615. * disable_polarity_correction = 0 (default)
  8616. * Automatic Correction for Reversed Cable Polarity
  8617. @@ -316,25 +256,25 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
  8618. if(hw->disable_polarity_correction == 1) {
  8619. phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
  8620. - }
  8621. + }
  8622. ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
  8623. if(ret_val) {
  8624. DEBUGOUT("Unable to write M88E1000_PHY_SPEC_CTRL register\n");
  8625. return ret_val;
  8626. }
  8627. - /*
  8628. + /*
  8629. * Force TX_CLK in the Extended PHY Specific Control Register
  8630. * to 25MHz clock.
  8631. */
  8632. - ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_EXT_PHY_SPEC_CTRL,
  8633. + ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_EXT_PHY_SPEC_CTRL,
  8634. &phy_data);
  8635. if(ret_val) {
  8636. DEBUGOUT("Unable to read M88E1000_EXT_PHY_SPEC_CTRL register\n");
  8637. return ret_val;
  8638. }
  8639. - /*
  8640. + /*
  8641. * For Truxton, it is necessary to add RGMII tx and rx
  8642. * timing delay though the EXT_PHY_SPEC_CTRL register
  8643. */
  8644. @@ -350,13 +290,13 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
  8645. phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
  8646. M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
  8647. }
  8648. - ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_EXT_PHY_SPEC_CTRL,
  8649. + ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_EXT_PHY_SPEC_CTRL,
  8650. phy_data);
  8651. if(ret_val) {
  8652. DEBUGOUT("Unable to read M88E1000_EXT_PHY_SPEC_CTRL register\n");
  8653. return ret_val;
  8654. }
  8655. -
  8656. +
  8657. /* SW Reset the PHY so all changes take effect */
  8658. ret_val = iegbe_phy_hw_reset(hw);
  8659. @@ -371,7 +311,7 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
  8660. /**
  8661. * iegbe_oem_force_mdi
  8662. * @hw: iegbe_hw struct containing device specific information
  8663. - * @resetPhy: returns true if after calling this function the
  8664. + * @resetPhy: returns true if after calling this function the
  8665. * PHY requires a reset
  8666. *
  8667. * Returns E1000_SUCCESS, negative E1000 error code on failure
  8668. @@ -379,7 +319,7 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
  8669. * This is called from iegbe_phy_force_speed_duplex, which is
  8670. * called from iegbe_oem_setup_link.
  8671. **/
  8672. -int32_t
  8673. +int32_t
  8674. iegbe_oem_force_mdi(struct iegbe_hw *hw, int *resetPhy)
  8675. {
  8676. #ifdef EXTERNAL_MDIO
  8677. @@ -393,35 +333,30 @@ iegbe_oem_force_mdi(struct iegbe_hw *hw,
  8678. return -1;
  8679. }
  8680. - /*
  8681. + /*
  8682. * a boolean to indicate if the phy needs to be reset
  8683. - *
  8684. + *
  8685. * Make note that the M88 phy is what'll be used on Truxton
  8686. * see iegbe_phy_force_speed_duplex, which does the following for M88
  8687. */
  8688. switch (hw->phy_id) {
  8689. - case BCM5395S_PHY_ID:
  8690. - case BCM5481_PHY_ID:
  8691. - DEBUGOUT("WARNING: An empty iegbe_oem_force_mdi() has been called!\n");
  8692. - break;
  8693. -
  8694. case M88E1000_I_PHY_ID:
  8695. case M88E1141_E_PHY_ID:
  8696. - ret_val = iegbe_oem_read_phy_reg_ex(hw,
  8697. - M88E1000_PHY_SPEC_CTRL,
  8698. + ret_val = iegbe_oem_read_phy_reg_ex(hw,
  8699. + M88E1000_PHY_SPEC_CTRL,
  8700. &phy_data);
  8701. if(ret_val) {
  8702. DEBUGOUT("Unable to read M88E1000_PHY_SPEC_CTRL register\n");
  8703. return ret_val;
  8704. }
  8705. -
  8706. +
  8707. /*
  8708. - * Clear Auto-Crossover to force MDI manually. M88E1000 requires
  8709. + * Clear Auto-Crossover to force MDI manually. M88E1000 requires
  8710. * MDI forced whenever speed are duplex are forced.
  8711. */
  8712. -
  8713. +
  8714. phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
  8715. - ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
  8716. + ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
  8717. phy_data);
  8718. if(ret_val) {
  8719. DEBUGOUT("Unable to write M88E1000_PHY_SPEC_CTRL register\n");
  8720. @@ -458,7 +393,7 @@ iegbe_oem_force_mdi(struct iegbe_hw *hw,
  8721. * This is called from iegbe_phy_force_speed_duplex, which is
  8722. * called from iegbe_oem_setup_link.
  8723. **/
  8724. -int32_t
  8725. +int32_t
  8726. iegbe_oem_phy_reset_dsp(struct iegbe_hw *hw)
  8727. {
  8728. #ifdef EXTERNAL_MDIO
  8729. @@ -478,10 +413,8 @@ iegbe_oem_phy_reset_dsp(struct iegbe_hw
  8730. * no-op.
  8731. */
  8732. switch (hw->phy_id) {
  8733. - case M88E1000_I_PHY_ID:
  8734. - case M88E1141_E_PHY_ID:
  8735. - case BCM5481_PHY_ID:
  8736. - case BCM5395S_PHY_ID:
  8737. + case M88E1000_I_PHY_ID:
  8738. + case M88E1141_E_PHY_ID:
  8739. DEBUGOUT("No DSP to reset on OEM PHY\n");
  8740. break;
  8741. default:
  8742. @@ -508,7 +441,7 @@ iegbe_oem_phy_reset_dsp(struct iegbe_hw
  8743. * This is called from iegbe_phy_force_speed_duplex, which is
  8744. * called from iegbe_oem_setup_link.
  8745. **/
  8746. -int32_t
  8747. +int32_t
  8748. iegbe_oem_cleanup_after_phy_reset(struct iegbe_hw *hw)
  8749. {
  8750. #ifdef EXTERNAL_MDIO
  8751. @@ -520,29 +453,24 @@ iegbe_oem_cleanup_after_phy_reset(struct
  8752. if(!hw) {
  8753. return -1;
  8754. - }
  8755. + }
  8756. - /*
  8757. + /*
  8758. * Make note that the M88 phy is what'll be used on Truxton.
  8759. * see iegbe_phy_force_speed_duplex, which does the following for M88
  8760. */
  8761. switch (hw->phy_id) {
  8762. - case BCM5395S_PHY_ID:
  8763. - case BCM5481_PHY_ID:
  8764. - DEBUGOUT("WARNING: An empty iegbe_oem_cleanup_after_phy_reset() has been called!\n");
  8765. - break;
  8766. -
  8767. case M88E1000_I_PHY_ID:
  8768. case M88E1141_E_PHY_ID:
  8769. /*
  8770. - * Because we reset the PHY above, we need to re-force
  8771. + * Because we reset the PHY above, we need to re-force
  8772. * TX_CLK in the Extended PHY Specific Control Register to
  8773. * 25MHz clock. This value defaults back to a 2.5MHz clock
  8774. * when the PHY is reset.
  8775. */
  8776. ret_val = iegbe_oem_read_phy_reg_ex(hw,
  8777. - M88E1000_EXT_PHY_SPEC_CTRL,
  8778. + M88E1000_EXT_PHY_SPEC_CTRL,
  8779. &phy_data);
  8780. if(ret_val) {
  8781. DEBUGOUT("Unable to read M88E1000_EXT_SPEC_CTRL register\n");
  8782. @@ -550,22 +478,23 @@ iegbe_oem_cleanup_after_phy_reset(struct
  8783. }
  8784. phy_data |= M88E1000_EPSCR_TX_CLK_25;
  8785. - ret_val = iegbe_oem_write_phy_reg_ex(hw,
  8786. - M88E1000_EXT_PHY_SPEC_CTRL,
  8787. + ret_val = iegbe_oem_write_phy_reg_ex(hw,
  8788. + M88E1000_EXT_PHY_SPEC_CTRL,
  8789. phy_data);
  8790. if(ret_val) {
  8791. - DEBUGOUT("Unable to write M88E1000_EXT_PHY_SPEC_CTRL register\n");
  8792. + DEBUGOUT("Unable to write M88E1000_EXT_PHY_SPEC_CTRL "
  8793. + "register\n");
  8794. return ret_val;
  8795. }
  8796. /*
  8797. * In addition, because of the s/w reset above, we need to enable
  8798. - * CRX on TX. This must be set for both full and half duplex
  8799. + * CRX on TX. This must be set for both full and half duplex
  8800. * operation.
  8801. */
  8802. - ret_val = iegbe_oem_read_phy_reg_ex(hw,
  8803. - M88E1000_PHY_SPEC_CTRL,
  8804. + ret_val = iegbe_oem_read_phy_reg_ex(hw,
  8805. + M88E1000_PHY_SPEC_CTRL,
  8806. &phy_data);
  8807. if(ret_val) {
  8808. DEBUGOUT("Unable to read M88E1000_PHY_SPEC_CTRL register\n");
  8809. @@ -573,12 +502,12 @@ iegbe_oem_cleanup_after_phy_reset(struct
  8810. }
  8811. phy_data &= ~M88E1000_PSCR_ASSERT_CRS_ON_TX;
  8812. - ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
  8813. + ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
  8814. phy_data);
  8815. if(ret_val) {
  8816. DEBUGOUT("Unable to write M88E1000_PHY_SPEC_CTRL register\n");
  8817. return ret_val;
  8818. - }
  8819. + }
  8820. break;
  8821. default:
  8822. DEBUGOUT("Invalid PHY ID\n");
  8823. @@ -604,12 +533,12 @@ iegbe_oem_cleanup_after_phy_reset(struct
  8824. * This is called from iegbe_oem_setup_link which is
  8825. * called from iegbe_setup_link.
  8826. **/
  8827. -static int32_t
  8828. +static int32_t
  8829. iegbe_oem_set_phy_mode(struct iegbe_hw *hw)
  8830. {
  8831. /*
  8832. * it is unclear if it is necessary to set the phy mode. Right now only
  8833. - * one MAC 82545 Rev 3 does it, but the other MACs like Tolapai do not.
  8834. + * one MAC 82545 Rev 3 does it, but the other MACs like tola do not.
  8835. * Leave the functionality off for now until it is determined that Tolapai
  8836. * needs it as well.
  8837. */
  8838. @@ -638,41 +567,37 @@ iegbe_oem_set_phy_mode(struct iegbe_hw *
  8839. #ifndef skip_set_mode
  8840. DEBUGOUT("No need to call oem_set_phy_mode on Truxton\n");
  8841. #else
  8842. - /*
  8843. + /*
  8844. * Make note that the M88 phy is what'll be used on Truxton.
  8845. *
  8846. * use iegbe_set_phy_mode as example
  8847. */
  8848. switch (hw->phy_id) {
  8849. - case BCM5395S_PHY_ID:
  8850. - case BCM5481_PHY_ID:
  8851. - DEBUGOUT("WARNING: An empty iegbe_oem_set_phy_mode() has been called!\n");
  8852. - break;
  8853. -
  8854. case M88E1000_I_PHY_ID:
  8855. case M88E1141_E_PHY_ID:
  8856. - ret_val = iegbe_read_eeprom(hw,
  8857. - EEPROM_PHY_CLASS_WORD,
  8858. - 1,
  8859. + ret_val = iegbe_read_eeprom(hw,
  8860. + EEPROM_PHY_CLASS_WORD,
  8861. + 1,
  8862. &eeprom_data);
  8863. if(ret_val) {
  8864. return ret_val;
  8865. }
  8866. - if((eeprom_data != EEPROM_RESERVED_WORD) &&
  8867. - (eeprom_data & EEPROM_PHY_CLASS_A))
  8868. + if((eeprom_data != EEPROM_RESERVED_WORD) &&
  8869. + (eeprom_data & EEPROM_PHY_CLASS_A))
  8870. {
  8871. - ret_val = iegbe_oem_write_phy_reg_ex(hw,
  8872. - M88E1000_PHY_PAGE_SELECT,
  8873. - 0x000B);
  8874. + ret_val = iegbe_oem_write_phy_reg_ex(hw,
  8875. + M88E1000_PHY_PAGE_SELECT,
  8876. + 0x000B);
  8877. if(ret_val) {
  8878. - DEBUGOUT("Unable to write to M88E1000_PHY_PAGE_SELECT register on PHY\n");
  8879. + DEBUGOUT("Unable to write to M88E1000_PHY_PAGE_SELECT "
  8880. + "register on PHY\n");
  8881. return ret_val;
  8882. }
  8883. - ret_val = iegbe_oem_write_phy_reg_ex(hw,
  8884. - M88E1000_PHY_GEN_CONTROL,
  8885. - 0x8104);
  8886. + ret_val = iegbe_oem_write_phy_reg_ex(hw,
  8887. + M88E1000_PHY_GEN_CONTROL,
  8888. + 0x8104);
  8889. if(ret_val) {
  8890. DEBUGOUT("Unable to write to M88E1000_PHY_GEN_CONTROL"
  8891. "register on PHY\n");
  8892. @@ -687,11 +612,12 @@ iegbe_oem_set_phy_mode(struct iegbe_hw *
  8893. return -E1000_ERR_PHY_TYPE;
  8894. }
  8895. #endif
  8896. -
  8897. +
  8898. return E1000_SUCCESS;
  8899. }
  8900. +
  8901. /**
  8902. * iegbe_oem_detect_phy
  8903. * @hw: iegbe_hw struct containing device specific information
  8904. @@ -702,7 +628,7 @@ iegbe_oem_set_phy_mode(struct iegbe_hw *
  8905. *
  8906. * This borrows heavily from iegbe_detect_gig_phy
  8907. **/
  8908. -static int32_t
  8909. +static int32_t
  8910. iegbe_oem_detect_phy(struct iegbe_hw *hw)
  8911. {
  8912. int32_t ret_val;
  8913. @@ -715,33 +641,20 @@ iegbe_oem_detect_phy(struct iegbe_hw *hw
  8914. }
  8915. hw->phy_type = iegbe_phy_oem;
  8916. -{
  8917. - // If MAC2 (BCM5395 switch), manually detect the phy
  8918. - struct iegbe_adapter *adapter;
  8919. - uint32_t device_number;
  8920. - adapter = (struct iegbe_adapter *) hw->back;
  8921. - device_number = PCI_SLOT(adapter->pdev->devfn);
  8922. - if (device_number == ICP_XXXX_MAC_2) {
  8923. - hw->phy_id = BCM5395S_PHY_ID;
  8924. - hw->phy_revision = 0;
  8925. - return E1000_SUCCESS;
  8926. - }
  8927. -}
  8928. -
  8929. -
  8930. ret_val = iegbe_oem_read_phy_reg_ex(hw, PHY_ID1, &phy_id_high);
  8931. if(ret_val) {
  8932. DEBUGOUT("Unable to read PHY register PHY_ID1\n");
  8933. return ret_val;
  8934. }
  8935. -
  8936. +
  8937. usec_delay(0x14);
  8938. ret_val = iegbe_oem_read_phy_reg_ex(hw, PHY_ID2, &phy_id_low);
  8939. if(ret_val) {
  8940. DEBUGOUT("Unable to read PHY register PHY_ID2\n");
  8941. return ret_val;
  8942. }
  8943. - hw->phy_id = (uint32_t) ((phy_id_high << 0x10) + phy_id_low);
  8944. + hw->phy_id = (uint32_t) ((phy_id_high << 0x10) +
  8945. + (phy_id_low & PHY_REVISION_MASK));
  8946. hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK;
  8947. return E1000_SUCCESS;
  8948. @@ -753,15 +666,15 @@ iegbe_oem_detect_phy(struct iegbe_hw *hw
  8949. * @hw: iegbe_hw struct containing device specific information
  8950. *
  8951. * Returns the value of the Inter Packet Gap (IPG) Transmit Time (IPGT) in the
  8952. - * Transmit IPG register appropriate for the given PHY. This field is only 10
  8953. + * Transmit IPG register appropriate for the given PHY. This field is only 10
  8954. * bits wide.
  8955. *
  8956. * In the original iegbe code, only the IPGT field varied between media types.
  8957. - * If the OEM phy requires setting IPG Receive Time 1 & 2 Registers, it would
  8958. + * If the OEM phy requires setting IPG Receive Time 1 & 2 Registers, it would
  8959. * be required to modify the iegbe_config_tx() function to accomdate the change
  8960. *
  8961. **/
  8962. -uint32_t
  8963. +uint32_t
  8964. iegbe_oem_get_tipg(struct iegbe_hw *hw)
  8965. {
  8966. #ifdef EXTERNAL_MDIO
  8967. @@ -777,15 +690,13 @@ iegbe_oem_get_tipg(struct iegbe_hw *hw)
  8968. switch (hw->phy_id) {
  8969. case M88E1000_I_PHY_ID:
  8970. case M88E1141_E_PHY_ID:
  8971. - case BCM5481_PHY_ID:
  8972. - case BCM5395S_PHY_ID:
  8973. phy_num = DEFAULT_ICP_XXXX_TIPG_IPGT;
  8974. break;
  8975. default:
  8976. DEBUGOUT("Invalid PHY ID\n");
  8977. return DEFAULT_ICP_XXXX_TIPG_IPGT;
  8978. }
  8979. -
  8980. +
  8981. return phy_num;
  8982. #else /* ifdef EXTERNAL_MDIO */
  8983. @@ -803,15 +714,15 @@ iegbe_oem_get_tipg(struct iegbe_hw *hw)
  8984. * iegbe_oem_phy_is_copper
  8985. * @hw: iegbe_hw struct containing device specific information
  8986. *
  8987. - * Test for media type within the iegbe driver is common, so this is a simple
  8988. - * test for copper PHYs. The ICP_XXXX family of controllers initially only
  8989. - * supported copper interconnects (no TBI (ten bit interface) for Fiber
  8990. - * existed). If future revs support either Fiber or an internal SERDES, it
  8991. - * may become necessary to evaluate where this function is used to go beyond
  8992. + * Test for media type within the iegbe driver is common, so this is a simple
  8993. + * test for copper PHYs. The ICP_XXXX family of controllers initially only
  8994. + * supported copper interconnects (no TBI (ten bit interface) for Fiber
  8995. + * existed). If future revs support either Fiber or an internal SERDES, it
  8996. + * may become necessary to evaluate where this function is used to go beyond
  8997. * determining whether or not media type is just copper.
  8998. *
  8999. **/
  9000. -int
  9001. +int
  9002. iegbe_oem_phy_is_copper(struct iegbe_hw *hw)
  9003. {
  9004. #ifdef EXTERNAL_MDIO
  9005. @@ -827,23 +738,21 @@ iegbe_oem_phy_is_copper(struct iegbe_hw
  9006. switch (hw->phy_id) {
  9007. case M88E1000_I_PHY_ID:
  9008. case M88E1141_E_PHY_ID:
  9009. - case BCM5481_PHY_ID:
  9010. - case BCM5395S_PHY_ID:
  9011. isCopper = TRUE;
  9012. break;
  9013. default:
  9014. DEBUGOUT("Invalid PHY ID\n");
  9015. return -E1000_ERR_PHY_TYPE;
  9016. }
  9017. -
  9018. +
  9019. return isCopper;
  9020. #else /* ifdef EXTERNAL_MDIO */
  9021. - /*
  9022. + /*
  9023. * caught between returning true or false. True allows it to
  9024. * be entered into && statements w/o ill effect, but false
  9025. - * would make more sense
  9026. + * would make more sense
  9027. */
  9028. DEBUGOUT("Invalid value for transceiver type, return FALSE\n");
  9029. return FALSE;
  9030. @@ -856,19 +765,19 @@ iegbe_oem_phy_is_copper(struct iegbe_hw
  9031. * iegbe_oem_get_phy_dev_number
  9032. * @hw: iegbe_hw struct containing device specific information
  9033. *
  9034. - * For ICP_XXXX family of devices, there are 3 MACs, each of which may
  9035. - * have a different PHY (and indeed a different media interface). This
  9036. - * function is used to indicate which of the MAC/PHY pairs we are interested
  9037. + * For ICP_XXXX family of devices, there are 3 MACs, each of which may
  9038. + * have a different PHY (and indeed a different media interface). This
  9039. + * function is used to indicate which of the MAC/PHY pairs we are interested
  9040. * in.
  9041. - *
  9042. + *
  9043. **/
  9044. -uint32_t
  9045. +uint32_t
  9046. iegbe_oem_get_phy_dev_number(struct iegbe_hw *hw)
  9047. {
  9048. #ifdef EXTERNAL_MDIO
  9049. - /*
  9050. - * for ICP_XXXX family of devices, the three network interfaces are
  9051. + /*
  9052. + * for ICP_XXXX family of devices, the three network interfaces are
  9053. * differentiated by their PCI device number, where the three share
  9054. * the same PCI bus
  9055. */
  9056. @@ -886,15 +795,15 @@ iegbe_oem_get_phy_dev_number(struct iegb
  9057. switch(device_number)
  9058. {
  9059. - case ICP_XXXX_MAC_0:
  9060. + case ICP_XXXX_MAC_0:
  9061. + hw->phy_addr = 0x00;
  9062. + break;
  9063. + case ICP_XXXX_MAC_1:
  9064. hw->phy_addr = 0x01;
  9065. break;
  9066. - case ICP_XXXX_MAC_1:
  9067. + case ICP_XXXX_MAC_2:
  9068. hw->phy_addr = 0x02;
  9069. break;
  9070. - case ICP_XXXX_MAC_2:
  9071. - hw->phy_addr = 0x00;
  9072. - break;
  9073. default: hw->phy_addr = 0x00;
  9074. }
  9075. return hw->phy_addr;
  9076. @@ -915,7 +824,7 @@ iegbe_oem_get_phy_dev_number(struct iegb
  9077. * @cmd: the original IOCTL command that instigated the call chain.
  9078. *
  9079. * This function abstracts out the code necessary to service the
  9080. - * SIOCSMIIREG case within the iegbe_mii_ioctl() for oem PHYs.
  9081. + * SIOCSMIIREG case within the iegbe_mii_ioctl() for oem PHYs.
  9082. * iegbe_mii_ioctl() was implemented for copper phy's only and this
  9083. * function will only be called if iegbe_oem_phy_is_copper() returns true for
  9084. * a given MAC. Note that iegbe_mii_ioctl() has a compile flag
  9085. @@ -924,14 +833,14 @@ iegbe_oem_get_phy_dev_number(struct iegb
  9086. * NOTE: a spinlock is in effect for the duration of this call. It is
  9087. * imperative that a negative value be returned on any error, so
  9088. * the spinlock can be released properly.
  9089. - *
  9090. + *
  9091. **/
  9092. int
  9093. iegbe_oem_mii_ioctl(struct iegbe_adapter *adapter, unsigned long flags,
  9094. struct ifreq *ifr, int cmd)
  9095. {
  9096. #ifdef EXTERNAL_MDIO
  9097. -
  9098. +
  9099. struct mii_ioctl_data *data = if_mii(ifr);
  9100. uint16_t mii_reg = data->val_in;
  9101. uint16_t spddplx;
  9102. @@ -942,12 +851,6 @@ iegbe_oem_mii_ioctl(struct iegbe_adapter
  9103. if(!adapter || !ifr) {
  9104. return -1;
  9105. }
  9106. -
  9107. - // If MAC2 (BCM5395 switch) then leave now
  9108. - if ((PCI_SLOT(adapter->pdev->devfn)) == ICP_XXXX_MAC_2) {
  9109. - return -1;
  9110. - }
  9111. -
  9112. switch (data->reg_num) {
  9113. case PHY_CTRL:
  9114. if(mii_reg & MII_CR_POWER_DOWN) {
  9115. @@ -956,7 +859,7 @@ iegbe_oem_mii_ioctl(struct iegbe_adapter
  9116. if(mii_reg & MII_CR_AUTO_NEG_EN) {
  9117. adapter->hw.autoneg = 1;
  9118. adapter->hw.autoneg_advertised = ICP_XXXX_AUTONEG_ADV_DEFAULT;
  9119. - }
  9120. + }
  9121. else {
  9122. if(mii_reg & 0x40) {
  9123. spddplx = SPEED_1000;
  9124. @@ -976,7 +879,7 @@ iegbe_oem_mii_ioctl(struct iegbe_adapter
  9125. if(netif_running(adapter->netdev)) {
  9126. iegbe_down(adapter);
  9127. iegbe_up(adapter);
  9128. - }
  9129. + }
  9130. else {
  9131. iegbe_reset(adapter);
  9132. }
  9133. @@ -1043,10 +946,10 @@ void iegbe_oem_fiber_live_in_suspend(str
  9134. * Note: The call to iegbe_get_regs() assumed an array of 24 elements
  9135. * where the last 11 are passed to this function. If the array
  9136. * that is passed to the calling function has its size or element
  9137. - * defintions changed, this function becomes broken.
  9138. + * defintions changed, this function becomes broken.
  9139. *
  9140. **/
  9141. -void iegbe_oem_get_phy_regs(struct iegbe_adapter *adapter, uint32_t *data,
  9142. +void iegbe_oem_get_phy_regs(struct iegbe_adapter *adapter, uint32_t *data,
  9143. uint32_t data_len)
  9144. {
  9145. #define EXPECTED_ARRAY_LEN 11
  9146. @@ -1062,13 +965,13 @@ void iegbe_oem_get_phy_regs(struct iegbe
  9147. * Use the corrected_length variable to make sure we don't exceed that
  9148. * length
  9149. */
  9150. - corrected_len = data_len>EXPECTED_ARRAY_LEN
  9151. + corrected_len = data_len>EXPECTED_ARRAY_LEN
  9152. ? EXPECTED_ARRAY_LEN : data_len;
  9153. memset(data, 0, corrected_len*sizeof(uint32_t));
  9154. #ifdef EXTERNAL_MDIO
  9155. - /*
  9156. + /*
  9157. * Fill data[] with...
  9158. *
  9159. * [0] = cable length
  9160. @@ -1084,16 +987,11 @@ void iegbe_oem_get_phy_regs(struct iegbe
  9161. * [10] = mdix mode
  9162. */
  9163. switch (adapter->hw.phy_id) {
  9164. - case BCM5395S_PHY_ID:
  9165. - case BCM5481_PHY_ID:
  9166. - DEBUGOUT("WARNING: An empty iegbe_oem_get_phy_regs() has been called!\n");
  9167. - break;
  9168. -
  9169. case M88E1000_I_PHY_ID:
  9170. case M88E1141_E_PHY_ID:
  9171. if(corrected_len > 0) {
  9172. - iegbe_oem_read_phy_reg_ex(&adapter->hw,
  9173. - M88E1000_PHY_SPEC_STATUS,
  9174. + iegbe_oem_read_phy_reg_ex(&adapter->hw,
  9175. + M88E1000_PHY_SPEC_STATUS,
  9176. (uint16_t *) &data[0]);
  9177. }
  9178. if(corrected_len > 0x1){
  9179. @@ -1106,7 +1004,7 @@ void iegbe_oem_get_phy_regs(struct iegbe
  9180. data[0x3] = 0x0; /* Dummy (to align w/ IGP phy reg dump) */
  9181. }
  9182. if(corrected_len > 0x4) {
  9183. - iegbe_oem_read_phy_reg_ex(&adapter->hw, M88E1000_PHY_SPEC_CTRL,
  9184. + iegbe_oem_read_phy_reg_ex(&adapter->hw, M88E1000_PHY_SPEC_CTRL,
  9185. (uint16_t *) &data[0x4]);
  9186. }
  9187. if(corrected_len > 0x5) {
  9188. @@ -1144,7 +1042,7 @@ void iegbe_oem_get_phy_regs(struct iegbe
  9189. * This is called from iegbe_set_phy_loopback in response from call from
  9190. * ethtool to place the PHY into loopback mode.
  9191. **/
  9192. -int
  9193. +int
  9194. iegbe_oem_phy_loopback(struct iegbe_adapter *adapter)
  9195. {
  9196. #ifdef EXTERNAL_MDIO
  9197. @@ -1165,23 +1063,18 @@ iegbe_oem_phy_loopback(struct iegbe_adap
  9198. * was that nonintegrated called iegbe_phy_reset_clk_and_crs(),
  9199. * hopefully this won't matter as CRS required for half-duplex
  9200. * operation and this is set to full duplex.
  9201. - *
  9202. + *
  9203. * Make note that the M88 phy is what'll be used on Truxton
  9204. * Loopback configuration is the same for each of the supported PHYs.
  9205. */
  9206. switch (adapter->hw.phy_id) {
  9207. - case BCM5395S_PHY_ID:
  9208. - DEBUGOUT("WARNING: An empty iegbe_oem_phy_loopback() has been called!\n");
  9209. - break;
  9210. -
  9211. case M88E1000_I_PHY_ID:
  9212. case M88E1141_E_PHY_ID:
  9213. - case BCM5481_PHY_ID:
  9214. adapter->hw.autoneg = FALSE;
  9215. /* turn off Auto-MDI/MDIX */
  9216. - /*ret_val = iegbe_oem_write_phy_reg_ex(&adapter->hw,
  9217. + /*ret_val = iegbe_oem_write_phy_reg_ex(&adapter->hw,
  9218. M88E1000_PHY_SPEC_CTRL, 0x0808);
  9219. if(ret_val)
  9220. {
  9221. @@ -1206,10 +1099,10 @@ iegbe_oem_phy_loopback(struct iegbe_adap
  9222. DEBUGOUT("Unable to write to register PHY_CTRL\n");
  9223. return ret_val;
  9224. }
  9225. -
  9226. -
  9227. +
  9228. +
  9229. /* force 1000, set loopback */
  9230. - /*ret_val =
  9231. + /*ret_val =
  9232. iegbe_oem_write_phy_reg_ex(&adapter->hw, PHY_CTRL, 0x4140); */
  9233. ret_val = iegbe_oem_write_phy_reg_ex(&adapter->hw, PHY_CTRL, 0x6100);
  9234. if(ret_val) {
  9235. @@ -1228,21 +1121,21 @@ iegbe_oem_phy_loopback(struct iegbe_adap
  9236. E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);
  9237. /*
  9238. - * Write out to PHY registers 29 and 30 to disable the Receiver.
  9239. + * Write out to PHY registers 29 and 30 to disable the Receiver.
  9240. * This directly lifted from iegbe_phy_disable_receiver().
  9241. - *
  9242. + *
  9243. * The code is currently commented out as for the M88 used in
  9244. * Truxton, registers 29 and 30 are unutilized. Leave in, just
  9245. - * in case we are on the receiving end of an 'undocumented'
  9246. + * in case we are on the receiving end of an 'undocumented'
  9247. * feature
  9248. */
  9249. - /*
  9250. + /*
  9251. * iegbe_oem_write_phy_reg_ex(&adapter->hw, 29, 0x001F);
  9252. * iegbe_oem_write_phy_reg_ex(&adapter->hw, 30, 0x8FFC);
  9253. * iegbe_oem_write_phy_reg_ex(&adapter->hw, 29, 0x001A);
  9254. * iegbe_oem_write_phy_reg_ex(&adapter->hw, 30, 0x8FF0);
  9255. */
  9256. -
  9257. +
  9258. break;
  9259. default:
  9260. DEBUGOUT("Invalid PHY ID\n");
  9261. @@ -1268,15 +1161,15 @@ iegbe_oem_phy_loopback(struct iegbe_adap
  9262. * ethtool to place the PHY out of loopback mode. This handles the OEM
  9263. * specific part of loopback cleanup.
  9264. **/
  9265. -void
  9266. +void
  9267. iegbe_oem_loopback_cleanup(struct iegbe_adapter *adapter)
  9268. {
  9269. #ifdef EXTERNAL_MDIO
  9270. - /*
  9271. - * This borrows liberally from iegbe_loopback_cleanup().
  9272. + /*
  9273. + * This borrows liberally from iegbe_loopback_cleanup().
  9274. * making note that the M88 phy is what'll be used on Truxton
  9275. - *
  9276. + *
  9277. * Loopback cleanup is the same for all supported PHYs.
  9278. */
  9279. int32_t ret_val;
  9280. @@ -1289,38 +1182,32 @@ iegbe_oem_loopback_cleanup(struct iegbe_
  9281. }
  9282. switch (adapter->hw.phy_id) {
  9283. - case BCM5395S_PHY_ID:
  9284. - DEBUGOUT("WARNING: An empty iegbe_oem_loopback_cleanup() has been called!\n");
  9285. - return;
  9286. - break;
  9287. -
  9288. case M88E1000_I_PHY_ID:
  9289. case M88E1141_E_PHY_ID:
  9290. - case BCM5481_PHY_ID:
  9291. default:
  9292. adapter->hw.autoneg = TRUE;
  9293. -
  9294. - ret_val = iegbe_oem_read_phy_reg_ex(&adapter->hw, PHY_CTRL,
  9295. +
  9296. + ret_val = iegbe_oem_read_phy_reg_ex(&adapter->hw, PHY_CTRL,
  9297. &phy_reg);
  9298. if(ret_val) {
  9299. DEBUGOUT("Unable to read to register PHY_CTRL\n");
  9300. return;
  9301. }
  9302. -
  9303. +
  9304. if(phy_reg & MII_CR_LOOPBACK) {
  9305. phy_reg &= ~MII_CR_LOOPBACK;
  9306. -
  9307. - ret_val = iegbe_oem_write_phy_reg_ex(&adapter->hw, PHY_CTRL,
  9308. +
  9309. + ret_val = iegbe_oem_write_phy_reg_ex(&adapter->hw, PHY_CTRL,
  9310. phy_reg);
  9311. if(ret_val) {
  9312. DEBUGOUT("Unable to write to register PHY_CTRL\n");
  9313. return;
  9314. }
  9315. -
  9316. +
  9317. iegbe_phy_reset(&adapter->hw);
  9318. }
  9319. }
  9320. -
  9321. +
  9322. #endif /* ifdef EXTERNAL_MDIO */
  9323. return;
  9324. @@ -1336,7 +1223,7 @@ iegbe_oem_loopback_cleanup(struct iegbe_
  9325. * Called by iegbe_check_downshift(), checks the PHY to see if it running
  9326. * at as speed slower than its maximum.
  9327. **/
  9328. -uint32_t
  9329. +uint32_t
  9330. iegbe_oem_phy_speed_downgraded(struct iegbe_hw *hw, uint16_t *isDowngraded)
  9331. {
  9332. #ifdef EXTERNAL_MDIO
  9333. @@ -1356,24 +1243,19 @@ iegbe_oem_phy_speed_downgraded(struct ie
  9334. */
  9335. switch (hw->phy_id) {
  9336. - case BCM5395S_PHY_ID:
  9337. - case BCM5481_PHY_ID:
  9338. - *isDowngraded = 0;
  9339. - break;
  9340. -
  9341. case M88E1000_I_PHY_ID:
  9342. case M88E1141_E_PHY_ID:
  9343. - ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
  9344. + ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
  9345. &phy_data);
  9346. if(ret_val) {
  9347. DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
  9348. return ret_val;
  9349. }
  9350. -
  9351. - *isDowngraded = (phy_data & M88E1000_PSSR_DOWNSHIFT)
  9352. +
  9353. + *isDowngraded = (phy_data & M88E1000_PSSR_DOWNSHIFT)
  9354. >> M88E1000_PSSR_DOWNSHIFT_SHIFT;
  9355. -
  9356. - break;
  9357. +
  9358. + break;
  9359. default:
  9360. DEBUGOUT("Invalid PHY ID\n");
  9361. return 1;
  9362. @@ -1388,7 +1270,7 @@ iegbe_oem_phy_speed_downgraded(struct ie
  9363. }
  9364. *isDowngraded = 0;
  9365. - return 0;
  9366. + return 0;
  9367. #endif /* ifdef EXTERNAL_MDIO */
  9368. }
  9369. @@ -1403,7 +1285,7 @@ iegbe_oem_phy_speed_downgraded(struct ie
  9370. * Called by iegbe_check_downshift(), checks the PHY to see if it running
  9371. * at as speed slower than its maximum.
  9372. **/
  9373. -int32_t
  9374. +int32_t
  9375. iegbe_oem_check_polarity(struct iegbe_hw *hw, uint16_t *polarity)
  9376. {
  9377. #ifdef EXTERNAL_MDIO
  9378. @@ -1417,33 +1299,27 @@ iegbe_oem_check_polarity(struct iegbe_hw
  9379. return -1;
  9380. }
  9381. - /*
  9382. + /*
  9383. * borrow liberally from iegbe_check_polarity.
  9384. * Make note that the M88 phy is what'll be used on Truxton
  9385. */
  9386. switch (hw->phy_id) {
  9387. - case BCM5395S_PHY_ID:
  9388. - case BCM5481_PHY_ID:
  9389. - *polarity = 0;
  9390. - break;
  9391. -
  9392. case M88E1000_I_PHY_ID:
  9393. case M88E1141_E_PHY_ID:
  9394. /* return the Polarity bit in the Status register. */
  9395. - ret_val = iegbe_oem_read_phy_reg_ex(hw,
  9396. - M88E1000_PHY_SPEC_STATUS,
  9397. + ret_val = iegbe_oem_read_phy_reg_ex(hw,
  9398. + M88E1000_PHY_SPEC_STATUS,
  9399. &phy_data);
  9400. if(ret_val) {
  9401. DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
  9402. return ret_val;
  9403. }
  9404. - *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY)
  9405. + *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY)
  9406. >> M88E1000_PSSR_REV_POLARITY_SHIFT;
  9407. -
  9408. - break;
  9409. -
  9410. +
  9411. + break;
  9412. default:
  9413. DEBUGOUT("Invalid PHY ID\n");
  9414. return -E1000_ERR_PHY_TYPE;
  9415. @@ -1472,7 +1348,7 @@ iegbe_oem_check_polarity(struct iegbe_hw
  9416. * the MAC with the PHY. It turns out on ICP_XXXX, this is not
  9417. * done automagically.
  9418. **/
  9419. -int32_t
  9420. +int32_t
  9421. iegbe_oem_phy_is_full_duplex(struct iegbe_hw *hw, int *isFD)
  9422. {
  9423. #ifdef EXTERNAL_MDIO
  9424. @@ -1485,40 +1361,22 @@ iegbe_oem_phy_is_full_duplex(struct iegb
  9425. if(!hw || !isFD) {
  9426. return -1;
  9427. }
  9428. - /*
  9429. + /*
  9430. * Make note that the M88 phy is what'll be used on Truxton
  9431. * see iegbe_config_mac_to_phy
  9432. */
  9433. -
  9434. +
  9435. switch (hw->phy_id) {
  9436. - case BCM5395S_PHY_ID:
  9437. - /* Always full duplex */
  9438. - *isFD = 1;
  9439. - break;
  9440. -
  9441. - case BCM5481_PHY_ID:
  9442. - ret_val = iegbe_read_phy_reg(hw, BCM5481_ASTAT, &phy_data);
  9443. - if(ret_val) return ret_val;
  9444. -
  9445. - switch (BCM5481_ASTAT_HCD(phy_data)) {
  9446. - case BCM5481_ASTAT_1KBTFD:
  9447. - case BCM5481_ASTAT_100BTXFD:
  9448. - *isFD = 1;
  9449. - break;
  9450. - default:
  9451. - *isFD = 0;
  9452. - }
  9453. - break;
  9454. -
  9455. case M88E1000_I_PHY_ID:
  9456. case M88E1141_E_PHY_ID:
  9457. - ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
  9458. - if(ret_val) {
  9459. - DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
  9460. - return ret_val;
  9461. - }
  9462. + ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
  9463. + &phy_data);
  9464. + if(ret_val) {
  9465. + DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
  9466. + return ret_val;
  9467. + }
  9468. *isFD = (phy_data & M88E1000_PSSR_DPLX) != 0;
  9469. -
  9470. +
  9471. break;
  9472. default:
  9473. DEBUGOUT("Invalid PHY ID\n");
  9474. @@ -1546,7 +1404,7 @@ iegbe_oem_phy_is_full_duplex(struct iegb
  9475. * the MAC with the PHY. It turns out on ICP_XXXX, this is not
  9476. * done automagically.
  9477. **/
  9478. -int32_t
  9479. +int32_t
  9480. iegbe_oem_phy_is_speed_1000(struct iegbe_hw *hw, int *is1000)
  9481. {
  9482. #ifdef EXTERNAL_MDIO
  9483. @@ -1565,28 +1423,10 @@ iegbe_oem_phy_is_speed_1000(struct iegbe
  9484. */
  9485. switch (hw->phy_id) {
  9486. - case BCM5395S_PHY_ID:
  9487. - /* Always 1000mb */
  9488. - *is1000 = 1;
  9489. - break;
  9490. -
  9491. - case BCM5481_PHY_ID:
  9492. - ret_val = iegbe_read_phy_reg(hw, BCM5481_ASTAT, &phy_data);
  9493. - if(ret_val) return ret_val;
  9494. -
  9495. - switch (BCM5481_ASTAT_HCD(phy_data)) {
  9496. - case BCM5481_ASTAT_1KBTFD:
  9497. - case BCM5481_ASTAT_1KBTHD:
  9498. - *is1000 = 1;
  9499. - break;
  9500. - default:
  9501. - *is1000 = 0;
  9502. - }
  9503. - break;
  9504. -
  9505. case M88E1000_I_PHY_ID:
  9506. case M88E1141_E_PHY_ID:
  9507. - ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
  9508. + ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
  9509. + &phy_data);
  9510. if(ret_val) {
  9511. DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
  9512. return ret_val;
  9513. @@ -1638,28 +1478,9 @@ iegbe_oem_phy_is_speed_100(struct iegbe_
  9514. * see iegbe_config_mac_to_phy
  9515. */
  9516. switch (hw->phy_id) {
  9517. - case BCM5395S_PHY_ID:
  9518. - /* Always 1000Mb, never 100mb */
  9519. - *is100 = 0;
  9520. - break;
  9521. -
  9522. - case BCM5481_PHY_ID:
  9523. - ret_val = iegbe_read_phy_reg(hw, BCM5481_ASTAT, &phy_data);
  9524. - if(ret_val) return ret_val;
  9525. -
  9526. - switch (BCM5481_ASTAT_HCD(phy_data)) {
  9527. - case BCM5481_ASTAT_100BTXFD:
  9528. - case BCM5481_ASTAT_100BTXHD:
  9529. - *is100 = 1;
  9530. - break;
  9531. - default:
  9532. - *is100 = 0;
  9533. - }
  9534. - break;
  9535. -
  9536. case M88E1000_I_PHY_ID:
  9537. case M88E1141_E_PHY_ID:
  9538. - ret_val = iegbe_oem_read_phy_reg_ex(hw,
  9539. + ret_val = iegbe_oem_read_phy_reg_ex(hw,
  9540. M88E1000_PHY_SPEC_STATUS,
  9541. &phy_data);
  9542. if(ret_val) {
  9543. @@ -1714,29 +1535,24 @@ iegbe_oem_phy_get_info(struct iegbe_hw *
  9544. * see iegbe_phy_m88_get_info
  9545. */
  9546. switch (hw->phy_id) {
  9547. - case BCM5395S_PHY_ID:
  9548. - case BCM5481_PHY_ID:
  9549. - DEBUGOUT("WARNING: An empty iegbe_oem_phy_get_info() has been called!\n");
  9550. - break;
  9551. -
  9552. case M88E1000_I_PHY_ID:
  9553. case M88E1141_E_PHY_ID:
  9554. - /* The downshift status is checked only once, after link is
  9555. - * established and it stored in the hw->speed_downgraded parameter.*/
  9556. + /* The downshift status is checked only once, after link is
  9557. + * established and it stored in the hw->speed_downgraded parameter.*/
  9558. phy_info->downshift = (iegbe_downshift)hw->speed_downgraded;
  9559. -
  9560. - ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
  9561. +
  9562. + ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
  9563. &phy_data);
  9564. if(ret_val) {
  9565. DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_CTRL\n");
  9566. return ret_val;
  9567. }
  9568. - phy_info->extended_10bt_distance =
  9569. - (phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE)
  9570. + phy_info->extended_10bt_distance =
  9571. + (phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE)
  9572. >> M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT;
  9573. phy_info->polarity_correction =
  9574. - (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
  9575. + (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
  9576. >> M88E1000_PSCR_POLARITY_REVERSAL_SHIFT;
  9577. /* Check polarity status */
  9578. @@ -1747,11 +1563,11 @@ iegbe_oem_phy_get_info(struct iegbe_hw *
  9579. phy_info->cable_polarity = polarity;
  9580. - ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
  9581. + ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
  9582. &phy_data);
  9583. if(ret_val) {
  9584. - DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
  9585. - return ret_val;
  9586. + DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
  9587. + return ret_val;
  9588. }
  9589. phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX)
  9590. @@ -1761,24 +1577,24 @@ iegbe_oem_phy_get_info(struct iegbe_hw *
  9591. /* Cable Length Estimation and Local/Remote Receiver Information
  9592. * are only valid at 1000 Mbps.
  9593. */
  9594. - phy_info->cable_length =
  9595. + phy_info->cable_length =
  9596. (phy_data & M88E1000_PSSR_CABLE_LENGTH)
  9597. >> M88E1000_PSSR_CABLE_LENGTH_SHIFT;
  9598. - ret_val = iegbe_oem_read_phy_reg_ex(hw, PHY_1000T_STATUS,
  9599. + ret_val = iegbe_oem_read_phy_reg_ex(hw, PHY_1000T_STATUS,
  9600. &phy_data);
  9601. if(ret_val) {
  9602. DEBUGOUT("Unable to read register PHY_1000T_STATUS\n");
  9603. return ret_val;
  9604. }
  9605. - phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
  9606. + phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
  9607. >> SR_1000T_LOCAL_RX_STATUS_SHIFT;
  9608. -
  9609. - phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
  9610. +
  9611. + phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
  9612. >> SR_1000T_REMOTE_RX_STATUS_SHIFT;
  9613. }
  9614. -
  9615. +
  9616. break;
  9617. default:
  9618. DEBUGOUT("Invalid PHY ID\n");
  9619. @@ -1801,7 +1617,7 @@ iegbe_oem_phy_get_info(struct iegbe_hw *
  9620. * This function will perform a software initiated reset of
  9621. * the PHY
  9622. **/
  9623. -int32_t
  9624. +int32_t
  9625. iegbe_oem_phy_hw_reset(struct iegbe_hw *hw)
  9626. {
  9627. #ifdef EXTERNAL_MDIO
  9628. @@ -1815,18 +1631,13 @@ iegbe_oem_phy_hw_reset(struct iegbe_hw *
  9629. return -1;
  9630. }
  9631. /*
  9632. - * This code pretty much copies the default case from
  9633. + * This code pretty much copies the default case from
  9634. * iegbe_phy_reset() as that is what is appropriate for
  9635. - * the M88 used in truxton.
  9636. + * the M88 used in truxton.
  9637. */
  9638. switch (hw->phy_id) {
  9639. - case BCM5395S_PHY_ID:
  9640. - DEBUGOUT("WARNING: An empty iegbe_oem_phy_hw_reset() has been called!\n");
  9641. - break;
  9642. -
  9643. case M88E1000_I_PHY_ID:
  9644. case M88E1141_E_PHY_ID:
  9645. - case BCM5481_PHY_ID:
  9646. ret_val = iegbe_oem_read_phy_reg_ex(hw, PHY_CTRL, &phy_data);
  9647. if(ret_val) {
  9648. DEBUGOUT("Unable to read register PHY_CTRL\n");
  9649. @@ -1864,7 +1675,7 @@ iegbe_oem_phy_hw_reset(struct iegbe_hw *
  9650. * to perform and post reset initialiation. Not all PHYs require
  9651. * this, which is why it was split off as a seperate function.
  9652. **/
  9653. -void
  9654. +void
  9655. iegbe_oem_phy_init_script(struct iegbe_hw *hw)
  9656. {
  9657. #ifdef EXTERNAL_MDIO
  9658. @@ -1877,19 +1688,17 @@ iegbe_oem_phy_init_script(struct iegbe_h
  9659. /* call the GCU func that can do any phy specific init
  9660. * functions after a reset
  9661. - *
  9662. + *
  9663. * Make note that the M88 phy is what'll be used on Truxton
  9664. *
  9665. - * The closest thing is in iegbe_phy_init_script, however this is
  9666. + * The closest thing is in iegbe_phy_init_script, however this is
  9667. * for the IGP style of phy. This is probably a no-op for truxton
  9668. * but may be needed by OEM's later on
  9669. - *
  9670. + *
  9671. */
  9672. switch (hw->phy_id) {
  9673. case M88E1000_I_PHY_ID:
  9674. case M88E1141_E_PHY_ID:
  9675. - case BCM5481_PHY_ID:
  9676. - case BCM5395S_PHY_ID:
  9677. DEBUGOUT("Nothing to do for OEM PHY Init");
  9678. break;
  9679. default:
  9680. @@ -1926,13 +1735,8 @@ iegbe_oem_read_phy_reg_ex(struct iegbe_h
  9681. return -1;
  9682. }
  9683. - if (hw->phy_id == BCM5395S_PHY_ID) {
  9684. - DEBUGOUT("WARNING: iegbe_oem_read_phy_reg_ex() has been unexpectedly called!\n");
  9685. - return -1;
  9686. - }
  9687. -
  9688. /* call the GCU func that will read the phy
  9689. - *
  9690. + *
  9691. * Make note that the M88 phy is what'll be used on Truxton.
  9692. *
  9693. * The closest thing is in iegbe_read_phy_reg_ex.
  9694. @@ -1940,7 +1744,7 @@ iegbe_oem_read_phy_reg_ex(struct iegbe_h
  9695. * NOTE: this is 1 (of 2) functions that is truly dependant on the
  9696. * gcu module
  9697. */
  9698. -
  9699. +
  9700. ret_val = gcu_read_eth_phy(iegbe_oem_get_phy_dev_number(hw),
  9701. reg_addr, phy_data);
  9702. if(ret_val) {
  9703. @@ -1962,10 +1766,10 @@ iegbe_oem_read_phy_reg_ex(struct iegbe_h
  9704. *
  9705. * Returns E1000_SUCCESS, negative E1000 error code on failure
  9706. *
  9707. - * This is called from iegbe_config_mac_to_phy. Various supported
  9708. + * This is called from iegbe_config_mac_to_phy. Various supported
  9709. * Phys may require the RGMII/RMII Translation gasket be set to RMII.
  9710. **/
  9711. -int32_t
  9712. +int32_t
  9713. iegbe_oem_set_trans_gasket(struct iegbe_hw *hw)
  9714. {
  9715. #ifdef EXTERNAL_MDIO
  9716. @@ -1978,17 +1782,12 @@ iegbe_oem_set_trans_gasket(struct iegbe_
  9717. }
  9718. switch (hw->phy_id) {
  9719. - case BCM5395S_PHY_ID:
  9720. - case BCM5481_PHY_ID:
  9721. - DEBUGOUT("WARNING: An empty iegbe_oem_set_trans_gasket() has been called!\n");
  9722. - break;
  9723. -
  9724. case M88E1000_I_PHY_ID:
  9725. case M88E1141_E_PHY_ID:
  9726. /* Gasket set correctly for Marvell Phys, so nothing to do */
  9727. break;
  9728. /* Add your PHY_ID here if your device requires an RMII interface
  9729. - case YOUR_PHY_ID:
  9730. + case YOUR_PHY_ID:
  9731. ctrl_aux_reg = E1000_READ_REG(hw, CTRL_AUX);
  9732. ctrl_aux_reg |= E1000_CTRL_AUX_ICP_xxxx_MII_TGS; // Set the RGMII_RMII bit
  9733. */
  9734. @@ -2032,7 +1831,7 @@ iegbe_oem_write_phy_reg_ex(struct iegbe_
  9735. return -1;
  9736. }
  9737. /* call the GCU func that will write to the phy
  9738. - *
  9739. + *
  9740. * Make note that the M88 phy is what'll be used on Truxton.
  9741. *
  9742. * The closest thing is in iegbe_write_phy_reg_ex
  9743. @@ -2062,11 +1861,11 @@ iegbe_oem_write_phy_reg_ex(struct iegbe_
  9744. * @hw struct iegbe_hw hardware specific data
  9745. *
  9746. * iegbe_reset_hw is called to reset the MAC. If, for
  9747. - * some reason the PHY needs to be reset as well, this
  9748. + * some reason the PHY needs to be reset as well, this
  9749. * should return TRUE and then iegbe_oem_phy_hw_reset()
  9750. * will be called.
  9751. **/
  9752. -int
  9753. +int
  9754. iegbe_oem_phy_needs_reset_with_mac(struct iegbe_hw *hw)
  9755. {
  9756. #ifdef EXTERNAL_MDIO
  9757. @@ -2079,16 +1878,14 @@ iegbe_oem_phy_needs_reset_with_mac(struc
  9758. return FALSE;
  9759. }
  9760. - /*
  9761. + /*
  9762. * From the original iegbe driver, the M88
  9763. - * PHYs did not seem to need this reset,
  9764. + * PHYs did not seem to need this reset,
  9765. * so returning FALSE.
  9766. */
  9767. switch (hw->phy_id) {
  9768. case M88E1000_I_PHY_ID:
  9769. case M88E1141_E_PHY_ID:
  9770. - case BCM5481_PHY_ID:
  9771. - case BCM5395S_PHY_ID:
  9772. ret_val = FALSE;
  9773. break;
  9774. default:
  9775. @@ -2116,7 +1913,7 @@ iegbe_oem_phy_needs_reset_with_mac(struc
  9776. * tweaking of the PHY, for PHYs that support a DSP.
  9777. *
  9778. **/
  9779. -int32_t
  9780. +int32_t
  9781. iegbe_oem_config_dsp_after_link_change(struct iegbe_hw *hw,
  9782. int link_up)
  9783. {
  9784. @@ -2138,8 +1935,6 @@ iegbe_oem_config_dsp_after_link_change(s
  9785. switch (hw->phy_id) {
  9786. case M88E1000_I_PHY_ID:
  9787. case M88E1141_E_PHY_ID:
  9788. - case BCM5481_PHY_ID:
  9789. - case BCM5395S_PHY_ID:
  9790. DEBUGOUT("No DSP to configure on OEM PHY");
  9791. break;
  9792. default:
  9793. @@ -2165,7 +1960,7 @@ iegbe_oem_config_dsp_after_link_change(s
  9794. *
  9795. *
  9796. **/
  9797. -int32_t
  9798. +int32_t
  9799. iegbe_oem_get_cable_length(struct iegbe_hw *hw,
  9800. uint16_t *min_length,
  9801. uint16_t *max_length)
  9802. @@ -2177,21 +1972,15 @@ iegbe_oem_get_cable_length(struct iegbe_
  9803. uint16_t phy_data;
  9804. DEBUGFUNC1("%s",__func__);
  9805. -
  9806. +
  9807. if(!hw || !min_length || !max_length) {
  9808. return -1;
  9809. }
  9810. switch (hw->phy_id) {
  9811. - case BCM5395S_PHY_ID:
  9812. - case BCM5481_PHY_ID:
  9813. - *min_length = 0;
  9814. - *max_length = iegbe_igp_cable_length_150;
  9815. - break;
  9816. -
  9817. case M88E1000_I_PHY_ID:
  9818. case M88E1141_E_PHY_ID:
  9819. - ret_val = iegbe_oem_read_phy_reg_ex(hw,
  9820. + ret_val = iegbe_oem_read_phy_reg_ex(hw,
  9821. M88E1000_PHY_SPEC_STATUS,
  9822. &phy_data);
  9823. if(ret_val) {
  9824. @@ -2246,13 +2035,13 @@ iegbe_oem_get_cable_length(struct iegbe_
  9825. /**
  9826. * iegbe_oem_phy_is_link_up
  9827. * @hw iegbe_hw struct containing device specific information
  9828. - * @isUp a boolean returning true if link is up
  9829. + * @isUp a boolean returning true if link is up
  9830. *
  9831. * This is called as part of iegbe_config_mac_to_phy() to align
  9832. * the MAC with the PHY. It turns out on ICP_XXXX, this is not
  9833. * done automagically.
  9834. **/
  9835. -int32_t
  9836. +int32_t
  9837. iegbe_oem_phy_is_link_up(struct iegbe_hw *hw, int *isUp)
  9838. {
  9839. #ifdef EXTERNAL_MDIO
  9840. @@ -2266,35 +2055,19 @@ iegbe_oem_phy_is_link_up(struct iegbe_hw
  9841. if(!hw || !isUp) {
  9842. return -1;
  9843. }
  9844. - /*
  9845. + /*
  9846. * Make note that the M88 phy is what'll be used on Truxton
  9847. * see iegbe_config_mac_to_phy
  9848. */
  9849. switch (hw->phy_id) {
  9850. - case BCM5395S_PHY_ID:
  9851. - /* Link always up */
  9852. - *isUp = TRUE;
  9853. - return E1000_SUCCESS;
  9854. - break;
  9855. -
  9856. - case BCM5481_PHY_ID:
  9857. - iegbe_oem_read_phy_reg_ex(hw, BCM5481_ESTAT, &phy_data);
  9858. - ret_val = iegbe_oem_read_phy_reg_ex(hw, BCM5481_ESTAT, &phy_data);
  9859. - if(ret_val)
  9860. - {
  9861. - DEBUGOUT("Unable to read PHY register BCM5481_ESTAT\n");
  9862. - return ret_val;
  9863. - }
  9864. - statusMask = BCM5481_ESTAT_LINK;
  9865. - break;
  9866. -
  9867. - case M88E1000_I_PHY_ID:
  9868. + case M88E1000_I_PHY_ID:
  9869. case M88E1141_E_PHY_ID:
  9870. - iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
  9871. - ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
  9872. + iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
  9873. + ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
  9874. + &phy_data);
  9875. statusMask = M88E1000_PSSR_LINK;
  9876. - break;
  9877. + break;
  9878. default:
  9879. DEBUGOUT("Invalid PHY ID\n");
  9880. return -E1000_ERR_PHY_TYPE;
  9881. @@ -2319,213 +2092,3 @@ iegbe_oem_phy_is_link_up(struct iegbe_hw
  9882. #endif /* ifdef EXTERNAL_MDIO */
  9883. }
  9884. -
  9885. -
  9886. -//-----
  9887. -// Read BCM5481 expansion register
  9888. -//
  9889. -int32_t
  9890. -bcm5481_read_ex (struct iegbe_hw *hw, uint16_t reg, uint16_t *data)
  9891. -{
  9892. - int ret;
  9893. - uint16_t selector;
  9894. - uint16_t reg_data;
  9895. -
  9896. - // Get the current value of bits 15:12
  9897. - ret = iegbe_oem_read_phy_reg_ex (hw, 0x15, &selector);
  9898. - if (ret)
  9899. - return ret;
  9900. -
  9901. - // Select the expansion register
  9902. - selector &= 0xf000;
  9903. - selector |= (0xf << 8) | (reg);
  9904. - iegbe_oem_write_phy_reg_ex (hw, 0x17, selector);
  9905. -
  9906. - // Read the expansion register
  9907. - ret = iegbe_oem_read_phy_reg_ex (hw, 0x15, &reg_data);
  9908. -
  9909. - // De-select the expansion registers.
  9910. - selector &= 0xf000;
  9911. - iegbe_oem_write_phy_reg_ex (hw, 0x17, selector);
  9912. -
  9913. - if (ret)
  9914. - return ret;
  9915. -
  9916. - *data = reg_data;
  9917. - return ret;
  9918. -}
  9919. -
  9920. -//-----
  9921. -// Read reg 0x18 sub-register
  9922. -//
  9923. -static int32_t
  9924. -bcm5481_read_18sv (struct iegbe_hw *hw, int sv, uint16_t *data)
  9925. -{
  9926. - int ret;
  9927. - uint16_t tmp_data;
  9928. -
  9929. - // Select reg 0x18, sv
  9930. - tmp_data = ((sv & BCM5481_R18H_SV_MASK) << 12) | BCM5481_R18H_SV_MCTRL;
  9931. - ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R18H, tmp_data);
  9932. - if(ret)
  9933. - return ret;
  9934. -
  9935. - // Read reg 0x18, sv
  9936. - ret = iegbe_oem_read_phy_reg_ex (hw, BCM5481_R18H, &tmp_data);
  9937. - if(ret)
  9938. - return ret;
  9939. -
  9940. - *data = tmp_data;
  9941. - return ret;
  9942. -}
  9943. -
  9944. -//-----
  9945. -// Read reg 0x1C sub-register
  9946. -//
  9947. -int32_t
  9948. -bcm5481_read_1csv (struct iegbe_hw *hw, int sv, uint16_t *data)
  9949. -{
  9950. - int ret;
  9951. - uint16_t tmp_data;
  9952. -
  9953. - // Select reg 0x1c, sv
  9954. - tmp_data = ((sv & BCM5481_R1CH_SV_MASK) << BCM5481_R1CH_SV_SHIFT);
  9955. -
  9956. - ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R1CH, tmp_data);
  9957. - if(ret)
  9958. - return ret;
  9959. -
  9960. - // Read reg 0x1c, sv
  9961. - ret = iegbe_oem_read_phy_reg_ex (hw, BCM5481_R1CH, &tmp_data);
  9962. - if(ret)
  9963. - return ret;
  9964. -
  9965. - *data = tmp_data;
  9966. - return ret;
  9967. -}
  9968. -
  9969. -//-----
  9970. -// Read-modify-write a 0x1C register.
  9971. -//
  9972. -// hw - hardware access info.
  9973. -// reg - 0x1C register to modify.
  9974. -// data - bits which should be set.
  9975. -// mask - the '1' bits in this argument will be cleared in the data
  9976. -// read from 'reg' then 'data' will be or'd in and the result
  9977. -// will be written to 'reg'.
  9978. -
  9979. -int32_t
  9980. -bcm5481_rmw_1csv (struct iegbe_hw *hw, uint16_t reg, uint16_t data, uint16_t mask)
  9981. -{
  9982. - int32_t ret;
  9983. - uint16_t reg_data;
  9984. -
  9985. - ret = 0;
  9986. -
  9987. - ret = bcm5481_read_1csv (hw, reg, &reg_data);
  9988. - if (ret)
  9989. - {
  9990. - DEBUGOUT("Unable to read BCM5481 1CH register\n");
  9991. - printk (KERN_ERR "Unable to read BCM5481 1CH register [0x%x]\n", reg);
  9992. - return ret;
  9993. - }
  9994. -
  9995. - reg_data &= ~mask;
  9996. - reg_data |= (BCM5481_R1CH_WE | data);
  9997. -
  9998. - ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R1CH, reg_data);
  9999. - if(ret)
  10000. - {
  10001. - DEBUGOUT("Unable to write BCM5481 1CH register\n");
  10002. - printk (KERN_ERR "Unable to write BCM5481 1CH register\n");
  10003. - return ret;
  10004. - }
  10005. -
  10006. - return ret;
  10007. -}
  10008. -
  10009. -int32_t
  10010. -oi_phy_setup (struct iegbe_hw *hw)
  10011. -{
  10012. - int ret;
  10013. - uint16_t pmii_data;
  10014. - uint16_t mctrl_data;
  10015. - uint16_t cacr_data;
  10016. - uint16_t sc1_data;
  10017. - uint16_t lctl_data;
  10018. -
  10019. - ret = 0;
  10020. -
  10021. - // Set low power mode via reg 0x18, sv010, bit 6
  10022. - // Do a read-modify-write on reg 0x18, sv010 register to preserve existing bits.
  10023. - ret = bcm5481_read_18sv (hw, BCM5481_R18H_SV_PMII, &pmii_data);
  10024. - if (ret)
  10025. - {
  10026. - DEBUGOUT("Unable to read BCM5481_R18H_SV_PMII register\n");
  10027. - printk (KERN_ERR "Unable to read BCM5481_R18H_SV_PMII register\n");
  10028. - return ret;
  10029. - }
  10030. -
  10031. - // Set the LPM bit in the data just read and write back to sv010
  10032. - // The shadow register select bits [2:0] are set by reading the sv010
  10033. - // register.
  10034. - pmii_data |= BCM5481_R18H_SV010_LPM;
  10035. - ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R18H, pmii_data);
  10036. - if(ret)
  10037. - {
  10038. - DEBUGOUT("Unable to write BCM5481_R18H register\n");
  10039. - printk (KERN_ERR "Unable to write BCM5481_R18H register\n");
  10040. - return ret;
  10041. - }
  10042. -
  10043. -
  10044. - // Set the RGMII RXD to RXC skew bit in reg 0x18, sv111
  10045. -
  10046. - if (bcm5481_read_18sv (hw, BCM5481_R18H_SV_MCTRL, &mctrl_data))
  10047. - {
  10048. - DEBUGOUT("Unable to read BCM5481_R18H_SV_MCTRL register\n");
  10049. - printk (KERN_ERR "Unable to read BCM5481_R18H_SV_MCTRL register\n");
  10050. - return ret;
  10051. - }
  10052. - mctrl_data |= (BCM5481_R18H_WE | BCM5481_R18H_SV111_SKEW);
  10053. -
  10054. - ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R18H, mctrl_data);
  10055. - if(ret)
  10056. - {
  10057. - DEBUGOUT("Unable to write BCM5481_R18H register\n");
  10058. - printk (KERN_ERR "Unable to write BCM5481_R18H register\n");
  10059. - return ret;
  10060. - }
  10061. -
  10062. -
  10063. - // Enable RGMII transmit clock delay in reg 0x1c, sv00011
  10064. - ret = bcm5481_read_1csv (hw, BCM5481_R1CH_CACR, &cacr_data);
  10065. - if (ret)
  10066. - {
  10067. - DEBUGOUT("Unable to read BCM5481_R1CH_CACR register\n");
  10068. - printk (KERN_ERR "Unable to read BCM5481_R1CH_CACR register\n");
  10069. - return ret;
  10070. - }
  10071. -
  10072. - cacr_data |= (BCM5481_R1CH_WE | BCM5481_R1CH_CACR_TCD);
  10073. -
  10074. - ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R1CH, cacr_data);
  10075. - if(ret)
  10076. - {
  10077. - DEBUGOUT("Unable to write BCM5481_R1CH register\n");
  10078. - printk (KERN_ERR "Unable to write BCM5481_R1CH register\n");
  10079. - return ret;
  10080. - }
  10081. -
  10082. - // Enable dual link speed indication (0x1c, sv 00010, bit 2)
  10083. - ret = bcm5481_rmw_1csv (hw, BCM5481_R1CH_SC1, BCM5481_R1CH_SC1_LINK, BCM5481_R1CH_SC1_LINK);
  10084. - if (ret)
  10085. - return ret;
  10086. -
  10087. - // Enable link and activity on ACTIVITY LED (0x1c, sv 01001, bit 4=1, bit 3=0)
  10088. - ret = bcm5481_rmw_1csv (hw, BCM5481_R1CH_LCTRL, BCM5481_R1CH_LCTRL_ALEN, BCM5481_R1CH_LCTRL_ALEN | BCM5481_R1CH_LCTRL_AEN);
  10089. - if (ret)
  10090. - return ret;
  10091. -
  10092. - return ret;
  10093. -}
  10094. --- a/Embedded/src/GbE/iegbe_oem_phy.h
  10095. +++ b/Embedded/src/GbE/iegbe_oem_phy.h
  10096. @@ -2,31 +2,31 @@
  10097. GPL LICENSE SUMMARY
  10098. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  10099. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  10100. - This program is free software; you can redistribute it and/or modify
  10101. + This program is free software; you can redistribute it and/or modify
  10102. it under the terms of version 2 of the GNU General Public License as
  10103. published by the Free Software Foundation.
  10104. - This program is distributed in the hope that it will be useful, but
  10105. - WITHOUT ANY WARRANTY; without even the implied warranty of
  10106. - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  10107. + This program is distributed in the hope that it will be useful, but
  10108. + WITHOUT ANY WARRANTY; without even the implied warranty of
  10109. + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  10110. General Public License for more details.
  10111. - You should have received a copy of the GNU General Public License
  10112. - along with this program; if not, write to the Free Software
  10113. + You should have received a copy of the GNU General Public License
  10114. + along with this program; if not, write to the Free Software
  10115. Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  10116. - The full GNU General Public License is included in this distribution
  10117. + The full GNU General Public License is included in this distribution
  10118. in the file called LICENSE.GPL.
  10119. Contact Information:
  10120. Intel Corporation
  10121. - version: Embedded.L.1.0.34
  10122. + version: Embedded.Release.Patch.L.1.0.7-5
  10123. Contact Information:
  10124. -
  10125. - Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
  10126. +
  10127. + Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
  10128. *******************************************************************************/
  10129. #ifndef _IEGBE_OEM_PHY_H_
  10130. @@ -45,10 +45,10 @@ int32_t iegbe_oem_set_trans_gasket(struc
  10131. uint32_t iegbe_oem_get_tipg(struct iegbe_hw *hw);
  10132. int iegbe_oem_phy_is_copper(struct iegbe_hw *hw);
  10133. uint32_t iegbe_oem_get_phy_dev_number(struct iegbe_hw *hw);
  10134. -int iegbe_oem_mii_ioctl(struct iegbe_adapter *adapter, unsigned long flags,
  10135. +int iegbe_oem_mii_ioctl(struct iegbe_adapter *adapter, unsigned long flags,
  10136. struct ifreq *ifr, int cmd);
  10137. void iegbe_oem_fiber_live_in_suspend(struct iegbe_hw *hw);
  10138. -void iegbe_oem_get_phy_regs(struct iegbe_adapter *adapter, uint32_t *data,
  10139. +void iegbe_oem_get_phy_regs(struct iegbe_adapter *adapter, uint32_t *data,
  10140. uint32_t data_length);
  10141. int iegbe_oem_phy_loopback(struct iegbe_adapter *adapter);
  10142. void iegbe_oem_loopback_cleanup(struct iegbe_adapter *adapter);
  10143. @@ -94,81 +94,14 @@ int32_t iegbe_oem_phy_is_link_up(struct
  10144. #define ICP_XXXX_MAC_2 2
  10145. #define DEFAULT_ICP_XXXX_TIPG_IPGT 8 /* Inter Packet Gap Transmit Time */
  10146. -#define ICP_XXXX_TIPG_IPGT_MASK 0x000003FFUL
  10147. -#define BCM5481_PHY_ID 0x0143BCA2
  10148. -#define BCM5395S_PHY_ID 0x0143BCF0
  10149. +#define ICP_XXXX_TIPG_IPGT_MASK 0x000003FFUL
  10150. /* Miscellaneous defines */
  10151. #ifdef IEGBE_10_100_ONLY
  10152. - #define ICP_XXXX_AUTONEG_ADV_DEFAULT 0x0F
  10153. + #define ICP_XXXX_AUTONEG_ADV_DEFAULT 0x0F
  10154. #else
  10155. #define ICP_XXXX_AUTONEG_ADV_DEFAULT 0x2F
  10156. #endif
  10157. -//-----
  10158. -// BCM5481 specifics
  10159. -
  10160. -#define BCM5481_ECTRL (0x10)
  10161. -#define BCM5481_ESTAT (0x11)
  10162. -#define BCM5481_RXERR (0x12)
  10163. -#define BCM5481_EXPRW (0x15)
  10164. -#define BCM5481_EXPACC (0x17)
  10165. -#define BCM5481_ASTAT (0x19)
  10166. -#define BCM5481_R18H (0x18)
  10167. -#define BCM5481_R1CH (0x1c)
  10168. -
  10169. -//-----
  10170. -// indirect register access via register 18h
  10171. -
  10172. -#define BCM5481_R18H_SV_MASK (7) // Mask for SV bits.
  10173. -#define BCM5481_R18H_SV_ACTRL (0) // SV000 Aux. control
  10174. -#define BCM5481_R18H_SV_10BT (1) // SV001 10Base-T
  10175. -#define BCM5481_R18H_SV_PMII (2) // SV010 Power/MII control
  10176. -#define BCM5481_R18H_SV_MTEST (4) // SV100 Misc. test
  10177. -#define BCM5481_R18H_SV_MCTRL (7) // SV111 Misc. control
  10178. -
  10179. -#define BCM5481_R18H_SV001_POL (1 << 13) // Polarity
  10180. -#define BCM5481_R18H_SV010_LPM (1 << 6)
  10181. -#define BCM5481_R18H_SV111_SKEW (1 << 8)
  10182. -#define BCM5481_R18H_WE (1 << 15) // Write enable
  10183. -
  10184. -// 0x1c registers
  10185. -#define BCM5481_R1CH_SV_SHIFT (10)
  10186. -#define BCM5481_R1CH_SV_MASK (0x1f)
  10187. -#define BCM5481_R1CH_SC1 (0x02) // sv00010 Spare control 1
  10188. -#define BCM5481_R1CH_CACR (0x03) // sv00011 Clock alignment control
  10189. -#define BCM5481_R1CH_LCTRL (0x09) // sv01001 LED control
  10190. -#define BCM5481_R1CH_LEDS1 (0x0d) // sv01101 LED selector 1
  10191. -
  10192. -// 0x1c common
  10193. -#define BCM5481_R1CH_WE (1 << 15) // Write enable
  10194. -
  10195. -// 0x1c, sv 00010
  10196. -#define BCM5481_R1CH_SC1_LINK (1 << 2) // sv00010 Linkspeed
  10197. -
  10198. -// 0x1c, sv 00011
  10199. -#define BCM5481_R1CH_CACR_TCD (1 << 9) // sv00011 RGMII tx clock delay
  10200. -
  10201. -// 0x1c, sv 01001
  10202. -#define BCM5481_R1CH_LCTRL_ALEN (1 << 4) // Activity/Link enable on ACTIVITY LED
  10203. -#define BCM5481_R1CH_LCTRL_AEN (1 << 3) // Activity enable on ACTIVITY LED
  10204. -
  10205. -
  10206. -#define BCM5481_ECTRL_DISMDIX (1 <<14)
  10207. -
  10208. -#define BCM5481_MCTRL_AUTOMDIX (1 <<9)
  10209. -
  10210. -#define BCM5481_ESTAT_LINK (1 << 8)
  10211. -
  10212. -#define BCM5481_ASTAT_ANC (1 << 15)
  10213. -#define BCM5481_ASTAT_ANHCD (7 << 8)
  10214. -#define BCM5481_ASTAT_HCD(x) ((x >> 8) & 7)
  10215. -#define BCM5481_ASTAT_1KBTFD (0x7)
  10216. -#define BCM5481_ASTAT_1KBTHD (0x6)
  10217. -#define BCM5481_ASTAT_100BTXFD (0x5)
  10218. -#define BCM5481_ASTAT_100BTXHD (0x3)
  10219. -
  10220. -// end BCM5481 specifics
  10221. -
  10222. #endif /* ifndef _IEGBE_OEM_PHY_H_ */
  10223. -
  10224. +
  10225. --- a/Embedded/src/GbE/iegbe_osdep.h
  10226. +++ b/Embedded/src/GbE/iegbe_osdep.h
  10227. @@ -2,7 +2,7 @@
  10228. GPL LICENSE SUMMARY
  10229. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  10230. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  10231. This program is free software; you can redistribute it and/or modify
  10232. it under the terms of version 2 of the GNU General Public License as
  10233. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  10234. Contact Information:
  10235. Intel Corporation
  10236. - version: Embedded.L.1.0.34
  10237. + version: Embedded.Release.Patch.L.1.0.7-5
  10238. Contact Information:
  10239. --- a/Embedded/src/GbE/iegbe_param.c
  10240. +++ b/Embedded/src/GbE/iegbe_param.c
  10241. @@ -2,7 +2,7 @@
  10242. GPL LICENSE SUMMARY
  10243. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  10244. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  10245. This program is free software; you can redistribute it and/or modify
  10246. it under the terms of version 2 of the GNU General Public License as
  10247. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  10248. Contact Information:
  10249. Intel Corporation
  10250. - version: Embedded.L.1.0.34
  10251. + version: Embedded.Release.Patch.L.1.0.7-5
  10252. Contact Information:
  10253. @@ -239,11 +239,7 @@ E1000_PARAM(InterruptThrottleRate, "Inte
  10254. #define MAX_TXABSDELAY 0xFFFF
  10255. #define MIN_TXABSDELAY 0
  10256. -#ifdef IEGBE_GBE_WORKAROUND
  10257. -#define DEFAULT_ITR 0
  10258. -#else
  10259. #define DEFAULT_ITR 8000
  10260. -#endif
  10261. #define MAX_ITR 100000
  10262. @@ -373,7 +369,7 @@ iegbe_check_options(struct iegbe_adapter
  10263. tx_ring->count = opt.def;
  10264. }
  10265. #endif
  10266. - for (i = 0; i < adapter->num_queues; i++)
  10267. + for (i = 0; i < adapter->num_tx_queues; i++)
  10268. tx_ring[i].count = tx_ring->count;
  10269. }
  10270. { /* Receive Descriptor Count */
  10271. @@ -403,7 +399,7 @@ iegbe_check_options(struct iegbe_adapter
  10272. rx_ring->count = opt.def;
  10273. }
  10274. #endif
  10275. - for (i = 0; i < adapter->num_queues; i++)
  10276. + for (i = 0; i < adapter->num_rx_queues; i++)
  10277. rx_ring[i].count = rx_ring->count;
  10278. }
  10279. { /* Checksum Offload Enable/Disable */
  10280. --- a/Embedded/src/GbE/kcompat.c
  10281. +++ b/Embedded/src/GbE/kcompat.c
  10282. @@ -1,8 +1,8 @@
  10283. -/************************************************************
  10284. -
  10285. +/************************************************************
  10286. +
  10287. GPL LICENSE SUMMARY
  10288. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  10289. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  10290. This program is free software; you can redistribute it and/or modify
  10291. it under the terms of version 2 of the GNU General Public License as
  10292. @@ -22,183 +22,192 @@ GPL LICENSE SUMMARY
  10293. Contact Information:
  10294. Intel Corporation
  10295. - version: Embedded.L.1.0.34
  10296. -
  10297. - Contact Information:
  10298. -
  10299. - Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
  10300. -
  10301. -**************************************************************/
  10302. -/**************************************************************************
  10303. - * @ingroup KCOMPAT_GENERAL
  10304. - *
  10305. - * @file kcompat.c
  10306. - *
  10307. - * @description
  10308. - *
  10309. - *
  10310. - **************************************************************************/
  10311. -#include "kcompat.h"
  10312. -
  10313. -/*************************************************************/
  10314. -/* 2.4.13 => 2.4.3 */
  10315. -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0xd) )
  10316. -
  10317. -/**************************************/
  10318. -/* PCI DMA MAPPING */
  10319. -
  10320. -#if defined(CONFIG_HIGHMEM)
  10321. -
  10322. -#ifndef PCI_DRAM_OFFSET
  10323. -#define PCI_DRAM_OFFSET 0
  10324. -#endif
  10325. -
  10326. -u64 _kc_pci_map_page(struct pci_dev *dev,
  10327. - struct page *page,
  10328. - unsigned long offset,
  10329. - size_t size,
  10330. - int direction)
  10331. -{
  10332. - u64 ret_val;
  10333. - ret_val = (((u64)(page - mem_map) << PAGE_SHIFT) + offset +
  10334. - PCI_DRAM_OFFSET);
  10335. - return ret_val;
  10336. -}
  10337. -
  10338. -#else /* CONFIG_HIGHMEM */
  10339. -
  10340. -u64 _kc_pci_map_page(struct pci_dev *dev,
  10341. - struct page *page,
  10342. - unsigned long offset,
  10343. - size_t size,
  10344. - int direction)
  10345. -{
  10346. - return pci_map_single(dev, (void *)page_address(page) + offset,
  10347. - size, direction);
  10348. -}
  10349. -
  10350. -#endif /* CONFIG_HIGHMEM */
  10351. -
  10352. -void _kc_pci_unmap_page(struct pci_dev *dev,
  10353. - u64 dma_addr,
  10354. - size_t size,
  10355. - int direction)
  10356. -{
  10357. - return pci_unmap_single(dev, dma_addr, size, direction);
  10358. -}
  10359. -
  10360. -#endif /* 2.4.13 => 2.4.3 */
  10361. -
  10362. -
  10363. -/*****************************************************************************/
  10364. -/* 2.4.3 => 2.4.0 */
  10365. -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0x3) )
  10366. -
  10367. -/**************************************/
  10368. -/* PCI DRIVER API */
  10369. -
  10370. -int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
  10371. -{
  10372. - if(!pci_dma_supported(dev, mask)) {
  10373. - return -EIO;
  10374. - }
  10375. - dev->dma_mask = mask;
  10376. - return 0;
  10377. -}
  10378. -
  10379. -int _kc_pci_request_regions(struct pci_dev *dev, char *res_name)
  10380. -{
  10381. - int i;
  10382. -
  10383. - for (i = 0; i < 0x6; i++) {
  10384. - if (pci_resource_len(dev, i) == 0) {
  10385. - continue;
  10386. - }
  10387. - if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
  10388. - if (!request_region(pci_resource_start(dev, i),
  10389. - pci_resource_len(dev, i), res_name)) {
  10390. - pci_release_regions(dev);
  10391. - return -EBUSY;
  10392. - }
  10393. - } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
  10394. - if (!request_mem_region(pci_resource_start(dev, i),
  10395. - pci_resource_len(dev, i),
  10396. - res_name)) {
  10397. - pci_release_regions(dev);
  10398. - return -EBUSY;
  10399. - }
  10400. - }
  10401. - }
  10402. - return 0;
  10403. -}
  10404. -
  10405. -void _kc_pci_release_regions(struct pci_dev *dev)
  10406. -{
  10407. - int i;
  10408. -
  10409. - for (i = 0; i < 0x6; i++) {
  10410. - if (pci_resource_len(dev, i) == 0) {
  10411. - continue;
  10412. - }
  10413. - if (pci_resource_flags(dev, i) & IORESOURCE_IO){
  10414. - release_region(pci_resource_start(dev, i),
  10415. - pci_resource_len(dev, i));
  10416. - } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
  10417. - release_mem_region(pci_resource_start(dev, i),
  10418. - pci_resource_len(dev, i));
  10419. - }
  10420. - }
  10421. -}
  10422. -
  10423. -/**************************************/
  10424. -/* NETWORK DRIVER API */
  10425. -
  10426. -struct net_device * _kc_alloc_etherdev(int sizeof_priv)
  10427. -{
  10428. - struct net_device *dev;
  10429. - int alloc_size;
  10430. -
  10431. - alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 0x1f;
  10432. -
  10433. - dev = kmalloc(alloc_size, GFP_KERNEL);
  10434. -
  10435. - if (!dev) { return NULL; }
  10436. -
  10437. - memset(dev, 0, alloc_size);
  10438. -
  10439. - if (sizeof_priv) {
  10440. - dev->priv = (void *) (((unsigned long)(dev + 1) + 0x1f) & ~0x1f);
  10441. - }
  10442. - dev->name[0] = '\0';
  10443. -
  10444. - ether_setup(dev);
  10445. -
  10446. - return dev;
  10447. -}
  10448. -
  10449. -int _kc_is_valid_ether_addr(u8 *addr)
  10450. -{
  10451. - const char zaddr[0x6] = {0,};
  10452. -
  10453. - return !(addr[0]&1) && memcmp( addr, zaddr, 0x6);
  10454. -}
  10455. -
  10456. -#endif /* 2.4.3 => 2.4.0 */
  10457. -
  10458. -
  10459. -/*****************************************************************/
  10460. -/* 2.4.6 => 2.4.3 */
  10461. -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0x6) )
  10462. -
  10463. -int _kc_pci_set_power_state(struct pci_dev *dev, int state)
  10464. -{ return 0; }
  10465. -int _kc_pci_save_state(struct pci_dev *dev, u32 *buffer)
  10466. -{ return 0; }
  10467. -int _kc_pci_restore_state(struct pci_dev *pdev, u32 *buffer)
  10468. -{ return 0; }
  10469. -int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
  10470. -{ return 0; }
  10471. -
  10472. -#endif /* 2.4.6 => 2.4.3 */
  10473. -
  10474. -
  10475. + version: Embedded.Release.Patch.L.1.0.7-5
  10476. +
  10477. + Contact Information:
  10478. +
  10479. + Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
  10480. +
  10481. +**************************************************************/
  10482. +/**************************************************************************
  10483. + * @ingroup KCOMPAT_GENERAL
  10484. + *
  10485. + * @file kcompat.c
  10486. + *
  10487. + * @description
  10488. + *
  10489. + *
  10490. + **************************************************************************/
  10491. +#include "kcompat.h"
  10492. +
  10493. +/*************************************************************/
  10494. +/* 2.4.13 => 2.4.3 */
  10495. +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0xd) )
  10496. +
  10497. +/**************************************/
  10498. +/* PCI DMA MAPPING */
  10499. +
  10500. +#if defined(CONFIG_HIGHMEM)
  10501. +
  10502. +#ifndef PCI_DRAM_OFFSET
  10503. +#define PCI_DRAM_OFFSET 0
  10504. +#endif
  10505. +
  10506. +u64 _kc_pci_map_page(struct pci_dev *dev,
  10507. + struct page *page,
  10508. + unsigned long offset,
  10509. + size_t size,
  10510. + int direction)
  10511. +{
  10512. + u64 ret_val;
  10513. + ret_val = (((u64)(page - mem_map) << PAGE_SHIFT) + offset +
  10514. + PCI_DRAM_OFFSET);
  10515. + return ret_val;
  10516. +}
  10517. +
  10518. +#else /* CONFIG_HIGHMEM */
  10519. +
  10520. +u64 _kc_pci_map_page(struct pci_dev *dev,
  10521. + struct page *page,
  10522. + unsigned long offset,
  10523. + size_t size,
  10524. + int direction)
  10525. +{
  10526. + return pci_map_single(dev, (void *)page_address(page) + offset,
  10527. + size, direction);
  10528. +}
  10529. +
  10530. +#endif /* CONFIG_HIGHMEM */
  10531. +
  10532. +void _kc_pci_unmap_page(struct pci_dev *dev,
  10533. + u64 dma_addr,
  10534. + size_t size,
  10535. + int direction)
  10536. +{
  10537. + return pci_unmap_single(dev, dma_addr, size, direction);
  10538. +}
  10539. +
  10540. +#endif /* 2.4.13 => 2.4.3 */
  10541. +
  10542. +
  10543. +/*****************************************************************************/
  10544. +/* 2.4.3 => 2.4.0 */
  10545. +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0x3) )
  10546. +
  10547. +/**************************************/
  10548. +/* PCI DRIVER API */
  10549. +
  10550. +int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
  10551. +{
  10552. + if(!pci_dma_supported(dev, mask)) {
  10553. + return -EIO;
  10554. + }
  10555. + dev->dma_mask = mask;
  10556. + return 0;
  10557. +}
  10558. +
  10559. +int _kc_pci_request_regions(struct pci_dev *dev, char *res_name)
  10560. +{
  10561. + int i;
  10562. +
  10563. + for (i = 0; i < 0x6; i++) {
  10564. + if (pci_resource_len(dev, i) == 0) {
  10565. + continue;
  10566. + }
  10567. + if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
  10568. + if (!request_region(pci_resource_start(dev, i),
  10569. + pci_resource_len(dev, i), res_name)) {
  10570. + pci_release_regions(dev);
  10571. + return -EBUSY;
  10572. + }
  10573. + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
  10574. + if (!request_mem_region(pci_resource_start(dev, i),
  10575. + pci_resource_len(dev, i),
  10576. + res_name)) {
  10577. + pci_release_regions(dev);
  10578. + return -EBUSY;
  10579. + }
  10580. + }
  10581. + }
  10582. + return 0;
  10583. +}
  10584. +
  10585. +void _kc_pci_release_regions(struct pci_dev *dev)
  10586. +{
  10587. + int i;
  10588. +
  10589. + for (i = 0; i < 0x6; i++) {
  10590. + if (pci_resource_len(dev, i) == 0) {
  10591. + continue;
  10592. + }
  10593. + if (pci_resource_flags(dev, i) & IORESOURCE_IO){
  10594. + release_region(pci_resource_start(dev, i),
  10595. + pci_resource_len(dev, i));
  10596. + } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
  10597. + release_mem_region(pci_resource_start(dev, i),
  10598. + pci_resource_len(dev, i));
  10599. + }
  10600. + }
  10601. +}
  10602. +
  10603. +/**************************************/
  10604. +/* NETWORK DRIVER API */
  10605. +
  10606. +struct net_device * _kc_alloc_etherdev(int sizeof_priv)
  10607. +{
  10608. + struct net_device *dev;
  10609. + int alloc_size;
  10610. +
  10611. + alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 0x1f;
  10612. +
  10613. + dev = kmalloc(alloc_size, GFP_KERNEL);
  10614. +
  10615. + if (!dev) { return NULL; }
  10616. +
  10617. + memset(dev, 0, alloc_size);
  10618. +
  10619. + if (sizeof_priv) {
  10620. + dev->priv = (void *) (((unsigned long)(dev + 1) + 0x1f) & ~0x1f);
  10621. + }
  10622. + dev->name[0] = '\0';
  10623. +
  10624. + ether_setup(dev);
  10625. +
  10626. + return dev;
  10627. +}
  10628. +
  10629. +int _kc_is_valid_ether_addr(u8 *addr)
  10630. +{
  10631. + const char zaddr[0x6] = {0,};
  10632. +
  10633. + return !(addr[0]&1) && memcmp( addr, zaddr, 0x6);
  10634. +}
  10635. +
  10636. +#endif /* 2.4.3 => 2.4.0 */
  10637. +
  10638. +
  10639. +/*****************************************************************/
  10640. +/* 2.4.6 => 2.4.3 */
  10641. +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0x6) )
  10642. +
  10643. +int _kc_pci_set_power_state(struct pci_dev *dev, int state)
  10644. +{ return 0; }
  10645. +int _kc_pci_save_state(struct pci_dev *dev, u32 *buffer)
  10646. +{ return 0; }
  10647. +int _kc_pci_restore_state(struct pci_dev *pdev, u32 *buffer)
  10648. +{ return 0; }
  10649. +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
  10650. +{ return 0; }
  10651. +
  10652. +#endif /* 2.4.6 => 2.4.3 */
  10653. +
  10654. +
  10655. +
  10656. +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,24) )
  10657. +
  10658. +void dump_stack(void)
  10659. +{
  10660. +}
  10661. +
  10662. +#endif /* 2.4.24 */
  10663. +
  10664. --- a/Embedded/src/GbE/kcompat_ethtool.c
  10665. +++ b/Embedded/src/GbE/kcompat_ethtool.c
  10666. @@ -2,7 +2,7 @@
  10667. /*
  10668. * GPL LICENSE SUMMARY
  10669. *
  10670. - * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  10671. + * Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  10672. *
  10673. * This program is free software; you can redistribute it and/or modify
  10674. * it under the terms of version 2 of the GNU General Public License as
  10675. @@ -22,7 +22,7 @@
  10676. * Contact Information:
  10677. * Intel Corporation
  10678. *
  10679. - * version: Embedded.L.1.0.34
  10680. + * version: Embedded.Release.Patch.L.1.0.7-5
  10681. */
  10682. /**************************************************************************
  10683. @@ -779,6 +779,7 @@ static int ethtool_get_stats(struct net_
  10684. }
  10685. /* The main entry point in this file. Called from net/core/dev.c */
  10686. +
  10687. #define ETHTOOL_OPS_COMPAT
  10688. int ethtool_ioctl(struct ifreq *ifr)
  10689. {
  10690. --- a/Embedded/src/GbE/kcompat.h
  10691. +++ b/Embedded/src/GbE/kcompat.h
  10692. @@ -2,7 +2,7 @@
  10693. GPL LICENSE SUMMARY
  10694. - Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  10695. + Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  10696. This program is free software; you can redistribute it and/or modify
  10697. it under the terms of version 2 of the GNU General Public License as
  10698. @@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
  10699. Contact Information:
  10700. Intel Corporation
  10701. - version: Embedded.L.1.0.34
  10702. + version: Embedded.Release.Patch.L.1.0.7-5
  10703. Contact Information:
  10704. @@ -69,15 +69,6 @@ GPL LICENSE SUMMARY
  10705. #define CONFIG_NET_POLL_CONTROLLER
  10706. #endif
  10707. -#ifdef E1000_NAPI
  10708. -#undef CONFIG_E1000_NAPI
  10709. -#define CONFIG_E1000_NAPI
  10710. -#endif
  10711. -
  10712. -#ifdef E1000_NO_NAPI
  10713. -#undef CONFIG_E1000_NAPI
  10714. -#endif
  10715. -
  10716. #ifndef module_param
  10717. #define module_param(v,t,p) MODULE_PARM(v, "i");
  10718. #endif
  10719. @@ -554,35 +545,14 @@ extern void _kc_pci_unmap_page(struct pc
  10720. #endif
  10721. /*****************************************************************************/
  10722. -/* 2.4.23 => 2.4.22 */
  10723. -#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
  10724. -#ifdef CONFIG_E1000_NAPI
  10725. -#ifndef netif_poll_disable
  10726. -#define netif_poll_disable(x) _kc_netif_poll_disable(x)
  10727. -static inline void _kc_netif_poll_disable(struct net_device *netdev)
  10728. -{
  10729. - while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
  10730. - /* No hurry */
  10731. - current->state = TASK_INTERRUPTIBLE;
  10732. - schedule_timeout(1);
  10733. - }
  10734. -}
  10735. -#endif
  10736. -#ifndef netif_poll_enable
  10737. -#define netif_poll_enable(x) _kc_netif_poll_enable(x)
  10738. -static inline void _kc_netif_poll_enable(struct net_device *netdev)
  10739. -{
  10740. - clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
  10741. -}
  10742. -#endif
  10743. -#endif
  10744. -#endif
  10745. -
  10746. -/*****************************************************************************/
  10747. /* 2.5.28 => 2.4.23 */
  10748. #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
  10749. +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
  10750. +static inline void _kc_synchronize_irq(void) { barrier(); }
  10751. +#else
  10752. static inline void _kc_synchronize_irq() { synchronize_irq(); }
  10753. +#endif /* 2.4.23 */
  10754. #undef synchronize_irq
  10755. #define synchronize_irq(X) _kc_synchronize_irq()
  10756. @@ -747,6 +717,37 @@ static inline struct mii_ioctl_data *_kc
  10757. #define skb_header_cloned(x) 0
  10758. #endif /* SKB_DATAREF_SHIFT not defined */
  10759. +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
  10760. +
  10761. +#define ioread32(addr) readl(addr)
  10762. +#define iowrite32(val,addr) writel(val,addr)
  10763. +
  10764. +#endif /* 2.6.10 */
  10765. +
  10766. +#ifndef DEFINE_SPINLOCK
  10767. +#define DEFINE_SPINLOCK(s) spinlock_t s = SPIN_LOCK_UNLOCKED
  10768. +#endif /* DEFINE_SPINLOCK */
  10769. +
  10770. +#ifndef PCI_COMMAND_INTX_DISABLE
  10771. +#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
  10772. +#endif /* PCI_COMMAND_INTX_DISABLE */
  10773. +
  10774. +#ifndef ETH_GSTRING_LEN
  10775. +#define ETH_GSTRING_LEN 32
  10776. +#endif /* ETH_GSTRING_LEN */
  10777. +
  10778. +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,24) )
  10779. +
  10780. +extern void dump_stack(void);
  10781. +
  10782. +#undef register_reboot_notifier
  10783. +#define register_reboot_notifier(a)
  10784. +
  10785. +#undef unregister_reboot_notifier
  10786. +#define unregister_reboot_notifier(a)
  10787. +
  10788. +#endif /* 2.4.24 */
  10789. +
  10790. #endif /* _KCOMPAT_H_ */
  10791. --- a/Embedded/src/GbE/Makefile
  10792. +++ b/Embedded/src/GbE/Makefile
  10793. @@ -1,6 +1,6 @@
  10794. # GPL LICENSE SUMMARY
  10795. #
  10796. -# Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
  10797. +# Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
  10798. #
  10799. # This program is free software; you can redistribute it and/or modify
  10800. # it under the terms of version 2 of the GNU General Public License as
  10801. @@ -20,7 +20,7 @@
  10802. # Contact Information:
  10803. # Intel Corporation
  10804. #
  10805. -# version: Embedded.L.1.0.34
  10806. +# version: Embedded.Release.Patch.L.1.0.7-5
  10807. ###########################################################################
  10808. # Driver files
  10809. @@ -35,6 +35,8 @@ MDIO_PHONY_CFILES = gcu.c
  10810. MDIO_CFILES = gcu_main.c gcu_if.c
  10811. MDIO_HFILES = gcu.h gcu_if.h gcu_reg.h kcompat.h
  10812. +KVER=$(shell uname -r)
  10813. +
  10814. #
  10815. # Variables:
  10816. # KSRC (path to kernel source to build against)
  10817. @@ -50,45 +52,16 @@ MDIO_HFILES = gcu.h gcu_if.h gcu_reg.h k
  10818. # set KSRC, KOBJ, and EXTERNAL_MDIO to default values of not already set
  10819. #
  10820. -KOBJ ?= /usr/src/kernels/linux
  10821. -KSRC ?= /usr/src/kernels/linux
  10822. +#KOBJ=/usr/src/kernels/linux
  10823. +#KSRC=/usr/src/kernels/linux
  10824. +#KSRC=$(KOBJ)
  10825. EXTERNAL_MDIO ?= 1
  10826. GBE_NAME = iegbe
  10827. GCU_NAME = gcu
  10828. -# By default the workaround for the IEGBE writeback issue is enabled
  10829. -#
  10830. -IEGBE_GBE_WORKAROUND ?= 0
  10831. -
  10832. -# If the platform only supports 10/100 this variable needs to be set
  10833. -# so the default advertisement is set appropriately.
  10834. -# By default, this variable will be disabled.
  10835. -#
  10836. -IEGBE_10_100_ONLY ?= 0
  10837. -
  10838. -# check for version.h and autoconf.h for running kernel in /boot (SUSE)
  10839. -ifneq (,$(wildcard /boot/vmlinuz.version.h))
  10840. - VERSION_FILE := /boot/vmlinuz.version.h
  10841. - CONFIG_FILE := /boot/vmlinuz.autoconf.h
  10842. - KVER := $(shell $(CC) $(CFLAGS) -E -dM $(VERSION_FILE) | \
  10843. - grep UTS_RELEASE | awk '{ print $$3 }' | sed 's/\"//g')
  10844. - ifeq ($(KVER),$(shell uname -r))
  10845. - # set up include path to override headers from kernel source
  10846. - x:=$(shell rm -rf include)
  10847. - x:=$(shell mkdir -p include/linux)
  10848. - x:=$(shell cp /boot/vmlinuz.version.h include/linux/version.h)
  10849. - x:=$(shell cp /boot/vmlinuz.autoconf.h include/linux/autoconf.h)
  10850. - CFLAGS += -I./include
  10851. - else
  10852. - VERSION_FILE := $(KOBJ)/include/linux/version.h
  10853. - UTS_REL_FILE := $(KSRC)/include/linux/utsrelease.h
  10854. - CONFIG_FILE := $(KOBJ)/include/linux/autoconf.h
  10855. - endif
  10856. -else
  10857. - VERSION_FILE := $(KOBJ)/include/linux/version.h
  10858. - UTS_REL_FILE := $(KSRC)/include/linux/utsrelease.h
  10859. - CONFIG_FILE := $(KOBJ)/include/linux/autoconf.h
  10860. -endif
  10861. +VERSION_FILE := $(KSRC)/include/linux/version.h
  10862. +UTS_REL_FILE := $(KSRC)/include/linux/utsrelease.h
  10863. +CONFIG_FILE := $(KSRC)/include/linux/autoconf.h
  10864. ifeq (,$(wildcard $(VERSION_FILE)))
  10865. $(error Linux kernel source not configured - missing version.h)
  10866. @@ -98,83 +71,8 @@ ifeq (,$(wildcard $(CONFIG_FILE)))
  10867. $(error Linux kernel source not configured - missing autoconf.h)
  10868. endif
  10869. -# as of 2.6.16, kernel define UTS_RELEASE has been moved to utsrelease.h
  10870. -# so check that file for kernel version string instead of version.h
  10871. -USE_UTS_REL := $(shell [ -f $(UTS_REL_FILE) ] && echo "1")
  10872. -
  10873. -# pick a compiler
  10874. -ifneq (,$(findstring egcs-2.91.66, $(shell cat /proc/version)))
  10875. - CC := kgcc gcc cc
  10876. -else
  10877. - CC := gcc cc
  10878. -endif
  10879. -test_cc = $(shell $(cc) --version > /dev/null 2>&1 && echo $(cc))
  10880. -CC := $(foreach cc, $(CC), $(test_cc))
  10881. -CC := $(firstword $(CC))
  10882. -ifeq (,$(CC))
  10883. - $(error Compiler not found)
  10884. -endif
  10885. -
  10886. -# we need to know what platform the driver is being built on
  10887. -# some additional features are only built on Intel platforms
  10888. -ARCH := $(shell uname -m | sed 's/i.86/i386/')
  10889. -ifeq ($(ARCH),alpha)
  10890. - CFLAGS += -ffixed-8 -mno-fp-regs
  10891. -endif
  10892. -ifeq ($(ARCH),x86_64)
  10893. - CFLAGS += -mcmodel=kernel -mno-red-zone
  10894. -endif
  10895. -ifeq ($(ARCH),ppc)
  10896. - CFLAGS += -msoft-float
  10897. -endif
  10898. -ifeq ($(ARCH),ppc64)
  10899. - CFLAGS += -m64 -msoft-float
  10900. - LDFLAGS += -melf64ppc
  10901. -endif
  10902. -
  10903. -# standard flags for module builds
  10904. -CFLAGS += -DLINUX -D__KERNEL__ -DMODULE -O2 -pipe -Wall
  10905. -CFLAGS += -I$(KSRC)/include -I.
  10906. -CFLAGS += $(shell [ -f $(KSRC)/include/linux/modversions.h ] && \
  10907. - echo "-DMODVERSIONS -DEXPORT_SYMTAB \
  10908. - -include $(KSRC)/include/linux/modversions.h")
  10909. -
  10910. -ifeq ($(IEGBE_GBE_WORKAROUND), 1)
  10911. -CFLAGS += -DIEGBE_GBE_WORKAROUND -DE1000_NO_NAPI
  10912. -endif
  10913. -
  10914. -ifeq ($(IEGBE_10_100_ONLY), 1)
  10915. -CFLAGS += -DIEGBE_10_100_ONLY
  10916. -endif
  10917. -
  10918. -CFLAGS += $(CFLAGS_EXTRA)
  10919. -#ifeq (,$(shell echo $(CFLAGS_EXTRA) | grep NAPI))
  10920. -#CFLAGS += -DE1000_NO_NAPI
  10921. -#CFLAGS_EXTRA += -DE1000_NO_NAPI
  10922. -#endif
  10923. -
  10924. -RHC := $(KSRC)/include/linux/rhconfig.h
  10925. -ifneq (,$(wildcard $(RHC)))
  10926. - # 7.3 typo in rhconfig.h
  10927. - ifneq (,$(shell $(CC) $(CFLAGS) -E -dM $(RHC) | grep __module__bigmem))
  10928. - CFLAGS += -D__module_bigmem
  10929. - endif
  10930. -endif
  10931. -
  10932. -# get the kernel version - we use this to find the correct install path
  10933. -ifeq ($(USE_UTS_REL), 1)
  10934. - KVER := $(shell $(CC) $(CFLAGS) -E -dM $(UTS_REL_FILE) | grep UTS_RELEASE | \
  10935. - awk '{ print $$3 }' | sed 's/\"//g')
  10936. -else
  10937. - KVER := $(shell $(CC) $(CFLAGS) -E -dM $(VERSION_FILE) | grep UTS_RELEASE | \
  10938. - awk '{ print $$3 }' | sed 's/\"//g')
  10939. -endif
  10940. -
  10941. -KKVER := $(shell echo $(KVER) | \
  10942. - awk '{ if ($$0 ~ /2\.[6-9]\./) print "1"; else print "0"}')
  10943. -ifeq ($(KKVER), 0)
  10944. - $(error *** Aborting the build. \
  10945. - *** This driver is not supported on kernel versions older than 2.6.18)
  10946. +ifeq (,$(wildcard $(UTS_REL_FILE)))
  10947. + $(error Linux kernel source not configured - missing utsrelease.h)
  10948. endif
  10949. # set the install path
  10950. @@ -202,11 +100,11 @@ ifneq ($(SMP),$(shell uname -a | grep SM
  10951. endif
  10952. ifeq ($(SMP),1)
  10953. - CFLAGS += -D__SMP__
  10954. + EXTRA_CFLAGS += -D__SMP__
  10955. endif
  10956. ifeq ($(EXTERNAL_MDIO), 1)
  10957. - CFLAGS += -DEXTERNAL_MDIO
  10958. + EXTRA_CFLAGS += -DEXTERNAL_MDIO
  10959. endif
  10960. ###########################################################################
  10961. @@ -223,7 +121,6 @@ MANSECTION = 7
  10962. MANFILE = $(TARGET:.ko=.$(MANSECTION))
  10963. ifneq ($(PATCHLEVEL),)
  10964. - EXTRA_CFLAGS += $(CFLAGS_EXTRA)
  10965. obj-m += $(TARGET:.ko=.o)
  10966. iegbe-objs := $(CFILES:.c=.o)
  10967. ifeq ($(EXTERNAL_MDIO),1)
  10968. --- a/filelist
  10969. +++ b/filelist
  10970. @@ -1,41 +1,3 @@
  10971. -Embedded/Makefile
  10972. -Embedded/environment.mk
  10973. -Embedded/src/1588/1588.c
  10974. -Embedded/src/1588/1588.h
  10975. -Embedded/src/1588/IxTimeSyncAcc_p.h
  10976. -Embedded/src/1588/Makefile
  10977. -Embedded/src/1588/ixtimesyncacc.c
  10978. -Embedded/src/1588/ixtimesyncacc.h
  10979. -Embedded/src/1588/linux_ioctls.h
  10980. -Embedded/src/CAN/Makefile
  10981. -Embedded/src/CAN/can_fifo.c
  10982. -Embedded/src/CAN/can_fifo.h
  10983. -Embedded/src/CAN/can_ioctl.h
  10984. -Embedded/src/CAN/can_main.c
  10985. -Embedded/src/CAN/can_main.h
  10986. -Embedded/src/CAN/can_port.h
  10987. -Embedded/src/CAN/icp_can.c
  10988. -Embedded/src/CAN/icp_can.h
  10989. -Embedded/src/CAN/icp_can_regs.h
  10990. -Embedded/src/CAN/icp_can_types.h
  10991. -Embedded/src/CAN/icp_can_user.h
  10992. -Embedded/src/EDMA/Makefile
  10993. -Embedded/src/EDMA/dma.h
  10994. -Embedded/src/EDMA/dma_api.h
  10995. -Embedded/src/EDMA/dma_client_api.c
  10996. -Embedded/src/EDMA/dma_common.c
  10997. -Embedded/src/EDMA/dma_internals.h
  10998. -Embedded/src/EDMA/dma_linux.c
  10999. -Embedded/src/EDMA/os/os.c
  11000. -Embedded/src/EDMA/os/os.h
  11001. -Embedded/src/EDMA/os/os_list.c
  11002. -Embedded/src/EDMA/os/os_list.h
  11003. -Embedded/src/EDMA/os/os_types.h
  11004. -Embedded/src/GPIO/Makefile
  11005. -Embedded/src/GPIO/common.h
  11006. -Embedded/src/GPIO/gpio.h
  11007. -Embedded/src/GPIO/gpio_ref.c
  11008. -Embedded/src/GPIO/linux_ioctls.h
  11009. Embedded/src/GbE/Makefile
  11010. Embedded/src/GbE/gcu.h
  11011. Embedded/src/GbE/gcu_if.c
  11012. @@ -55,16 +17,6 @@ Embedded/src/GbE/iegbe_param.c
  11013. Embedded/src/GbE/kcompat.c
  11014. Embedded/src/GbE/kcompat.h
  11015. Embedded/src/GbE/kcompat_ethtool.c
  11016. -Embedded/src/WDT/Makefile
  11017. -Embedded/src/WDT/iwdt.c
  11018. -Embedded/src/WDT/iwdt.h
  11019. -Embedded/src/patches/Intel_EP80579_RHEL5.patch
  11020. -Embedded/src/patches/pci.ids_RHEL5.patch
  11021. LICENSE.GPL
  11022. -build_system/build_files/Core/ia.mk
  11023. -build_system/build_files/OS/linux_2.6.mk
  11024. -build_system/build_files/OS/linux_2.6_kernel_space_rules.mk
  11025. -build_system/build_files/common.mk
  11026. -build_system/build_files/rules.mk
  11027. filelist
  11028. versionfile
  11029. --- a/versionfile
  11030. +++ b/versionfile
  11031. @@ -1,4 +1,4 @@
  11032. -PACKAGE_TYPE=Embedded
  11033. +PACKAGE_TYPE=Embedded.Release.Patch
  11034. PACKAGE_OS=L
  11035. @@ -6,4 +6,6 @@ PACKAGE_VERSION_MAJOR_NUMBER=1
  11036. PACKAGE_VERSION_MINOR_NUMBER=0
  11037. -PACKAGE_VERSION_PATCH_NUMBER=34
  11038. +PACKAGE_VERSION_PATCH_NUMBER=7
  11039. +
  11040. +PACKAGE_VERSION_BUILD_NUMBER=5