/* Copyright (C) 2002-2019 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  /* _MM_HINT_ET is _MM_HINT_T with set 3rd bit.  */
  _MM_HINT_ET0 = 7,
  _MM_HINT_ET1 = 6,
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};
/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_prefetch (const void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, (__I & 0x4) >> 2, __I & 0x3);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), ((I & 0x4) >> 2), (I & 0x3))
#endif

#ifndef __SSE__
#pragma GCC push_options
#pragma GCC target("sse")
#define __DISABLE_SSE__
#endif /* __SSE__ */

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Unaligned version of the same type.  */
typedef float __m128_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020
#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000
#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000
#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create an undefined vector.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_undefined_ps (void)
{
  __m128 __Y = __Y;
  return __Y;
}

/* Create a vector of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}
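/* Editorial sketch (not part of the original header): _MM_SHUFFLE packs four
   2-bit lane indices into the 8-bit immediate that SHUFPS and PSHUFW expect,
   highest lane first.  Wrapped in #if 0 so it does not affect compilation;
   assumes C11 _Static_assert is available.  */
#if 0
_Static_assert (_MM_SHUFFLE (3, 2, 1, 0) == 0xE4, "identity selector");
_Static_assert (_MM_SHUFFLE (0, 1, 2, 3) == 0x1B, "reversal selector");
_Static_assert (_MM_SHUFFLE (0, 0, 0, 0) == 0x00, "broadcast element 0");
#endif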
/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A + (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A - (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A * (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A / (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}
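/* Editorial usage sketch (not part of the original header): the _ss forms
   touch only element 0 and copy the upper three elements from A, while the
   _ps forms operate on all four elements.  The example function name is
   illustrative only.  */
#if 0
static void
example_ss_vs_ps (void)
{
  float out[4] __attribute__ ((aligned (16)));
  __m128 a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f); /* element 0 is 1.0f */
  __m128 b = _mm_set1_ps (10.0f);
  _mm_store_ps (out, _mm_add_ss (a, b));  /* out = {11, 2, 3, 4} */
  _mm_store_ps (out, _mm_add_ps (a, b));  /* out = {11, 12, 13, 14} */
}
#endif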
/* Perform logical bit-wise operations on 128-bit values.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf) __builtin_ia32_cmpltss ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf) __builtin_ia32_cmpless ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf) __builtin_ia32_cmpnltss ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf) __builtin_ia32_cmpnless ((__v4sf) __B, (__v4sf) __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}
/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
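/* Editorial usage sketch (not part of the original header): the all-ones /
   all-zeros masks produced by the packed comparisons combine with the
   bit-wise operations above to select between two vectors without branching.
   The helper name is illustrative only.  */
#if 0
/* result[i] = (a[i] < b[i]) ? a[i] : b[i], i.e. an element-wise minimum.  */
static __m128
example_select_smaller (__m128 a, __m128 b)
{
  __m128 mask = _mm_cmplt_ps (a, b);
  return _mm_or_ps (_mm_and_ps (mask, a), _mm_andnot_ps (mask, b));
}
#endif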
/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif
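/* Editorial usage sketch (not part of the original header): the comi/ucomi
   family returns a plain int rather than a mask, and the cvt family honours
   the current MXCSR rounding mode (round-to-nearest-even by default, so
   halfway cases go to the even integer).  The function name is illustrative.  */
#if 0
static void
example_scalar_compare_convert (void)
{
  int lt = _mm_comilt_ss (_mm_set_ss (1.0f), _mm_set_ss (2.0f)); /* 1 */
  int r  = _mm_cvtss_si32 (_mm_set_ss (2.5f)); /* 2 under the default mode */
  (void) lt; (void) r;
}
#endif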
/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 32-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}
/* Convert the four signed 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}
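/* Editorial usage sketch (not part of the original header): these packed
   conversions use MMX registers, so ordinary x87 floating-point code should
   only resume after _mm_empty () (declared in mmintrin.h).  The helper name
   is illustrative.  */
#if 0
static __m64
example_floats_to_words (__m128 v)
{
  __m64 w = _mm_cvtps_pi16 (v); /* four floats -> four signed words */
  /* The caller must execute _mm_empty () before using x87 arithmetic.  */
  return w;
}
#endif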
/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \
				   (__v4sf)(__m128)(B), (int)(MASK)))
#endif

/* Selects and interleaves the upper two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}
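/* Editorial usage sketch (not part of the original header): shuffling a
   vector with itself permutes or broadcasts its elements, and
   _mm_movemask_ps collects the four sign bits into an int.  The function
   name is illustrative.  */
#if 0
static int
example_shuffle_movemask (void)
{
  __m128 v = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
  __m128 rev = _mm_shuffle_ps (v, v, _MM_SHUFFLE (0, 1, 2, 3)); /* {4,3,2,1} */
  __m128 bc  = _mm_shuffle_ps (v, v, _MM_SHUFFLE (0, 0, 0, 0)); /* {1,1,1,1} */
  return _mm_movemask_ps (_mm_sub_ps (bc, rev)); /* 0x7: lanes 0-2 negative */
}
#endif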
/* Read exception bits from the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
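/* Editorial usage sketch (not part of the original header): save, modify,
   and restore the SSE control/status register.  The function name is
   illustrative.  */
#if 0
static void
example_mxcsr_roundtrip (void)
{
  unsigned int saved = _mm_getcsr ();
  _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
  _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON);
  /* ... SPFP code runs with truncation and flush-to-zero ... */
  _mm_setcsr (saved);
}
#endif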
/* Create a vector with element 0 as F and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return *(__m128 *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return *(__m128_u *)__P;
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = ((__v4sf)__A)[0];
}

extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return ((__v4sf)__A)[0];
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__m128 *)__P = __A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  *(__m128_u *)__P = __A;
}

/* Store the lower SPFP value across four words.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_shuffle ((__v4sf)__A, (__v4sf)__B,
				     __extension__
				     (__attribute__((__vector_size__ (16))) int)
				     {4,1,2,3});
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
#endif
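/* Editorial usage sketch (not part of the original header): _mm_set_ps
   lists arguments from the highest element down, _mm_setr_ps from the
   lowest up, and the aligned stores fault on misaligned addresses where the
   unaligned forms do not.  The function name is illustrative.  */
#if 0
static void
example_set_order_and_alignment (void)
{
  float out[4] __attribute__ ((aligned (16)));
  __m128 a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);  /* element 0 is 1.0f */
  __m128 b = _mm_setr_ps (4.0f, 3.0f, 2.0f, 1.0f); /* element 0 is 4.0f */
  _mm_store_ps (out, a);  /* requires a 16-byte aligned address */
  _mm_storeu_ps (out, b); /* tolerates any alignment */
}
#endif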
/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \
					(int)(D), (int)(N)))

#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}
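/* Editorial usage sketch (not part of the original header): reading and
   rewriting individual 16-bit lanes of an __m64.  Assumes _mm_set_pi16 and
   _mm_empty from mmintrin.h; the function name is illustrative.  */
#if 0
static int
example_extract_insert (void)
{
  __m64 w = _mm_set_pi16 (40, 30, 20, 10); /* words, highest lane first */
  int second = _mm_extract_pi16 (w, 1);    /* 20 */
  w = _mm_insert_pi16 (w, 99, 3);          /* word 3 becomes 99 */
  _mm_empty ();                            /* leave the MMX state clean */
  return second;
}
#endif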
/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Stores the data in A to the address P without polluting the caches.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
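/* Editorial usage sketch (not part of the original header): _mm_sad_pu8 is
   the classic building block for motion estimation; the 16-bit sum lands in
   the low word of the result.  Assumes _mm_set_pi8 and _mm_empty from
   mmintrin.h; the function name is illustrative.  */
#if 0
static int
example_sad (void)
{
  __m64 x = _mm_set_pi8 (8, 7, 6, 5, 4, 3, 2, 1);
  __m64 y = _mm_set_pi8 (1, 2, 3, 4, 5, 6, 7, 8);
  int dist = _mm_extract_pi16 (_mm_sad_pu8 (x, y), 0); /* 7+5+3+1+1+3+5+7 = 32 */
  _mm_empty ();
  return dist;
}
#endif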
/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \
  (row0) = __builtin_ia32_movlhps (__t0, __t1); \
  (row1) = __builtin_ia32_movhlps (__t1, __t0); \
  (row2) = __builtin_ia32_movlhps (__t2, __t3); \
  (row3) = __builtin_ia32_movhlps (__t3, __t2); \
} while (0)

/* For backward source compatibility.  */
# include <emmintrin.h>

#ifdef __DISABLE_SSE__
#undef __DISABLE_SSE__
#pragma GCC pop_options
#endif /* __DISABLE_SSE__ */

/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  This is after the pop_options pragma because
   it does not require SSE support in the processor--the encoding is a
   nop on processors that do not support it.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_pause (void)
{
  __builtin_ia32_pause ();
}

#endif /* _XMMINTRIN_H_INCLUDED */
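/* Editorial usage sketch (not part of the original header): in-place 4x4
   transpose of four row vectors using the macro above.  The function name
   is illustrative.  */
#if 0
static void
example_transpose (__m128 rows[4])
{
  _MM_TRANSPOSE4_PS (rows[0], rows[1], rows[2], rows[3]);
  /* rows[i] now holds what was column i of the original matrix.  */
}
#endif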