diff --git a/src/Search_2D/__pycache__/env.cpython-37.pyc b/src/Search_2D/__pycache__/env.cpython-37.pyc
deleted file mode 100644
index 945aa4dd30ff0b41097d4b473bf4321682bfd244..0000000000000000000000000000000000000000
Binary files a/src/Search_2D/__pycache__/env.cpython-37.pyc and /dev/null differ
diff --git a/src/Search_2D/__pycache__/env.cpython-38.pyc b/src/Search_2D/__pycache__/env.cpython-38.pyc
deleted file mode 100644
index e45c75b8d0504fc65225c15ff892fb552c211b30..0000000000000000000000000000000000000000
Binary files a/src/Search_2D/__pycache__/env.cpython-38.pyc and /dev/null differ
diff --git a/src/Search_2D/__pycache__/plotting.cpython-39.pyc b/src/Search_2D/__pycache__/plotting.cpython-39.pyc
deleted file mode 100644
index c381ea6565fe0c10189368e3029be1494803ece1..0000000000000000000000000000000000000000
Binary files a/src/Search_2D/__pycache__/plotting.cpython-39.pyc and /dev/null differ
diff --git a/src/Search_2D/__pycache__/queue.cpython-38.pyc b/src/Search_2D/__pycache__/queue.cpython-38.pyc
deleted file mode 100644
index 69c46c686152f84441b374e99fc01ccabf6439d4..0000000000000000000000000000000000000000
Binary files a/src/Search_2D/__pycache__/queue.cpython-38.pyc and /dev/null differ
diff --git a/src/Search_2D/bfs.py b/src/Search_2D/bfs.py
deleted file mode 100644
--- a/src/Search_2D/bfs.py
+++ /dev/null
-                prior = self.OPEN[-1][0]+1 if len(self.OPEN)>0 else 0
- heapq.heappush(self.OPEN, (prior, s_n))
-
- return self.extract_path(self.PARENT), self.CLOSED
-
-
-def main():
- s_start = (5, 5)
- s_goal = (45, 25)
-
- bfs = BFS(s_start, s_goal, 'None')
- plot = plotting.Plotting(s_start, s_goal)
-
- path, visited = bfs.searching()
- plot.animation(path, visited, "Breadth-first Searching (BFS)")
-
-
-if __name__ == '__main__':
- main()
diff --git a/src/Search_2D/dfs.py b/src/Search_2D/dfs.py
deleted file mode 100644
index 3b30b03..0000000
--- a/src/Search_2D/dfs.py
+++ /dev/null
@@ -1,65 +0,0 @@
-
-import os
-import sys
-import math
-import heapq
-
-sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
- "/../../Search_based_Planning/")
-
-from Search_2D import plotting, env
-from Search_2D.Astar import AStar
-
-class DFS(AStar):
- """DFS add the new visited node in the front of the openset
- """
- def searching(self):
- """
-        Depth-first Searching.
- :return: path, visited order
- """
-
- self.PARENT[self.s_start] = self.s_start
- self.g[self.s_start] = 0
- self.g[self.s_goal] = math.inf
- heapq.heappush(self.OPEN,
- (0, self.s_start))
-
- while self.OPEN:
- _, s = heapq.heappop(self.OPEN)
- self.CLOSED.append(s)
-
- if s == self.s_goal:
- break
-
- for s_n in self.get_neighbor(s):
- new_cost = self.g[s] + self.cost(s, s_n)
-
- if s_n not in self.g:
- self.g[s_n] = math.inf
-
- if new_cost < self.g[s_n]: # conditions for updating Cost
- self.g[s_n] = new_cost
- self.PARENT[s_n] = s
-
- # dfs, add new node to the front of the openset
- prior = self.OPEN[0][0]-1 if len(self.OPEN)>0 else 0
- heapq.heappush(self.OPEN, (prior, s_n))
-
- return self.extract_path(self.PARENT), self.CLOSED
-
-
-def main():
- s_start = (5, 5)
- s_goal = (45, 25)
-
- dfs = DFS(s_start, s_goal, 'None')
- plot = plotting.Plotting(s_start, s_goal)
-
- path, visited = dfs.searching()
- visited = list(dict.fromkeys(visited))
- plot.animation(path, visited, "Depth-first Searching (DFS)") # animation
-
-
-if __name__ == '__main__':
- main()
diff --git a/src/Search_2D/env.py b/src/Search_2D/env.py
deleted file mode 100644
index 9523c98..0000000
--- a/src/Search_2D/env.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Env 2D
-@author: huiming zhou
-"""
-
-
-class Env:
- def __init__(self):
- self.x_range = 51 # size of background
- self.y_range = 31
- self.motions = [(-1, 0), (-1, 1), (0, 1), (1, 1),
- (1, 0), (1, -1), (0, -1), (-1, -1)]
- self.obs = self.obs_map()
-
- def update_obs(self, obs):
- self.obs = obs
-
- def obs_map(self):
- """
- Initialize obstacles' positions
- :return: map of obstacles
- """
-
- x = self.x_range #51
- y = self.y_range #31
- obs = set()
-        # draw the top and bottom borders
- for i in range(x):
- obs.add((i, 0))
- for i in range(x):
- obs.add((i, y - 1))
-        # draw the left and right borders
- for i in range(y):
- obs.add((0, i))
- for i in range(y):
- obs.add((x - 1, i))
-
- for i in range(2, 21):
- obs.add((i, 15))
- for i in range(15):
- obs.add((20, i))
-
- for i in range(15, 30):
- obs.add((30, i))
- for i in range(16):
- obs.add((40, i))
-
- return obs
-
-# if __name__ == '__main__':
-# a = Env()
-# print(a.obs)
\ No newline at end of file
diff --git a/src/Search_2D/plotting.py b/src/Search_2D/plotting.py
deleted file mode 100644
index 1cf98a3..0000000
--- a/src/Search_2D/plotting.py
+++ /dev/null
@@ -1,165 +0,0 @@
-"""
-Plot tools 2D
-@author: huiming zhou
-"""
-
-import os
-import sys
-import matplotlib.pyplot as plt
-
-sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
- "/../../Search_based_Planning/")
-
-from Search_2D import env
-
-
-class Plotting:
- def __init__(self, xI, xG):
- self.xI, self.xG = xI, xG
- self.env = env.Env()
- self.obs = self.env.obs_map()
-
- def update_obs(self, obs):
- self.obs = obs
-
- def animation(self, path, visited, name):
- self.plot_grid(name)
- self.plot_visited(visited)
- self.plot_path(path)
- plt.show()
-
- def animation_lrta(self, path, visited, name):
- self.plot_grid(name)
- cl = self.color_list_2()
- path_combine = []
-
- for k in range(len(path)):
- self.plot_visited(visited[k], cl[k])
- plt.pause(0.2)
- self.plot_path(path[k])
- path_combine += path[k]
- plt.pause(0.2)
- if self.xI in path_combine:
- path_combine.remove(self.xI)
- self.plot_path(path_combine)
- plt.show()
-
- def animation_ara_star(self, path, visited, name):
- self.plot_grid(name)
- cl_v, cl_p = self.color_list()
-
- for k in range(len(path)):
- self.plot_visited(visited[k], cl_v[k])
- self.plot_path(path[k], cl_p[k], True)
- plt.pause(0.5)
-
- plt.show()
-
- def animation_bi_astar(self, path, v_fore, v_back, name):
- self.plot_grid(name)
- self.plot_visited_bi(v_fore, v_back)
- self.plot_path(path)
- plt.show()
-
- def plot_grid(self, name):
- obs_x = [x[0] for x in self.obs]
- obs_y = [x[1] for x in self.obs]
-
- plt.plot(self.xI[0], self.xI[1], "bs")
- plt.plot(self.xG[0], self.xG[1], "gs")
- plt.plot(obs_x, obs_y, "sk")
- plt.title(name)
- plt.axis("equal")
-
- def plot_visited(self, visited, cl='gray'):
- if self.xI in visited:
- visited.remove(self.xI)
-
- if self.xG in visited:
- visited.remove(self.xG)
-
- count = 0
-
- for x in visited:
- count += 1
- plt.plot(x[0], x[1], color=cl, marker='o')
- plt.gcf().canvas.mpl_connect('key_release_event',
- lambda event: [exit(0) if event.key == 'escape' else None])
-
- if count < len(visited) / 3:
- length = 20
- elif count < len(visited) * 2 / 3:
- length = 30
- else:
- length = 40
- #
- # length = 15
-
- if count % length == 0:
- plt.pause(0.001)
- plt.pause(0.01)
-
- def plot_path(self, path, cl='r', flag=False):
- path_x = [path[i][0] for i in range(len(path))]
- path_y = [path[i][1] for i in range(len(path))]
-
- if not flag:
- plt.plot(path_x, path_y, linewidth='3', color='r')
- else:
- plt.plot(path_x, path_y, linewidth='3', color=cl)
-
- plt.plot(self.xI[0], self.xI[1], "bs")
- plt.plot(self.xG[0], self.xG[1], "gs")
-
- plt.pause(0.01)
-
- def plot_visited_bi(self, v_fore, v_back):
- if self.xI in v_fore:
- v_fore.remove(self.xI)
-
- if self.xG in v_back:
- v_back.remove(self.xG)
-
- len_fore, len_back = len(v_fore), len(v_back)
-
- for k in range(max(len_fore, len_back)):
- if k < len_fore:
- plt.plot(v_fore[k][0], v_fore[k][1], linewidth='3', color='gray', marker='o')
- if k < len_back:
- plt.plot(v_back[k][0], v_back[k][1], linewidth='3', color='cornflowerblue', marker='o')
-
- plt.gcf().canvas.mpl_connect('key_release_event',
- lambda event: [exit(0) if event.key == 'escape' else None])
-
- if k % 10 == 0:
- plt.pause(0.001)
- plt.pause(0.01)
-
- @staticmethod
- def color_list():
- cl_v = ['silver',
- 'wheat',
- 'lightskyblue',
- 'royalblue',
- 'slategray']
- cl_p = ['gray',
- 'orange',
- 'deepskyblue',
- 'red',
- 'm']
- return cl_v, cl_p
-
- @staticmethod
- def color_list_2():
- cl = ['silver',
- 'steelblue',
- 'dimgray',
- 'cornflowerblue',
- 'dodgerblue',
- 'royalblue',
- 'plum',
- 'mediumslateblue',
- 'mediumpurple',
- 'blueviolet',
- ]
- return cl
diff --git a/src/Search_2D/queue.py b/src/Search_2D/queue.py
deleted file mode 100644
index 51703ae..0000000
--- a/src/Search_2D/queue.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import collections
-import heapq
-
-
-class QueueFIFO:
- """
- Class: QueueFIFO
- Description: QueueFIFO is designed for First-in-First-out rule.
- """
-
- def __init__(self):
- self.queue = collections.deque()
-
- def empty(self):
- return len(self.queue) == 0
-
- def put(self, node):
- self.queue.append(node) # enter from back
-
- def get(self):
- return self.queue.popleft() # leave from front
-
-
-class QueueLIFO:
- """
- Class: QueueLIFO
- Description: QueueLIFO is designed for Last-in-First-out rule.
- """
-
- def __init__(self):
- self.queue = collections.deque()
-
- def empty(self):
- return len(self.queue) == 0
-
- def put(self, node):
- self.queue.append(node) # enter from back
-
- def get(self):
- return self.queue.pop() # leave from back
-
-
-class QueuePrior:
- """
- Class: QueuePrior
- Description: QueuePrior reorders elements using value [priority]
- """
-
- def __init__(self):
- self.queue = []
-
- def empty(self):
- return len(self.queue) == 0
-
- def put(self, item, priority):
-        heapq.heappush(self.queue, (priority, item))  # reorder items using priority
-
- def get(self):
- return heapq.heappop(self.queue)[1] # pop out the smallest item
-
- def enumerate(self):
- return self.queue
From 07215c01a86229d6898db18b6dad2725379d8d22 Mon Sep 17 00:00:00 2001
From: wangziyang <2890199310@qq.com>
Date: Thu, 12 May 2022 19:56:05 +0800
Subject: [PATCH 2/4] Modify source code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
PaddleClas/MANIFEST.in | 7 +
PaddleClas/__init__.py | 17 +
PaddleClas/hubconf.py | 788 ++++++++++++++++++++++++++++++++++++
PaddleClas/paddleclas.py | 572 ++++++++++++++++++++++++++
PaddleClas/requirements.txt | 11 +
PaddleClas/setup.py | 60 +++
6 files changed, 1455 insertions(+)
create mode 100644 PaddleClas/MANIFEST.in
create mode 100644 PaddleClas/__init__.py
create mode 100644 PaddleClas/hubconf.py
create mode 100644 PaddleClas/paddleclas.py
create mode 100644 PaddleClas/requirements.txt
create mode 100644 PaddleClas/setup.py
diff --git a/PaddleClas/MANIFEST.in b/PaddleClas/MANIFEST.in
new file mode 100644
index 0000000..b0a4f6d
--- /dev/null
+++ b/PaddleClas/MANIFEST.in
@@ -0,0 +1,7 @@
+include LICENSE.txt
+include README.md
+include docs/en/whl_en.md
+recursive-include deploy/python predict_cls.py preprocess.py postprocess.py det_preprocess.py
+recursive-include deploy/utils get_image_list.py config.py logger.py predictor.py
+
+recursive-include ppcls/ *.py *.txt
\ No newline at end of file
diff --git a/PaddleClas/__init__.py b/PaddleClas/__init__.py
new file mode 100644
index 0000000..2128a6c
--- /dev/null
+++ b/PaddleClas/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = ['PaddleClas']
+from .paddleclas import PaddleClas
+from ppcls.arch.backbone import *
diff --git a/PaddleClas/hubconf.py b/PaddleClas/hubconf.py
new file mode 100644
index 0000000..b7f7674
--- /dev/null
+++ b/PaddleClas/hubconf.py
@@ -0,0 +1,788 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies = ['paddle']
+
+import paddle
+import os
+import sys
+
+
+class _SysPathG(object):
+ """
+    _SysPathG is used to add/clean a path on sys.path, keeping package dependencies minimal by skipping parent dirs.
+
+ __enter__
+ add path into sys.path
+ __exit__
+            clean the user's sys.path to avoid unexpected behaviors
+ """
+
+ def __init__(self, path):
+ self.path = path
+
+ def __enter__(self, ):
+ sys.path.insert(0, self.path)
+
+ def __exit__(self, type, value, traceback):
+ _p = sys.path.pop(0)
+        assert _p == self.path, 'Failed to clean {} from sys.path correctly.'.format(
+            self.path)
+
+
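+# The hub entrypoints below are defined inside _SysPathG so the local ppcls package is importable without leaving extra entries on sys.path.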
+with _SysPathG(os.path.dirname(os.path.abspath(__file__)), ):
+ import ppcls
+ import ppcls.arch.backbone as backbone
+
+ def ppclas_init():
+ if ppcls.utils.logger._logger is None:
+ ppcls.utils.logger.init_logger()
+
+ ppclas_init()
+
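+    # Helper for fetching official pretrained weights; the entrypoints below simply forward `pretrained` and any kwargs to the matching ppcls backbone.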
+ def _load_pretrained_parameters(model, name):
+ url = 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/{}_pretrained.pdparams'.format(
+ name)
+ path = paddle.utils.download.get_weights_path_from_url(url)
+ model.set_state_dict(paddle.load(path))
+ return model
+
+ def alexnet(pretrained=False, **kwargs):
+ """
+ AlexNet
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `AlexNet` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.AlexNet(**kwargs)
+
+ return model
+
+ def vgg11(pretrained=False, **kwargs):
+ """
+ VGG11
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+            stop_grad_layers: int=0. Parameters in blocks whose index is larger than `stop_grad_layers` will be set to `param.trainable=False`
+ Returns:
+ model: nn.Layer. Specific `VGG11` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.VGG11(**kwargs)
+
+ return model
+
+ def vgg13(pretrained=False, **kwargs):
+ """
+ VGG13
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+            stop_grad_layers: int=0. Parameters in blocks whose index is larger than `stop_grad_layers` will be set to `param.trainable=False`
+ Returns:
+ model: nn.Layer. Specific `VGG13` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.VGG13(**kwargs)
+
+ return model
+
+ def vgg16(pretrained=False, **kwargs):
+ """
+ VGG16
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+            stop_grad_layers: int=0. Parameters in blocks whose index is larger than `stop_grad_layers` will be set to `param.trainable=False`
+ Returns:
+ model: nn.Layer. Specific `VGG16` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.VGG16(**kwargs)
+
+ return model
+
+ def vgg19(pretrained=False, **kwargs):
+ """
+ VGG19
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+            stop_grad_layers: int=0. Parameters in blocks whose index is larger than `stop_grad_layers` will be set to `param.trainable=False`
+ Returns:
+ model: nn.Layer. Specific `VGG19` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.VGG19(**kwargs)
+
+ return model
+
+ def resnet18(pretrained=False, **kwargs):
+ """
+ ResNet18
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ input_image_channel: int=3. The number of input image channels
+            data_format: str='NCHW'. The data format of batch input images; should be in ('NCHW', 'NHWC')
+ Returns:
+ model: nn.Layer. Specific `ResNet18` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNet18(**kwargs)
+
+ return model
+
+ def resnet34(pretrained=False, **kwargs):
+ """
+ ResNet34
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ input_image_channel: int=3. The number of input image channels
+            data_format: str='NCHW'. The data format of batch input images; should be in ('NCHW', 'NHWC')
+ Returns:
+ model: nn.Layer. Specific `ResNet34` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNet34(**kwargs)
+
+ return model
+
+ def resnet50(pretrained=False, **kwargs):
+ """
+ ResNet50
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ input_image_channel: int=3. The number of input image channels
+            data_format: str='NCHW'. The data format of batch input images; should be in ('NCHW', 'NHWC')
+ Returns:
+ model: nn.Layer. Specific `ResNet50` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNet50(**kwargs)
+
+ return model
+
+ def resnet101(pretrained=False, **kwargs):
+ """
+ ResNet101
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ input_image_channel: int=3. The number of input image channels
+            data_format: str='NCHW'. The data format of batch input images; should be in ('NCHW', 'NHWC')
+ Returns:
+ model: nn.Layer. Specific `ResNet101` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNet101(**kwargs)
+
+ return model
+
+ def resnet152(pretrained=False, **kwargs):
+ """
+ ResNet152
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ input_image_channel: int=3. The number of input image channels
+            data_format: str='NCHW'. The data format of batch input images; should be in ('NCHW', 'NHWC')
+ Returns:
+ model: nn.Layer. Specific `ResNet152` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNet152(**kwargs)
+
+ return model
+
+ def squeezenet1_0(pretrained=False, **kwargs):
+ """
+ SqueezeNet1_0
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `SqueezeNet1_0` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.SqueezeNet1_0(**kwargs)
+
+ return model
+
+ def squeezenet1_1(pretrained=False, **kwargs):
+ """
+ SqueezeNet1_1
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `SqueezeNet1_1` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.SqueezeNet1_1(**kwargs)
+
+ return model
+
+ def densenet121(pretrained=False, **kwargs):
+ """
+ DenseNet121
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ dropout: float=0. Probability of setting units to zero.
+            bn_size: int=4. The number of channels per group
+ Returns:
+ model: nn.Layer. Specific `DenseNet121` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DenseNet121(**kwargs)
+
+ return model
+
+ def densenet161(pretrained=False, **kwargs):
+ """
+ DenseNet161
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ dropout: float=0. Probability of setting units to zero.
+            bn_size: int=4. The number of channels per group
+ Returns:
+ model: nn.Layer. Specific `DenseNet161` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DenseNet161(**kwargs)
+
+ return model
+
+ def densenet169(pretrained=False, **kwargs):
+ """
+ DenseNet169
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ dropout: float=0. Probability of setting units to zero.
+            bn_size: int=4. The number of channels per group
+ Returns:
+ model: nn.Layer. Specific `DenseNet169` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DenseNet169(**kwargs)
+
+ return model
+
+ def densenet201(pretrained=False, **kwargs):
+ """
+ DenseNet201
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ dropout: float=0. Probability of setting units to zero.
+            bn_size: int=4. The number of channels per group
+ Returns:
+ model: nn.Layer. Specific `DenseNet201` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DenseNet201(**kwargs)
+
+ return model
+
+ def densenet264(pretrained=False, **kwargs):
+ """
+ DenseNet264
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ dropout: float=0. Probability of setting units to zero.
+            bn_size: int=4. The number of channels per group
+ Returns:
+ model: nn.Layer. Specific `DenseNet264` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DenseNet264(**kwargs)
+
+ return model
+
+ def inceptionv3(pretrained=False, **kwargs):
+ """
+ InceptionV3
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `InceptionV3` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.InceptionV3(**kwargs)
+
+ return model
+
+ def inceptionv4(pretrained=False, **kwargs):
+ """
+ InceptionV4
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `InceptionV4` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.InceptionV4(**kwargs)
+
+ return model
+
+ def googlenet(pretrained=False, **kwargs):
+ """
+ GoogLeNet
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `GoogLeNet` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.GoogLeNet(**kwargs)
+
+ return model
+
+ def shufflenetv2_x0_25(pretrained=False, **kwargs):
+ """
+ ShuffleNetV2_x0_25
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ShuffleNetV2_x0_25` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ShuffleNetV2_x0_25(**kwargs)
+
+ return model
+
+ def mobilenetv1(pretrained=False, **kwargs):
+ """
+ MobileNetV1
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV1` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV1(**kwargs)
+
+ return model
+
+ def mobilenetv1_x0_25(pretrained=False, **kwargs):
+ """
+ MobileNetV1_x0_25
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV1_x0_25(**kwargs)
+
+ return model
+
+ def mobilenetv1_x0_5(pretrained=False, **kwargs):
+ """
+ MobileNetV1_x0_5
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV1_x0_5(**kwargs)
+
+ return model
+
+ def mobilenetv1_x0_75(pretrained=False, **kwargs):
+ """
+ MobileNetV1_x0_75
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV1_x0_75` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV1_x0_75(**kwargs)
+
+ return model
+
+ def mobilenetv2_x0_25(pretrained=False, **kwargs):
+ """
+ MobileNetV2_x0_25
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV2_x0_25` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV2_x0_25(**kwargs)
+
+ return model
+
+ def mobilenetv2_x0_5(pretrained=False, **kwargs):
+ """
+ MobileNetV2_x0_5
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV2_x0_5` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV2_x0_5(**kwargs)
+
+ return model
+
+ def mobilenetv2_x0_75(pretrained=False, **kwargs):
+ """
+ MobileNetV2_x0_75
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV2_x0_75` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV2_x0_75(**kwargs)
+
+ return model
+
+ def mobilenetv2_x1_5(pretrained=False, **kwargs):
+ """
+ MobileNetV2_x1_5
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV2_x1_5` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV2_x1_5(**kwargs)
+
+ return model
+
+ def mobilenetv2_x2_0(pretrained=False, **kwargs):
+ """
+ MobileNetV2_x2_0
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV2_x2_0` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV2_x2_0(**kwargs)
+
+ return model
+
+ def mobilenetv3_large_x0_35(pretrained=False, **kwargs):
+ """
+ MobileNetV3_large_x0_35
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_large_x0_35` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_large_x0_35(**kwargs)
+
+ return model
+
+ def mobilenetv3_large_x0_5(pretrained=False, **kwargs):
+ """
+ MobileNetV3_large_x0_5
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_large_x0_5` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_large_x0_5(**kwargs)
+
+ return model
+
+ def mobilenetv3_large_x0_75(pretrained=False, **kwargs):
+ """
+ MobileNetV3_large_x0_75
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_large_x0_75` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_large_x0_75(**kwargs)
+
+ return model
+
+ def mobilenetv3_large_x1_0(pretrained=False, **kwargs):
+ """
+ MobileNetV3_large_x1_0
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_large_x1_0` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_large_x1_0(**kwargs)
+
+ return model
+
+ def mobilenetv3_large_x1_25(pretrained=False, **kwargs):
+ """
+ MobileNetV3_large_x1_25
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_large_x1_25` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_large_x1_25(**kwargs)
+
+ return model
+
+ def mobilenetv3_small_x0_35(pretrained=False, **kwargs):
+ """
+ MobileNetV3_small_x0_35
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_small_x0_35` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_small_x0_35(**kwargs)
+
+ return model
+
+ def mobilenetv3_small_x0_5(pretrained=False, **kwargs):
+ """
+ MobileNetV3_small_x0_5
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_small_x0_5` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_small_x0_5(**kwargs)
+
+ return model
+
+ def mobilenetv3_small_x0_75(pretrained=False, **kwargs):
+ """
+ MobileNetV3_small_x0_75
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_small_x0_75` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_small_x0_75(**kwargs)
+
+ return model
+
+ def mobilenetv3_small_x1_0(pretrained=False, **kwargs):
+ """
+ MobileNetV3_small_x1_0
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_small_x1_0` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_small_x1_0(**kwargs)
+
+ return model
+
+ def mobilenetv3_small_x1_25(pretrained=False, **kwargs):
+ """
+ MobileNetV3_small_x1_25
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_small_x1_25` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_small_x1_25(**kwargs)
+
+ return model
+
+ def resnext101_32x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt101_32x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt101_32x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt101_32x4d(**kwargs)
+
+ return model
+
+ def resnext101_64x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt101_64x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt101_64x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt101_64x4d(**kwargs)
+
+ return model
+
+ def resnext152_32x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt152_32x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt152_32x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt152_32x4d(**kwargs)
+
+ return model
+
+ def resnext152_64x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt152_64x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt152_64x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt152_64x4d(**kwargs)
+
+ return model
+
+ def resnext50_32x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt50_32x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt50_32x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt50_32x4d(**kwargs)
+
+ return model
+
+ def resnext50_64x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt50_64x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt50_64x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt50_64x4d(**kwargs)
+
+ return model
+
+ def darknet53(pretrained=False, **kwargs):
+ """
+ DarkNet53
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+            model: nn.Layer. Specific `DarkNet53` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DarkNet53(**kwargs)
+
+ return model
diff --git a/PaddleClas/paddleclas.py b/PaddleClas/paddleclas.py
new file mode 100644
index 0000000..bfad193
--- /dev/null
+++ b/PaddleClas/paddleclas.py
@@ -0,0 +1,572 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+__dir__ = os.path.dirname(__file__)
+sys.path.append(os.path.join(__dir__, ""))
+sys.path.append(os.path.join(__dir__, "deploy"))
+
+from typing import Union, Generator
+import argparse
+import shutil
+import textwrap
+import tarfile
+import requests
+import warnings
+from functools import partial
+from difflib import SequenceMatcher
+
+import cv2
+import numpy as np
+from tqdm import tqdm
+from prettytable import PrettyTable
+
+from deploy.python.predict_cls import ClsPredictor
+from deploy.utils.get_image_list import get_image_list
+from deploy.utils import config
+
+from ppcls.arch.backbone import *
+from ppcls.utils.logger import init_logger
+
+# initialize the logger; needed for building models that load pretrained backbone weights
+init_logger()
+
+__all__ = ["PaddleClas"]
+
+BASE_DIR = os.path.expanduser("~/.paddleclas/")
+BASE_INFERENCE_MODEL_DIR = os.path.join(BASE_DIR, "inference_model")
+BASE_IMAGES_DIR = os.path.join(BASE_DIR, "images")
+BASE_DOWNLOAD_URL = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/{}_infer.tar"
+MODEL_SERIES = {
+ "AlexNet": ["AlexNet"],
+ "DarkNet": ["DarkNet53"],
+ "DeiT": [
+ "DeiT_base_distilled_patch16_224", "DeiT_base_distilled_patch16_384",
+ "DeiT_base_patch16_224", "DeiT_base_patch16_384",
+ "DeiT_small_distilled_patch16_224", "DeiT_small_patch16_224",
+ "DeiT_tiny_distilled_patch16_224", "DeiT_tiny_patch16_224"
+ ],
+ "DenseNet": [
+ "DenseNet121", "DenseNet161", "DenseNet169", "DenseNet201",
+ "DenseNet264"
+ ],
+ "DLA": [
+ "DLA46_c", "DLA60x_c", "DLA34", "DLA60", "DLA60x", "DLA102", "DLA102x",
+ "DLA102x2", "DLA169"
+ ],
+ "DPN": ["DPN68", "DPN92", "DPN98", "DPN107", "DPN131"],
+ "EfficientNet": [
+ "EfficientNetB0", "EfficientNetB0_small", "EfficientNetB1",
+ "EfficientNetB2", "EfficientNetB3", "EfficientNetB4", "EfficientNetB5",
+ "EfficientNetB6", "EfficientNetB7"
+ ],
+ "ESNet": ["ESNet_x0_25", "ESNet_x0_5", "ESNet_x0_75", "ESNet_x1_0"],
+ "GhostNet":
+ ["GhostNet_x0_5", "GhostNet_x1_0", "GhostNet_x1_3", "GhostNet_x1_3_ssld"],
+ "HarDNet": ["HarDNet39_ds", "HarDNet68_ds", "HarDNet68", "HarDNet85"],
+ "HRNet": [
+ "HRNet_W18_C", "HRNet_W30_C", "HRNet_W32_C", "HRNet_W40_C",
+ "HRNet_W44_C", "HRNet_W48_C", "HRNet_W64_C", "HRNet_W18_C_ssld",
+ "HRNet_W48_C_ssld"
+ ],
+ "Inception": ["GoogLeNet", "InceptionV3", "InceptionV4"],
+ "MixNet": ["MixNet_S", "MixNet_M", "MixNet_L"],
+ "MobileNetV1": [
+ "MobileNetV1_x0_25", "MobileNetV1_x0_5", "MobileNetV1_x0_75",
+ "MobileNetV1", "MobileNetV1_ssld"
+ ],
+ "MobileNetV2": [
+ "MobileNetV2_x0_25", "MobileNetV2_x0_5", "MobileNetV2_x0_75",
+ "MobileNetV2", "MobileNetV2_x1_5", "MobileNetV2_x2_0",
+ "MobileNetV2_ssld"
+ ],
+ "MobileNetV3": [
+ "MobileNetV3_small_x0_35", "MobileNetV3_small_x0_5",
+ "MobileNetV3_small_x0_75", "MobileNetV3_small_x1_0",
+ "MobileNetV3_small_x1_25", "MobileNetV3_large_x0_35",
+ "MobileNetV3_large_x0_5", "MobileNetV3_large_x0_75",
+ "MobileNetV3_large_x1_0", "MobileNetV3_large_x1_25",
+ "MobileNetV3_small_x1_0_ssld", "MobileNetV3_large_x1_0_ssld"
+ ],
+ "PPLCNet": [
+ "PPLCNet_x0_25", "PPLCNet_x0_35", "PPLCNet_x0_5", "PPLCNet_x0_75",
+ "PPLCNet_x1_0", "PPLCNet_x1_5", "PPLCNet_x2_0", "PPLCNet_x2_5"
+ ],
+ "RedNet": ["RedNet26", "RedNet38", "RedNet50", "RedNet101", "RedNet152"],
+ "RegNet": ["RegNetX_4GF"],
+ "Res2Net": [
+ "Res2Net50_14w_8s", "Res2Net50_26w_4s", "Res2Net50_vd_26w_4s",
+ "Res2Net200_vd_26w_4s", "Res2Net101_vd_26w_4s",
+ "Res2Net50_vd_26w_4s_ssld", "Res2Net101_vd_26w_4s_ssld",
+ "Res2Net200_vd_26w_4s_ssld"
+ ],
+ "ResNeSt": ["ResNeSt50", "ResNeSt50_fast_1s1x64d"],
+ "ResNet": [
+ "ResNet18", "ResNet18_vd", "ResNet34", "ResNet34_vd", "ResNet50",
+ "ResNet50_vc", "ResNet50_vd", "ResNet50_vd_v2", "ResNet101",
+ "ResNet101_vd", "ResNet152", "ResNet152_vd", "ResNet200_vd",
+ "ResNet34_vd_ssld", "ResNet50_vd_ssld", "ResNet50_vd_ssld_v2",
+ "ResNet101_vd_ssld", "Fix_ResNet50_vd_ssld_v2", "ResNet50_ACNet_deploy"
+ ],
+ "ResNeXt": [
+ "ResNeXt50_32x4d", "ResNeXt50_vd_32x4d", "ResNeXt50_64x4d",
+ "ResNeXt50_vd_64x4d", "ResNeXt101_32x4d", "ResNeXt101_vd_32x4d",
+ "ResNeXt101_32x8d_wsl", "ResNeXt101_32x16d_wsl",
+ "ResNeXt101_32x32d_wsl", "ResNeXt101_32x48d_wsl",
+ "Fix_ResNeXt101_32x48d_wsl", "ResNeXt101_64x4d", "ResNeXt101_vd_64x4d",
+ "ResNeXt152_32x4d", "ResNeXt152_vd_32x4d", "ResNeXt152_64x4d",
+ "ResNeXt152_vd_64x4d"
+ ],
+ "ReXNet":
+ ["ReXNet_1_0", "ReXNet_1_3", "ReXNet_1_5", "ReXNet_2_0", "ReXNet_3_0"],
+ "SENet": [
+ "SENet154_vd", "SE_HRNet_W64_C_ssld", "SE_ResNet18_vd",
+ "SE_ResNet34_vd", "SE_ResNet50_vd", "SE_ResNeXt50_32x4d",
+ "SE_ResNeXt50_vd_32x4d", "SE_ResNeXt101_32x4d"
+ ],
+ "ShuffleNetV2": [
+ "ShuffleNetV2_swish", "ShuffleNetV2_x0_25", "ShuffleNetV2_x0_33",
+ "ShuffleNetV2_x0_5", "ShuffleNetV2_x1_0", "ShuffleNetV2_x1_5",
+ "ShuffleNetV2_x2_0"
+ ],
+ "SqueezeNet": ["SqueezeNet1_0", "SqueezeNet1_1"],
+ "SwinTransformer": [
+ "SwinTransformer_large_patch4_window7_224_22kto1k",
+ "SwinTransformer_large_patch4_window12_384_22kto1k",
+ "SwinTransformer_base_patch4_window7_224_22kto1k",
+ "SwinTransformer_base_patch4_window12_384_22kto1k",
+ "SwinTransformer_base_patch4_window12_384",
+ "SwinTransformer_base_patch4_window7_224",
+ "SwinTransformer_small_patch4_window7_224",
+ "SwinTransformer_tiny_patch4_window7_224"
+ ],
+ "Twins": [
+ "pcpvt_small", "pcpvt_base", "pcpvt_large", "alt_gvt_small",
+ "alt_gvt_base", "alt_gvt_large"
+ ],
+ "VGG": ["VGG11", "VGG13", "VGG16", "VGG19"],
+ "VisionTransformer": [
+ "ViT_base_patch16_224", "ViT_base_patch16_384", "ViT_base_patch32_384",
+ "ViT_large_patch16_224", "ViT_large_patch16_384",
+ "ViT_large_patch32_384", "ViT_small_patch16_224"
+ ],
+ "Xception": [
+ "Xception41", "Xception41_deeplab", "Xception65", "Xception65_deeplab",
+ "Xception71"
+ ]
+}
+
+
+class ImageTypeError(Exception):
+ """ImageTypeError.
+ """
+
+ def __init__(self, message=""):
+ super().__init__(message)
+
+
+class InputModelError(Exception):
+ """InputModelError.
+ """
+
+ def __init__(self, message=""):
+ super().__init__(message)
+
+
+def init_config(model_name,
+ inference_model_dir,
+ use_gpu=True,
+ batch_size=1,
+ topk=5,
+ **kwargs):
+ imagenet1k_map_path = os.path.join(
+ os.path.abspath(__dir__), "ppcls/utils/imagenet1k_label_list.txt")
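+    # Assemble the predictor config; recognized kwargs override the defaults below.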
+ cfg = {
+ "Global": {
+ "infer_imgs": kwargs["infer_imgs"]
+ if "infer_imgs" in kwargs else False,
+ "model_name": model_name,
+ "inference_model_dir": inference_model_dir,
+ "batch_size": batch_size,
+ "use_gpu": use_gpu,
+ "enable_mkldnn": kwargs["enable_mkldnn"]
+ if "enable_mkldnn" in kwargs else False,
+ "cpu_num_threads": kwargs["cpu_num_threads"]
+ if "cpu_num_threads" in kwargs else 1,
+ "enable_benchmark": False,
+ "use_fp16": kwargs["use_fp16"] if "use_fp16" in kwargs else False,
+ "ir_optim": True,
+ "use_tensorrt": kwargs["use_tensorrt"]
+ if "use_tensorrt" in kwargs else False,
+ "gpu_mem": kwargs["gpu_mem"] if "gpu_mem" in kwargs else 8000,
+ "enable_profile": False
+ },
+ "PreProcess": {
+ "transform_ops": [{
+ "ResizeImage": {
+ "resize_short": kwargs["resize_short"]
+ if "resize_short" in kwargs else 256
+ }
+ }, {
+ "CropImage": {
+ "size": kwargs["crop_size"]
+ if "crop_size" in kwargs else 224
+ }
+ }, {
+ "NormalizeImage": {
+ "scale": 0.00392157,
+ "mean": [0.485, 0.456, 0.406],
+ "std": [0.229, 0.224, 0.225],
+ "order": ''
+ }
+ }, {
+ "ToCHWImage": None
+ }]
+ },
+ "PostProcess": {
+ "main_indicator": "Topk",
+ "Topk": {
+ "topk": topk,
+ "class_id_map_file": imagenet1k_map_path
+ }
+ }
+ }
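+    # Optional post-processing extras: save predictions as pre-labels and/or use a custom class-id/label map.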
+ if "save_dir" in kwargs:
+ if kwargs["save_dir"] is not None:
+ cfg["PostProcess"]["SavePreLabel"] = {
+ "save_dir": kwargs["save_dir"]
+ }
+ if "class_id_map_file" in kwargs:
+ if kwargs["class_id_map_file"] is not None:
+ cfg["PostProcess"]["Topk"]["class_id_map_file"] = kwargs[
+ "class_id_map_file"]
+
+ cfg = config.AttrDict(cfg)
+ config.create_attr_dict(cfg)
+ return cfg
+
+
+def args_cfg():
+ def str2bool(v):
+ return v.lower() in ("true", "t", "1")
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--infer_imgs",
+ type=str,
+ required=True,
+ help="The image(s) to be predicted.")
+ parser.add_argument(
+ "--model_name", type=str, help="The model name to be used.")
+ parser.add_argument(
+ "--inference_model_dir",
+ type=str,
+ help="The directory of model files. Valid when model_name not specifed."
+ )
+ parser.add_argument(
+ "--use_gpu", type=str, default=True, help="Whether use GPU.")
+ parser.add_argument("--gpu_mem", type=int, default=8000, help="")
+ parser.add_argument(
+ "--enable_mkldnn",
+ type=str2bool,
+ default=False,
+ help="Whether use MKLDNN. Valid when use_gpu is False")
+ parser.add_argument("--cpu_num_threads", type=int, default=1, help="")
+ parser.add_argument(
+ "--use_tensorrt", type=str2bool, default=False, help="")
+ parser.add_argument("--use_fp16", type=str2bool, default=False, help="")
+ parser.add_argument(
+ "--batch_size", type=int, default=1, help="Batch size. Default by 1.")
+ parser.add_argument(
+ "--topk",
+ type=int,
+ default=5,
+ help="Return topk score(s) and corresponding results. Default by 5.")
+ parser.add_argument(
+ "--class_id_map_file",
+ type=str,
+ help="The path of file that map class_id and label.")
+ parser.add_argument(
+ "--save_dir",
+ type=str,
+ help="The directory to save prediction results as pre-label.")
+ parser.add_argument(
+ "--resize_short",
+ type=int,
+ default=256,
+ help="Resize according to short size.")
+ parser.add_argument(
+ "--crop_size", type=int, default=224, help="Centor crop size.")
+
+ args = parser.parse_args()
+ return vars(args)
+
+
+def print_info():
+ """Print list of supported models in formatted.
+ """
+ table = PrettyTable(["Series", "Name"])
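+    # Wrap the model-name column to the terminal width; fall back when stdout is not a terminal.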
+ try:
+ sz = os.get_terminal_size()
+ width = sz.columns - 30 if sz.columns > 50 else 10
+ except OSError:
+ width = 100
+ for series in MODEL_SERIES:
+ names = textwrap.fill(" ".join(MODEL_SERIES[series]), width=width)
+ table.add_row([series, names])
+ width = len(str(table).split("\n")[0])
+ print("{}".format("-" * width))
+ print("Models supported by PaddleClas".center(width))
+ print(table)
+ print("Powered by PaddlePaddle!".rjust(width))
+ print("{}".format("-" * width))
+
+
+def get_model_names():
+ """Get the model names list.
+ """
+ model_names = []
+ for series in MODEL_SERIES:
+ model_names += (MODEL_SERIES[series])
+ return model_names
+
+
+def similar_architectures(name="", names=[], thresh=0.1, topk=10):
+ """Find the most similar topk model names.
+ """
+ scores = []
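+    # Score every candidate by string similarity and keep the best matches above the threshold.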
+ for idx, n in enumerate(names):
+ if n.startswith("__"):
+ continue
+ score = SequenceMatcher(None, n.lower(), name.lower()).quick_ratio()
+ if score > thresh:
+ scores.append((idx, score))
+ scores.sort(key=lambda x: x[1], reverse=True)
+ similar_names = [names[s[0]] for s in scores[:min(topk, len(scores))]]
+ return similar_names
+
+
+def download_with_progressbar(url, save_path):
+ """Download from url with progressbar.
+ """
+ if os.path.isfile(save_path):
+ os.remove(save_path)
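+    # Stream the download in fixed-size blocks with a tqdm progress bar, then verify the byte count.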
+ response = requests.get(url, stream=True)
+ total_size_in_bytes = int(response.headers.get("content-length", 0))
+ block_size = 1024 # 1 Kibibyte
+ progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
+ with open(save_path, "wb") as file:
+ for data in response.iter_content(block_size):
+ progress_bar.update(len(data))
+ file.write(data)
+ progress_bar.close()
+ if total_size_in_bytes == 0 or progress_bar.n != total_size_in_bytes or not os.path.isfile(
+ save_path):
+ raise Exception(
+ f"Something went wrong while downloading file from {url}")
+
+
+def check_model_file(model_name):
+ """Check the model files exist and download and untar when no exist.
+ """
+ storage_directory = partial(os.path.join, BASE_INFERENCE_MODEL_DIR,
+ model_name)
+ url = BASE_DOWNLOAD_URL.format(model_name)
+
+ tar_file_name_list = [
+ "inference.pdiparams", "inference.pdiparams.info", "inference.pdmodel"
+ ]
+ model_file_path = storage_directory("inference.pdmodel")
+ params_file_path = storage_directory("inference.pdiparams")
+ if not os.path.exists(model_file_path) or not os.path.exists(
+ params_file_path):
+ tmp_path = storage_directory(url.split("/")[-1])
+ print(f"download {url} to {tmp_path}")
+ os.makedirs(storage_directory(), exist_ok=True)
+ download_with_progressbar(url, tmp_path)
+ with tarfile.open(tmp_path, "r") as tarObj:
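+            # Only the inference files listed in tar_file_name_list are extracted,
+            # and each is written directly into the model directory (any archive
+            # sub-directory prefix is dropped).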
+ for member in tarObj.getmembers():
+ filename = None
+ for tar_file_name in tar_file_name_list:
+ if tar_file_name in member.name:
+ filename = tar_file_name
+ if filename is None:
+ continue
+ file = tarObj.extractfile(member)
+ with open(storage_directory(filename), "wb") as f:
+ f.write(file.read())
+ os.remove(tmp_path)
+ if not os.path.exists(model_file_path) or not os.path.exists(
+ params_file_path):
+ raise Exception(
+ f"Something went wrong while praparing the model[{model_name}] files!"
+ )
+
+ return storage_directory()
+
+
+class PaddleClas(object):
+ """PaddleClas.
+ """
+
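+    # Calling print_info() in the class body prints the supported-model table
+    # once, when the class definition is first executed (i.e. at import time).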
+ print_info()
+
+ def __init__(self,
+ model_name: str=None,
+ inference_model_dir: str=None,
+ use_gpu: bool=True,
+ batch_size: int=1,
+ topk: int=5,
+ **kwargs):
+ """Init PaddleClas with config.
+
+ Args:
+            model_name (str, optional): The model name supported by PaddleClas. If specified, it overrides the config. Defaults to None.
+            inference_model_dir (str, optional): The directory containing the model file and params file to be used. If specified, it overrides the config. Defaults to None.
+            use_gpu (bool, optional): Whether to use GPU. If specified, it overrides the config. Defaults to True.
+            batch_size (int, optional): The batch size used to predict. If specified, it overrides the config. Defaults to 1.
+            topk (int, optional): Return the top k prediction results with the highest scores. Defaults to 5.
+ """
+ super().__init__()
+ self._config = init_config(model_name, inference_model_dir, use_gpu,
+ batch_size, topk, **kwargs)
+ self._check_input_model()
+ self.cls_predictor = ClsPredictor(self._config)
+
+ def get_config(self):
+ """Get the config.
+ """
+ return self._config
+
+ def _check_input_model(self):
+ """Check input model name or model files.
+ """
+ candidate_model_names = get_model_names()
+ input_model_name = self._config.Global.get("model_name", None)
+ inference_model_dir = self._config.Global.get("inference_model_dir",
+ None)
+ if input_model_name is not None:
+ similar_names = similar_architectures(input_model_name,
+ candidate_model_names)
+ similar_names_str = ", ".join(similar_names)
+ if input_model_name not in candidate_model_names:
+ err = f"{input_model_name} is not provided by PaddleClas. \nMaybe you want: [{similar_names_str}]. \nIf you want to use your own model, please specify inference_model_dir!"
+ raise InputModelError(err)
+ self._config.Global.inference_model_dir = check_model_file(
+ input_model_name)
+ return
+ elif inference_model_dir is not None:
+ model_file_path = os.path.join(inference_model_dir,
+ "inference.pdmodel")
+ params_file_path = os.path.join(inference_model_dir,
+ "inference.pdiparams")
+ if not os.path.isfile(model_file_path) or not os.path.isfile(
+ params_file_path):
+ err = f"There is no model file or params file in this directory: {inference_model_dir}"
+ raise InputModelError(err)
+ return
+ else:
+ err = f"Please specify the model name supported by PaddleClas or directory contained model files(inference.pdmodel, inference.pdiparams)."
+ raise InputModelError(err)
+ return
+
+    def predict(self, input_data: Union[str, np.ndarray],
+ print_pred: bool=False) -> Generator[list, None, None]:
+ """Predict input_data.
+
+ Args:
+            input_data (Union[str, np.ndarray]):
+                When the type is str, it is the path of an image, a directory containing images, or the URL of an image on the Internet.
+                When the type is np.ndarray, it is the image data whose channel order is RGB.
+            print_pred (bool, optional): Whether to print the prediction result. Defaults to False.
+
+ Raises:
+ ImageTypeError: Illegal input_data.
+
+ Yields:
+ Generator[list, None, None]:
+                The prediction results of input_data, yielded batch by batch (batch_size images per batch). For each image,
+                the prediction result is packed as a dict that includes the top-k "class_ids", "scores" and "label_names".
+                The format of a batch of prediction results is as follows: [{"class_ids": [...], "scores": [...], "label_names": [...]}, ...]
+ """
+
+ if isinstance(input_data, np.ndarray):
+ yield self.cls_predictor.predict(input_data)
+ elif isinstance(input_data, str):
+ if input_data.startswith("http") or input_data.startswith("https"):
+ image_storage_dir = partial(os.path.join, BASE_IMAGES_DIR)
+ if not os.path.exists(image_storage_dir()):
+ os.makedirs(image_storage_dir())
+ image_save_path = image_storage_dir("tmp.jpg")
+                download_with_progressbar(input_data, image_save_path)
+                warnings.warn(
+                    f"The image from the Internet ({input_data}) has been saved to: {image_save_path}"
+                )
+                input_data = image_save_path
+ image_list = get_image_list(input_data)
+
+ batch_size = self._config.Global.get("batch_size", 1)
+ topk = self._config.PostProcess.Topk.get('topk', 1)
+
+ img_list = []
+ img_path_list = []
+ cnt = 0
+ for idx, img_path in enumerate(image_list):
+ img = cv2.imread(img_path)
+ if img is None:
+ warnings.warn(
+ f"Image file failed to read and has been skipped. The path: {img_path}"
+ )
+ continue
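+                # cv2.imread returns BGR data; reverse the channel axis to get RGB.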
+ img = img[:, :, ::-1]
+ img_list.append(img)
+ img_path_list.append(img_path)
+ cnt += 1
+
+ if cnt % batch_size == 0 or (idx + 1) == len(image_list):
+ preds = self.cls_predictor.predict(img_list)
+
+                    if print_pred and preds:
+                        for pred_idx, pred in enumerate(preds):
+                            pred_str = ", ".join(
+                                [f"{k}: {pred[k]}" for k in pred])
+                            print(
+                                f"filename: {img_path_list[pred_idx]}, top-{topk}, {pred_str}"
+                            )
+
+ img_list = []
+ img_path_list = []
+ yield preds
+ else:
+ err = "Please input legal image! The type of image supported by PaddleClas are: NumPy.ndarray and string of local path or Ineternet URL"
+ raise ImageTypeError(err)
+ return
+
+
+# for CLI
+def main():
+ """Function API used for commad line.
+ """
+ cfg = args_cfg()
+ clas_engine = PaddleClas(**cfg)
+ res = clas_engine.predict(cfg["infer_imgs"], print_pred=True)
+ for _ in res:
+ pass
+ print("Predict complete!")
+ return
+
+
+if __name__ == "__main__":
+ main()
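+
+
+# Python API usage sketch (illustrative only): "ResNet50" is taken from the
+# MODEL_SERIES table above and the image path is a placeholder. The import
+# assumes the package is installed so that `from paddleclas import PaddleClas`
+# resolves (see the package __init__.py / setup.py in this patch series).
+#
+#     from paddleclas import PaddleClas
+#
+#     clas = PaddleClas(model_name="ResNet50", topk=5, use_gpu=False)
+#     for batch in clas.predict("path/to/image.jpg", print_pred=True):
+#         for result in batch:
+#             print(result["class_ids"], result["scores"], result["label_names"])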
diff --git a/PaddleClas/requirements.txt b/PaddleClas/requirements.txt
new file mode 100644
index 0000000..79f548c
--- /dev/null
+++ b/PaddleClas/requirements.txt
@@ -0,0 +1,11 @@
+prettytable
+ujson
+opencv-python==4.4.0.46
+pillow
+tqdm
+PyYAML
+visualdl >= 2.2.0
+scipy
+scikit-learn==0.23.2
+gast==0.3.3
+faiss-cpu==1.7.1.post2
diff --git a/PaddleClas/setup.py b/PaddleClas/setup.py
new file mode 100644
index 0000000..57045d3
--- /dev/null
+++ b/PaddleClas/setup.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from io import open
+from setuptools import setup
+
+with open('requirements.txt', encoding="utf-8-sig") as f:
+ requirements = f.readlines()
+
+
+def readme():
+ with open(
+ 'docs/en/inference_deployment/whl_deploy_en.md',
+ encoding="utf-8-sig") as f:
+ README = f.read()
+ return README
+
+
+setup(
+ name='paddleclas',
+ packages=['paddleclas'],
+ package_dir={'paddleclas': ''},
+ include_package_data=True,
+ entry_points={
+ "console_scripts": ["paddleclas= paddleclas.paddleclas:main"]
+ },
+ version='0.0.0',
+ install_requires=requirements,
+ license='Apache License 2.0',
+    description='Awesome Image Classification toolkits based on PaddlePaddle',
+ long_description=readme(),
+ long_description_content_type='text/markdown',
+ url='https://github.com/PaddlePaddle/PaddleClas',
+ download_url='https://github.com/PaddlePaddle/PaddleClas.git',
+ keywords=[
+ 'A treasure chest for image classification powered by PaddlePaddle.'
+ ],
+ classifiers=[
+ 'Intended Audience :: Developers',
+ 'Operating System :: OS Independent',
+ 'Natural Language :: Chinese (Simplified)',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.2',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7', 'Topic :: Utilities'
+ ], )
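+
+# Packaging note (an illustrative sketch, not part of the original file): after
+# installing this package (e.g. `pip install .` from the directory containing
+# setup.py), the console_scripts entry above exposes a `paddleclas` command
+# that forwards to paddleclas.main(), so the same flags defined in args_cfg()
+# are available, e.g. `paddleclas --model_name ResNet50 --infer_imgs path/to/image.jpg`.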
From cd02861c74a01b02c5d688458693a8f2078525ee Mon Sep 17 00:00:00 2001
From: wangziyang <2890199310@qq.com>
Date: Thu, 12 May 2022 19:56:58 +0800
Subject: [PATCH 3/4] Modify folder directory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
PaddleClas/MANIFEST.in | 7 -
PaddleClas/__init__.py | 17 -
PaddleClas/hubconf.py | 788 ------------------------------------
PaddleClas/paddleclas.py | 572 --------------------------
PaddleClas/requirements.txt | 11 -
PaddleClas/setup.py | 60 ---
6 files changed, 1455 deletions(-)
delete mode 100644 PaddleClas/MANIFEST.in
delete mode 100644 PaddleClas/__init__.py
delete mode 100644 PaddleClas/hubconf.py
delete mode 100644 PaddleClas/paddleclas.py
delete mode 100644 PaddleClas/requirements.txt
delete mode 100644 PaddleClas/setup.py
diff --git a/PaddleClas/MANIFEST.in b/PaddleClas/MANIFEST.in
deleted file mode 100644
index b0a4f6d..0000000
--- a/PaddleClas/MANIFEST.in
+++ /dev/null
@@ -1,7 +0,0 @@
-include LICENSE.txt
-include README.md
-include docs/en/whl_en.md
-recursive-include deploy/python predict_cls.py preprocess.py postprocess.py det_preprocess.py
-recursive-include deploy/utils get_image_list.py config.py logger.py predictor.py
-
-recursive-include ppcls/ *.py *.txt
\ No newline at end of file
diff --git a/PaddleClas/__init__.py b/PaddleClas/__init__.py
deleted file mode 100644
index 2128a6c..0000000
--- a/PaddleClas/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__all__ = ['PaddleClas']
-from .paddleclas import PaddleClas
-from ppcls.arch.backbone import *
diff --git a/PaddleClas/hubconf.py b/PaddleClas/hubconf.py
deleted file mode 100644
index b7f7674..0000000
--- a/PaddleClas/hubconf.py
+++ /dev/null
@@ -1,788 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-dependencies = ['paddle']
-
-import paddle
-import os
-import sys
-
-
-class _SysPathG(object):
- """
- _SysPathG used to add/clean path for sys.path. Making sure minimal pkgs dependents by skiping parent dirs.
-
- __enter__
- add path into sys.path
- __exit__
- clean user's sys.path to avoid unexpect behaviors
- """
-
- def __init__(self, path):
- self.path = path
-
- def __enter__(self, ):
- sys.path.insert(0, self.path)
-
- def __exit__(self, type, value, traceback):
- _p = sys.path.pop(0)
- assert _p == self.path, 'Make sure sys.path cleaning {} correctly.'.format(
- self.path)
-
-
-with _SysPathG(os.path.dirname(os.path.abspath(__file__)), ):
- import ppcls
- import ppcls.arch.backbone as backbone
-
- def ppclas_init():
- if ppcls.utils.logger._logger is None:
- ppcls.utils.logger.init_logger()
-
- ppclas_init()
-
- def _load_pretrained_parameters(model, name):
- url = 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/{}_pretrained.pdparams'.format(
- name)
- path = paddle.utils.download.get_weights_path_from_url(url)
- model.set_state_dict(paddle.load(path))
- return model
-
- def alexnet(pretrained=False, **kwargs):
- """
- AlexNet
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `AlexNet` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.AlexNet(**kwargs)
-
- return model
-
- def vgg11(pretrained=False, **kwargs):
- """
- VGG11
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False`
- Returns:
- model: nn.Layer. Specific `VGG11` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.VGG11(**kwargs)
-
- return model
-
- def vgg13(pretrained=False, **kwargs):
- """
- VGG13
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False`
- Returns:
- model: nn.Layer. Specific `VGG13` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.VGG13(**kwargs)
-
- return model
-
- def vgg16(pretrained=False, **kwargs):
- """
- VGG16
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False`
- Returns:
- model: nn.Layer. Specific `VGG16` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.VGG16(**kwargs)
-
- return model
-
- def vgg19(pretrained=False, **kwargs):
- """
- VGG19
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- stop_grad_layers: int=0. The parameters in blocks which index larger than `stop_grad_layers`, will be set `param.trainable=False`
- Returns:
- model: nn.Layer. Specific `VGG19` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.VGG19(**kwargs)
-
- return model
-
- def resnet18(pretrained=False, **kwargs):
- """
- ResNet18
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- input_image_channel: int=3. The number of input image channels
- data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC')
- Returns:
- model: nn.Layer. Specific `ResNet18` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNet18(**kwargs)
-
- return model
-
- def resnet34(pretrained=False, **kwargs):
- """
- ResNet34
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- input_image_channel: int=3. The number of input image channels
- data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC')
- Returns:
- model: nn.Layer. Specific `ResNet34` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNet34(**kwargs)
-
- return model
-
- def resnet50(pretrained=False, **kwargs):
- """
- ResNet50
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- input_image_channel: int=3. The number of input image channels
- data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC')
- Returns:
- model: nn.Layer. Specific `ResNet50` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNet50(**kwargs)
-
- return model
-
- def resnet101(pretrained=False, **kwargs):
- """
- ResNet101
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- input_image_channel: int=3. The number of input image channels
- data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC')
- Returns:
- model: nn.Layer. Specific `ResNet101` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNet101(**kwargs)
-
- return model
-
- def resnet152(pretrained=False, **kwargs):
- """
- ResNet152
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- input_image_channel: int=3. The number of input image channels
- data_format: str='NCHW'. The data format of batch input images, should in ('NCHW', 'NHWC')
- Returns:
- model: nn.Layer. Specific `ResNet152` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNet152(**kwargs)
-
- return model
-
- def squeezenet1_0(pretrained=False, **kwargs):
- """
- SqueezeNet1_0
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `SqueezeNet1_0` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.SqueezeNet1_0(**kwargs)
-
- return model
-
- def squeezenet1_1(pretrained=False, **kwargs):
- """
- SqueezeNet1_1
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `SqueezeNet1_1` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.SqueezeNet1_1(**kwargs)
-
- return model
-
- def densenet121(pretrained=False, **kwargs):
- """
- DenseNet121
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- dropout: float=0. Probability of setting units to zero.
- bn_size: int=4. The number of channals per group
- Returns:
- model: nn.Layer. Specific `DenseNet121` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.DenseNet121(**kwargs)
-
- return model
-
- def densenet161(pretrained=False, **kwargs):
- """
- DenseNet161
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- dropout: float=0. Probability of setting units to zero.
- bn_size: int=4. The number of channals per group
- Returns:
- model: nn.Layer. Specific `DenseNet161` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.DenseNet161(**kwargs)
-
- return model
-
- def densenet169(pretrained=False, **kwargs):
- """
- DenseNet169
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- dropout: float=0. Probability of setting units to zero.
- bn_size: int=4. The number of channals per group
- Returns:
- model: nn.Layer. Specific `DenseNet169` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.DenseNet169(**kwargs)
-
- return model
-
- def densenet201(pretrained=False, **kwargs):
- """
- DenseNet201
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- dropout: float=0. Probability of setting units to zero.
- bn_size: int=4. The number of channals per group
- Returns:
- model: nn.Layer. Specific `DenseNet201` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.DenseNet201(**kwargs)
-
- return model
-
- def densenet264(pretrained=False, **kwargs):
- """
- DenseNet264
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- dropout: float=0. Probability of setting units to zero.
- bn_size: int=4. The number of channals per group
- Returns:
- model: nn.Layer. Specific `DenseNet264` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.DenseNet264(**kwargs)
-
- return model
-
- def inceptionv3(pretrained=False, **kwargs):
- """
- InceptionV3
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `InceptionV3` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.InceptionV3(**kwargs)
-
- return model
-
- def inceptionv4(pretrained=False, **kwargs):
- """
- InceptionV4
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `InceptionV4` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.InceptionV4(**kwargs)
-
- return model
-
- def googlenet(pretrained=False, **kwargs):
- """
- GoogLeNet
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `GoogLeNet` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.GoogLeNet(**kwargs)
-
- return model
-
- def shufflenetv2_x0_25(pretrained=False, **kwargs):
- """
- ShuffleNetV2_x0_25
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `ShuffleNetV2_x0_25` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ShuffleNetV2_x0_25(**kwargs)
-
- return model
-
- def mobilenetv1(pretrained=False, **kwargs):
- """
- MobileNetV1
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV1` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV1(**kwargs)
-
- return model
-
- def mobilenetv1_x0_25(pretrained=False, **kwargs):
- """
- MobileNetV1_x0_25
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV1_x0_25(**kwargs)
-
- return model
-
- def mobilenetv1_x0_5(pretrained=False, **kwargs):
- """
- MobileNetV1_x0_5
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV1_x0_5(**kwargs)
-
- return model
-
- def mobilenetv1_x0_75(pretrained=False, **kwargs):
- """
- MobileNetV1_x0_75
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV1_x0_75` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV1_x0_75(**kwargs)
-
- return model
-
- def mobilenetv2_x0_25(pretrained=False, **kwargs):
- """
- MobileNetV2_x0_25
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV2_x0_25` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV2_x0_25(**kwargs)
-
- return model
-
- def mobilenetv2_x0_5(pretrained=False, **kwargs):
- """
- MobileNetV2_x0_5
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV2_x0_5` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV2_x0_5(**kwargs)
-
- return model
-
- def mobilenetv2_x0_75(pretrained=False, **kwargs):
- """
- MobileNetV2_x0_75
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV2_x0_75` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV2_x0_75(**kwargs)
-
- return model
-
- def mobilenetv2_x1_5(pretrained=False, **kwargs):
- """
- MobileNetV2_x1_5
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV2_x1_5` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV2_x1_5(**kwargs)
-
- return model
-
- def mobilenetv2_x2_0(pretrained=False, **kwargs):
- """
- MobileNetV2_x2_0
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV2_x2_0` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV2_x2_0(**kwargs)
-
- return model
-
- def mobilenetv3_large_x0_35(pretrained=False, **kwargs):
- """
- MobileNetV3_large_x0_35
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV3_large_x0_35` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV3_large_x0_35(**kwargs)
-
- return model
-
- def mobilenetv3_large_x0_5(pretrained=False, **kwargs):
- """
- MobileNetV3_large_x0_5
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV3_large_x0_5` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV3_large_x0_5(**kwargs)
-
- return model
-
- def mobilenetv3_large_x0_75(pretrained=False, **kwargs):
- """
- MobileNetV3_large_x0_75
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV3_large_x0_75` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV3_large_x0_75(**kwargs)
-
- return model
-
- def mobilenetv3_large_x1_0(pretrained=False, **kwargs):
- """
- MobileNetV3_large_x1_0
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV3_large_x1_0` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV3_large_x1_0(**kwargs)
-
- return model
-
- def mobilenetv3_large_x1_25(pretrained=False, **kwargs):
- """
- MobileNetV3_large_x1_25
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV3_large_x1_25` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV3_large_x1_25(**kwargs)
-
- return model
-
- def mobilenetv3_small_x0_35(pretrained=False, **kwargs):
- """
- MobileNetV3_small_x0_35
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV3_small_x0_35` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV3_small_x0_35(**kwargs)
-
- return model
-
- def mobilenetv3_small_x0_5(pretrained=False, **kwargs):
- """
- MobileNetV3_small_x0_5
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV3_small_x0_5` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV3_small_x0_5(**kwargs)
-
- return model
-
- def mobilenetv3_small_x0_75(pretrained=False, **kwargs):
- """
- MobileNetV3_small_x0_75
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV3_small_x0_75` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV3_small_x0_75(**kwargs)
-
- return model
-
- def mobilenetv3_small_x1_0(pretrained=False, **kwargs):
- """
- MobileNetV3_small_x1_0
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV3_small_x1_0` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV3_small_x1_0(**kwargs)
-
- return model
-
- def mobilenetv3_small_x1_25(pretrained=False, **kwargs):
- """
- MobileNetV3_small_x1_25
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `MobileNetV3_small_x1_25` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.MobileNetV3_small_x1_25(**kwargs)
-
- return model
-
- def resnext101_32x4d(pretrained=False, **kwargs):
- """
- ResNeXt101_32x4d
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `ResNeXt101_32x4d` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNeXt101_32x4d(**kwargs)
-
- return model
-
- def resnext101_64x4d(pretrained=False, **kwargs):
- """
- ResNeXt101_64x4d
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `ResNeXt101_64x4d` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNeXt101_64x4d(**kwargs)
-
- return model
-
- def resnext152_32x4d(pretrained=False, **kwargs):
- """
- ResNeXt152_32x4d
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `ResNeXt152_32x4d` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNeXt152_32x4d(**kwargs)
-
- return model
-
- def resnext152_64x4d(pretrained=False, **kwargs):
- """
- ResNeXt152_64x4d
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `ResNeXt152_64x4d` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNeXt152_64x4d(**kwargs)
-
- return model
-
- def resnext50_32x4d(pretrained=False, **kwargs):
- """
- ResNeXt50_32x4d
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `ResNeXt50_32x4d` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNeXt50_32x4d(**kwargs)
-
- return model
-
- def resnext50_64x4d(pretrained=False, **kwargs):
- """
- ResNeXt50_64x4d
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `ResNeXt50_64x4d` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.ResNeXt50_64x4d(**kwargs)
-
- return model
-
- def darknet53(pretrained=False, **kwargs):
- """
- DarkNet53
- Args:
- pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
- kwargs:
- class_dim: int=1000. Output dim of last fc layer.
- Returns:
- model: nn.Layer. Specific `ResNeXt50_64x4d` model depends on args.
- """
- kwargs.update({'pretrained': pretrained})
- model = backbone.DarkNet53(**kwargs)
-
- return model
diff --git a/PaddleClas/paddleclas.py b/PaddleClas/paddleclas.py
deleted file mode 100644
index bfad193..0000000
--- a/PaddleClas/paddleclas.py
+++ /dev/null
@@ -1,572 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-__dir__ = os.path.dirname(__file__)
-sys.path.append(os.path.join(__dir__, ""))
-sys.path.append(os.path.join(__dir__, "deploy"))
-
-from typing import Union, Generator
-import argparse
-import shutil
-import textwrap
-import tarfile
-import requests
-import warnings
-from functools import partial
-from difflib import SequenceMatcher
-
-import cv2
-import numpy as np
-from tqdm import tqdm
-from prettytable import PrettyTable
-
-from deploy.python.predict_cls import ClsPredictor
-from deploy.utils.get_image_list import get_image_list
-from deploy.utils import config
-
-from ppcls.arch.backbone import *
-from ppcls.utils.logger import init_logger
-
-# for building model with loading pretrained weights from backbone
-init_logger()
-
-__all__ = ["PaddleClas"]
-
-BASE_DIR = os.path.expanduser("~/.paddleclas/")
-BASE_INFERENCE_MODEL_DIR = os.path.join(BASE_DIR, "inference_model")
-BASE_IMAGES_DIR = os.path.join(BASE_DIR, "images")
-BASE_DOWNLOAD_URL = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/{}_infer.tar"
-MODEL_SERIES = {
- "AlexNet": ["AlexNet"],
- "DarkNet": ["DarkNet53"],
- "DeiT": [
- "DeiT_base_distilled_patch16_224", "DeiT_base_distilled_patch16_384",
- "DeiT_base_patch16_224", "DeiT_base_patch16_384",
- "DeiT_small_distilled_patch16_224", "DeiT_small_patch16_224",
- "DeiT_tiny_distilled_patch16_224", "DeiT_tiny_patch16_224"
- ],
- "DenseNet": [
- "DenseNet121", "DenseNet161", "DenseNet169", "DenseNet201",
- "DenseNet264"
- ],
- "DLA": [
- "DLA46_c", "DLA60x_c", "DLA34", "DLA60", "DLA60x", "DLA102", "DLA102x",
- "DLA102x2", "DLA169"
- ],
- "DPN": ["DPN68", "DPN92", "DPN98", "DPN107", "DPN131"],
- "EfficientNet": [
- "EfficientNetB0", "EfficientNetB0_small", "EfficientNetB1",
- "EfficientNetB2", "EfficientNetB3", "EfficientNetB4", "EfficientNetB5",
- "EfficientNetB6", "EfficientNetB7"
- ],
- "ESNet": ["ESNet_x0_25", "ESNet_x0_5", "ESNet_x0_75", "ESNet_x1_0"],
- "GhostNet":
- ["GhostNet_x0_5", "GhostNet_x1_0", "GhostNet_x1_3", "GhostNet_x1_3_ssld"],
- "HarDNet": ["HarDNet39_ds", "HarDNet68_ds", "HarDNet68", "HarDNet85"],
- "HRNet": [
- "HRNet_W18_C", "HRNet_W30_C", "HRNet_W32_C", "HRNet_W40_C",
- "HRNet_W44_C", "HRNet_W48_C", "HRNet_W64_C", "HRNet_W18_C_ssld",
- "HRNet_W48_C_ssld"
- ],
- "Inception": ["GoogLeNet", "InceptionV3", "InceptionV4"],
- "MixNet": ["MixNet_S", "MixNet_M", "MixNet_L"],
- "MobileNetV1": [
- "MobileNetV1_x0_25", "MobileNetV1_x0_5", "MobileNetV1_x0_75",
- "MobileNetV1", "MobileNetV1_ssld"
- ],
- "MobileNetV2": [
- "MobileNetV2_x0_25", "MobileNetV2_x0_5", "MobileNetV2_x0_75",
- "MobileNetV2", "MobileNetV2_x1_5", "MobileNetV2_x2_0",
- "MobileNetV2_ssld"
- ],
- "MobileNetV3": [
- "MobileNetV3_small_x0_35", "MobileNetV3_small_x0_5",
- "MobileNetV3_small_x0_75", "MobileNetV3_small_x1_0",
- "MobileNetV3_small_x1_25", "MobileNetV3_large_x0_35",
- "MobileNetV3_large_x0_5", "MobileNetV3_large_x0_75",
- "MobileNetV3_large_x1_0", "MobileNetV3_large_x1_25",
- "MobileNetV3_small_x1_0_ssld", "MobileNetV3_large_x1_0_ssld"
- ],
- "PPLCNet": [
- "PPLCNet_x0_25", "PPLCNet_x0_35", "PPLCNet_x0_5", "PPLCNet_x0_75",
- "PPLCNet_x1_0", "PPLCNet_x1_5", "PPLCNet_x2_0", "PPLCNet_x2_5"
- ],
- "RedNet": ["RedNet26", "RedNet38", "RedNet50", "RedNet101", "RedNet152"],
- "RegNet": ["RegNetX_4GF"],
- "Res2Net": [
- "Res2Net50_14w_8s", "Res2Net50_26w_4s", "Res2Net50_vd_26w_4s",
- "Res2Net200_vd_26w_4s", "Res2Net101_vd_26w_4s",
- "Res2Net50_vd_26w_4s_ssld", "Res2Net101_vd_26w_4s_ssld",
- "Res2Net200_vd_26w_4s_ssld"
- ],
- "ResNeSt": ["ResNeSt50", "ResNeSt50_fast_1s1x64d"],
- "ResNet": [
- "ResNet18", "ResNet18_vd", "ResNet34", "ResNet34_vd", "ResNet50",
- "ResNet50_vc", "ResNet50_vd", "ResNet50_vd_v2", "ResNet101",
- "ResNet101_vd", "ResNet152", "ResNet152_vd", "ResNet200_vd",
- "ResNet34_vd_ssld", "ResNet50_vd_ssld", "ResNet50_vd_ssld_v2",
- "ResNet101_vd_ssld", "Fix_ResNet50_vd_ssld_v2", "ResNet50_ACNet_deploy"
- ],
- "ResNeXt": [
- "ResNeXt50_32x4d", "ResNeXt50_vd_32x4d", "ResNeXt50_64x4d",
- "ResNeXt50_vd_64x4d", "ResNeXt101_32x4d", "ResNeXt101_vd_32x4d",
- "ResNeXt101_32x8d_wsl", "ResNeXt101_32x16d_wsl",
- "ResNeXt101_32x32d_wsl", "ResNeXt101_32x48d_wsl",
- "Fix_ResNeXt101_32x48d_wsl", "ResNeXt101_64x4d", "ResNeXt101_vd_64x4d",
- "ResNeXt152_32x4d", "ResNeXt152_vd_32x4d", "ResNeXt152_64x4d",
- "ResNeXt152_vd_64x4d"
- ],
- "ReXNet":
- ["ReXNet_1_0", "ReXNet_1_3", "ReXNet_1_5", "ReXNet_2_0", "ReXNet_3_0"],
- "SENet": [
- "SENet154_vd", "SE_HRNet_W64_C_ssld", "SE_ResNet18_vd",
- "SE_ResNet34_vd", "SE_ResNet50_vd", "SE_ResNeXt50_32x4d",
- "SE_ResNeXt50_vd_32x4d", "SE_ResNeXt101_32x4d"
- ],
- "ShuffleNetV2": [
- "ShuffleNetV2_swish", "ShuffleNetV2_x0_25", "ShuffleNetV2_x0_33",
- "ShuffleNetV2_x0_5", "ShuffleNetV2_x1_0", "ShuffleNetV2_x1_5",
- "ShuffleNetV2_x2_0"
- ],
- "SqueezeNet": ["SqueezeNet1_0", "SqueezeNet1_1"],
- "SwinTransformer": [
- "SwinTransformer_large_patch4_window7_224_22kto1k",
- "SwinTransformer_large_patch4_window12_384_22kto1k",
- "SwinTransformer_base_patch4_window7_224_22kto1k",
- "SwinTransformer_base_patch4_window12_384_22kto1k",
- "SwinTransformer_base_patch4_window12_384",
- "SwinTransformer_base_patch4_window7_224",
- "SwinTransformer_small_patch4_window7_224",
- "SwinTransformer_tiny_patch4_window7_224"
- ],
- "Twins": [
- "pcpvt_small", "pcpvt_base", "pcpvt_large", "alt_gvt_small",
- "alt_gvt_base", "alt_gvt_large"
- ],
- "VGG": ["VGG11", "VGG13", "VGG16", "VGG19"],
- "VisionTransformer": [
- "ViT_base_patch16_224", "ViT_base_patch16_384", "ViT_base_patch32_384",
- "ViT_large_patch16_224", "ViT_large_patch16_384",
- "ViT_large_patch32_384", "ViT_small_patch16_224"
- ],
- "Xception": [
- "Xception41", "Xception41_deeplab", "Xception65", "Xception65_deeplab",
- "Xception71"
- ]
-}
-
-
-class ImageTypeError(Exception):
- """ImageTypeError.
- """
-
- def __init__(self, message=""):
- super().__init__(message)
-
-
-class InputModelError(Exception):
- """InputModelError.
- """
-
- def __init__(self, message=""):
- super().__init__(message)
-
-
-def init_config(model_name,
- inference_model_dir,
- use_gpu=True,
- batch_size=1,
- topk=5,
- **kwargs):
- imagenet1k_map_path = os.path.join(
- os.path.abspath(__dir__), "ppcls/utils/imagenet1k_label_list.txt")
- cfg = {
- "Global": {
- "infer_imgs": kwargs["infer_imgs"]
- if "infer_imgs" in kwargs else False,
- "model_name": model_name,
- "inference_model_dir": inference_model_dir,
- "batch_size": batch_size,
- "use_gpu": use_gpu,
- "enable_mkldnn": kwargs["enable_mkldnn"]
- if "enable_mkldnn" in kwargs else False,
- "cpu_num_threads": kwargs["cpu_num_threads"]
- if "cpu_num_threads" in kwargs else 1,
- "enable_benchmark": False,
- "use_fp16": kwargs["use_fp16"] if "use_fp16" in kwargs else False,
- "ir_optim": True,
- "use_tensorrt": kwargs["use_tensorrt"]
- if "use_tensorrt" in kwargs else False,
- "gpu_mem": kwargs["gpu_mem"] if "gpu_mem" in kwargs else 8000,
- "enable_profile": False
- },
- "PreProcess": {
- "transform_ops": [{
- "ResizeImage": {
- "resize_short": kwargs["resize_short"]
- if "resize_short" in kwargs else 256
- }
- }, {
- "CropImage": {
- "size": kwargs["crop_size"]
- if "crop_size" in kwargs else 224
- }
- }, {
- "NormalizeImage": {
- "scale": 0.00392157,
- "mean": [0.485, 0.456, 0.406],
- "std": [0.229, 0.224, 0.225],
- "order": ''
- }
- }, {
- "ToCHWImage": None
- }]
- },
- "PostProcess": {
- "main_indicator": "Topk",
- "Topk": {
- "topk": topk,
- "class_id_map_file": imagenet1k_map_path
- }
- }
- }
- if "save_dir" in kwargs:
- if kwargs["save_dir"] is not None:
- cfg["PostProcess"]["SavePreLabel"] = {
- "save_dir": kwargs["save_dir"]
- }
- if "class_id_map_file" in kwargs:
- if kwargs["class_id_map_file"] is not None:
- cfg["PostProcess"]["Topk"]["class_id_map_file"] = kwargs[
- "class_id_map_file"]
-
- cfg = config.AttrDict(cfg)
- config.create_attr_dict(cfg)
- return cfg
-
-
-def args_cfg():
- def str2bool(v):
- return v.lower() in ("true", "t", "1")
-
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--infer_imgs",
- type=str,
- required=True,
- help="The image(s) to be predicted.")
- parser.add_argument(
- "--model_name", type=str, help="The model name to be used.")
- parser.add_argument(
- "--inference_model_dir",
- type=str,
- help="The directory of model files. Valid when model_name not specifed."
- )
- parser.add_argument(
- "--use_gpu", type=str, default=True, help="Whether use GPU.")
- parser.add_argument("--gpu_mem", type=int, default=8000, help="")
- parser.add_argument(
- "--enable_mkldnn",
- type=str2bool,
- default=False,
- help="Whether use MKLDNN. Valid when use_gpu is False")
- parser.add_argument("--cpu_num_threads", type=int, default=1, help="")
- parser.add_argument(
- "--use_tensorrt", type=str2bool, default=False, help="")
- parser.add_argument("--use_fp16", type=str2bool, default=False, help="")
- parser.add_argument(
- "--batch_size", type=int, default=1, help="Batch size. Default by 1.")
- parser.add_argument(
- "--topk",
- type=int,
- default=5,
- help="Return topk score(s) and corresponding results. Default by 5.")
- parser.add_argument(
- "--class_id_map_file",
- type=str,
- help="The path of file that map class_id and label.")
- parser.add_argument(
- "--save_dir",
- type=str,
- help="The directory to save prediction results as pre-label.")
- parser.add_argument(
- "--resize_short",
- type=int,
- default=256,
- help="Resize according to short size.")
- parser.add_argument(
- "--crop_size", type=int, default=224, help="Centor crop size.")
-
- args = parser.parse_args()
- return vars(args)
-
-
-def print_info():
- """Print list of supported models in formatted.
- """
- table = PrettyTable(["Series", "Name"])
- try:
- sz = os.get_terminal_size()
- width = sz.columns - 30 if sz.columns > 50 else 10
- except OSError:
- width = 100
- for series in MODEL_SERIES:
- names = textwrap.fill(" ".join(MODEL_SERIES[series]), width=width)
- table.add_row([series, names])
- width = len(str(table).split("\n")[0])
- print("{}".format("-" * width))
- print("Models supported by PaddleClas".center(width))
- print(table)
- print("Powered by PaddlePaddle!".rjust(width))
- print("{}".format("-" * width))
-
-
-def get_model_names():
- """Get the model names list.
- """
- model_names = []
- for series in MODEL_SERIES:
- model_names += (MODEL_SERIES[series])
- return model_names
-
-
-def similar_architectures(name="", names=[], thresh=0.1, topk=10):
- """Find the most similar topk model names.
- """
- scores = []
- for idx, n in enumerate(names):
- if n.startswith("__"):
- continue
- score = SequenceMatcher(None, n.lower(), name.lower()).quick_ratio()
- if score > thresh:
- scores.append((idx, score))
- scores.sort(key=lambda x: x[1], reverse=True)
- similar_names = [names[s[0]] for s in scores[:min(topk, len(scores))]]
- return similar_names
-
-
-def download_with_progressbar(url, save_path):
- """Download from url with progressbar.
- """
- if os.path.isfile(save_path):
- os.remove(save_path)
- response = requests.get(url, stream=True)
- total_size_in_bytes = int(response.headers.get("content-length", 0))
- block_size = 1024 # 1 Kibibyte
- progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
- with open(save_path, "wb") as file:
- for data in response.iter_content(block_size):
- progress_bar.update(len(data))
- file.write(data)
- progress_bar.close()
- if total_size_in_bytes == 0 or progress_bar.n != total_size_in_bytes or not os.path.isfile(
- save_path):
- raise Exception(
- f"Something went wrong while downloading file from {url}")
-
-
-def check_model_file(model_name):
- """Check the model files exist and download and untar when no exist.
- """
- storage_directory = partial(os.path.join, BASE_INFERENCE_MODEL_DIR,
- model_name)
- url = BASE_DOWNLOAD_URL.format(model_name)
-
- tar_file_name_list = [
- "inference.pdiparams", "inference.pdiparams.info", "inference.pdmodel"
- ]
- model_file_path = storage_directory("inference.pdmodel")
- params_file_path = storage_directory("inference.pdiparams")
- if not os.path.exists(model_file_path) or not os.path.exists(
- params_file_path):
- tmp_path = storage_directory(url.split("/")[-1])
- print(f"download {url} to {tmp_path}")
- os.makedirs(storage_directory(), exist_ok=True)
- download_with_progressbar(url, tmp_path)
- with tarfile.open(tmp_path, "r") as tarObj:
- for member in tarObj.getmembers():
- filename = None
- for tar_file_name in tar_file_name_list:
- if tar_file_name in member.name:
- filename = tar_file_name
- if filename is None:
- continue
- file = tarObj.extractfile(member)
- with open(storage_directory(filename), "wb") as f:
- f.write(file.read())
- os.remove(tmp_path)
- if not os.path.exists(model_file_path) or not os.path.exists(
- params_file_path):
- raise Exception(
- f"Something went wrong while praparing the model[{model_name}] files!"
- )
-
- return storage_directory()
-
-
-class PaddleClas(object):
- """PaddleClas.
- """
-
- print_info()
-
- def __init__(self,
- model_name: str=None,
- inference_model_dir: str=None,
- use_gpu: bool=True,
- batch_size: int=1,
- topk: int=5,
- **kwargs):
- """Init PaddleClas with config.
-
- Args:
- model_name (str, optional): The model name supported by PaddleClas. If specified, override config. Defaults to None.
- inference_model_dir (str, optional): The directory that contained model file and params file to be used. If specified, override config. Defaults to None.
- use_gpu (bool, optional): Whether use GPU. If specified, override config. Defaults to True.
- batch_size (int, optional): The batch size to pridict. If specified, override config. Defaults to 1.
- topk (int, optional): Return the top k prediction results with the highest score. Defaults to 5.
- """
- super().__init__()
- self._config = init_config(model_name, inference_model_dir, use_gpu,
- batch_size, topk, **kwargs)
- self._check_input_model()
- self.cls_predictor = ClsPredictor(self._config)
-
- def get_config(self):
- """Get the config.
- """
- return self._config
-
- def _check_input_model(self):
- """Check input model name or model files.
- """
- candidate_model_names = get_model_names()
- input_model_name = self._config.Global.get("model_name", None)
- inference_model_dir = self._config.Global.get("inference_model_dir",
- None)
- if input_model_name is not None:
- similar_names = similar_architectures(input_model_name,
- candidate_model_names)
- similar_names_str = ", ".join(similar_names)
- if input_model_name not in candidate_model_names:
- err = f"{input_model_name} is not provided by PaddleClas. \nMaybe you want: [{similar_names_str}]. \nIf you want to use your own model, please specify inference_model_dir!"
- raise InputModelError(err)
- self._config.Global.inference_model_dir = check_model_file(
- input_model_name)
- return
- elif inference_model_dir is not None:
- model_file_path = os.path.join(inference_model_dir,
- "inference.pdmodel")
- params_file_path = os.path.join(inference_model_dir,
- "inference.pdiparams")
- if not os.path.isfile(model_file_path) or not os.path.isfile(
- params_file_path):
- err = f"There is no model file or params file in this directory: {inference_model_dir}"
- raise InputModelError(err)
- return
- else:
- err = f"Please specify the model name supported by PaddleClas or directory contained model files(inference.pdmodel, inference.pdiparams)."
- raise InputModelError(err)
- return
-
- def predict(self, input_data: Union[str, np.array],
- print_pred: bool=False) -> Generator[list, None, None]:
- """Predict input_data.
-
- Args:
- input_data (Union[str, np.array]):
- When the type is str, it is the path of image, or the directory containing images, or the URL of image from Internet.
- When the type is np.array, it is the image data whose channel order is RGB.
- print_pred (bool, optional): Whether print the prediction result. Defaults to False.
-
- Raises:
- ImageTypeError: Illegal input_data.
-
- Yields:
- Generator[list, None, None]:
- The prediction result(s) of input_data by batch_size. For every one image,
- prediction result(s) is zipped as a dict, that includs topk "class_ids", "scores" and "label_names".
- The format of batch prediction result(s) is as follow: [{"class_ids": [...], "scores": [...], "label_names": [...]}, ...]
- """
-
- if isinstance(input_data, np.ndarray):
- yield self.cls_predictor.predict(input_data)
- elif isinstance(input_data, str):
- if input_data.startswith("http") or input_data.startswith("https"):
- image_storage_dir = partial(os.path.join, BASE_IMAGES_DIR)
- if not os.path.exists(image_storage_dir()):
- os.makedirs(image_storage_dir())
- image_save_path = image_storage_dir("tmp.jpg")
- download_with_progressbar(input_data, image_save_path)
- input_data = image_save_path
- warnings.warn(
- f"Image to be predicted from Internet: {input_data}, has been saved to: {image_save_path}"
- )
- image_list = get_image_list(input_data)
-
- batch_size = self._config.Global.get("batch_size", 1)
- topk = self._config.PostProcess.Topk.get('topk', 1)
-
- img_list = []
- img_path_list = []
- cnt = 0
- for idx, img_path in enumerate(image_list):
- img = cv2.imread(img_path)
- if img is None:
- warnings.warn(
- f"Image file failed to read and has been skipped. The path: {img_path}"
- )
- continue
- img = img[:, :, ::-1]
- img_list.append(img)
- img_path_list.append(img_path)
- cnt += 1
-
- if cnt % batch_size == 0 or (idx + 1) == len(image_list):
- preds = self.cls_predictor.predict(img_list)
-
- if print_pred and preds:
- for idx, pred in enumerate(preds):
- pred_str = ", ".join(
- [f"{k}: {pred[k]}" for k in pred])
- print(
- f"filename: {img_path_list[idx]}, top-{topk}, {pred_str}"
- )
-
- img_list = []
- img_path_list = []
- yield preds
- else:
- err = "Please input legal image! The type of image supported by PaddleClas are: NumPy.ndarray and string of local path or Ineternet URL"
- raise ImageTypeError(err)
- return
-
-
-# for CLI
-def main():
- """Function API used for commad line.
- """
- cfg = args_cfg()
- clas_engine = PaddleClas(**cfg)
- res = clas_engine.predict(cfg["infer_imgs"], print_pred=True)
- for _ in res:
- pass
- print("Predict complete!")
- return
-
-
-if __name__ == "__main__":
- main()
diff --git a/PaddleClas/requirements.txt b/PaddleClas/requirements.txt
deleted file mode 100644
index 79f548c..0000000
--- a/PaddleClas/requirements.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-prettytable
-ujson
-opencv-python==4.4.0.46
-pillow
-tqdm
-PyYAML
-visualdl >= 2.2.0
-scipy
-scikit-learn==0.23.2
-gast==0.3.3
-faiss-cpu==1.7.1.post2
diff --git a/PaddleClas/setup.py b/PaddleClas/setup.py
deleted file mode 100644
index 57045d3..0000000
--- a/PaddleClas/setup.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from io import open
-from setuptools import setup
-
-with open('requirements.txt', encoding="utf-8-sig") as f:
- requirements = f.readlines()
-
-
-def readme():
- with open(
- 'docs/en/inference_deployment/whl_deploy_en.md',
- encoding="utf-8-sig") as f:
- README = f.read()
- return README
-
-
-setup(
- name='paddleclas',
- packages=['paddleclas'],
- package_dir={'paddleclas': ''},
- include_package_data=True,
- entry_points={
- "console_scripts": ["paddleclas= paddleclas.paddleclas:main"]
- },
- version='0.0.0',
- install_requires=requirements,
- license='Apache License 2.0',
- description='Awesome Image Classification toolkits based on PaddlePaddle ',
- long_description=readme(),
- long_description_content_type='text/markdown',
- url='https://github.com/PaddlePaddle/PaddleClas',
- download_url='https://github.com/PaddlePaddle/PaddleClas.git',
- keywords=[
- 'A treasure chest for image classification powered by PaddlePaddle.'
- ],
- classifiers=[
- 'Intended Audience :: Developers',
- 'Operating System :: OS Independent',
- 'Natural Language :: Chinese (Simplified)',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.2',
- 'Programming Language :: Python :: 3.3',
- 'Programming Language :: Python :: 3.4',
- 'Programming Language :: Python :: 3.5',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7', 'Topic :: Utilities'
- ], )
From 1f8cedbdb05a7b41de671b3ed8bad94ce48996a0 Mon Sep 17 00:00:00 2001
From: wangziyang <2890199310@qq.com>
Date: Thu, 12 May 2022 19:57:55 +0800
Subject: [PATCH 4/4] Modify file directory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/PaddleClas/MANIFEST.in | 7 +
src/PaddleClas/__init__.py | 17 +
src/PaddleClas/hubconf.py | 788 ++++++++++++++++++++++++++++++++
src/PaddleClas/paddleclas.py | 572 +++++++++++++++++++++++
src/PaddleClas/requirements.txt | 11 +
src/PaddleClas/setup.py | 60 +++
6 files changed, 1455 insertions(+)
create mode 100644 src/PaddleClas/MANIFEST.in
create mode 100644 src/PaddleClas/__init__.py
create mode 100644 src/PaddleClas/hubconf.py
create mode 100644 src/PaddleClas/paddleclas.py
create mode 100644 src/PaddleClas/requirements.txt
create mode 100644 src/PaddleClas/setup.py
diff --git a/src/PaddleClas/MANIFEST.in b/src/PaddleClas/MANIFEST.in
new file mode 100644
index 0000000..b0a4f6d
--- /dev/null
+++ b/src/PaddleClas/MANIFEST.in
@@ -0,0 +1,7 @@
+include LICENSE.txt
+include README.md
+include docs/en/whl_en.md
+recursive-include deploy/python predict_cls.py preprocess.py postprocess.py det_preprocess.py
+recursive-include deploy/utils get_image_list.py config.py logger.py predictor.py
+
+recursive-include ppcls/ *.py *.txt
\ No newline at end of file
diff --git a/src/PaddleClas/__init__.py b/src/PaddleClas/__init__.py
new file mode 100644
index 0000000..2128a6c
--- /dev/null
+++ b/src/PaddleClas/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = ['PaddleClas']
+from .paddleclas import PaddleClas
+from ppcls.arch.backbone import *
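+
+# With the package installed via the accompanying setup.py, the class is
+# importable as shown below (a minimal sketch, nothing beyond this file's exports):
+#     from paddleclas import PaddleClas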
diff --git a/src/PaddleClas/hubconf.py b/src/PaddleClas/hubconf.py
new file mode 100644
index 0000000..b7f7674
--- /dev/null
+++ b/src/PaddleClas/hubconf.py
@@ -0,0 +1,788 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies = ['paddle']
+
+import paddle
+import os
+import sys
+
+
+class _SysPathG(object):
+ """
+ _SysPathG is used to add/clean a path in sys.path, ensuring minimal package dependencies by skipping parent dirs.
+
+ __enter__
+ add path into sys.path
+ __exit__
+ clean the user's sys.path to avoid unexpected behaviors
+ """
+
+ def __init__(self, path):
+ self.path = path
+
+ def __enter__(self, ):
+ sys.path.insert(0, self.path)
+
+ def __exit__(self, type, value, traceback):
+ _p = sys.path.pop(0)
+ assert _p == self.path, 'Failed to clean {} from sys.path correctly.'.format(
+ self.path)
+
+
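+# A minimal sketch of how the guard above is used below (nothing here is extra
+# API, just the pattern): this file's directory is prepended to sys.path on
+# entry so that `import ppcls` resolves to the copy shipped with this repo,
+# and it is popped again on exit, e.g.
+#     with _SysPathG(os.path.dirname(os.path.abspath(__file__))):
+#         import ppcls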
+with _SysPathG(os.path.dirname(os.path.abspath(__file__)), ):
+ import ppcls
+ import ppcls.arch.backbone as backbone
+
+ def ppclas_init():
+ if ppcls.utils.logger._logger is None:
+ ppcls.utils.logger.init_logger()
+
+ ppclas_init()
+
+ def _load_pretrained_parameters(model, name):
+ url = 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/{}_pretrained.pdparams'.format(
+ name)
+ path = paddle.utils.download.get_weights_path_from_url(url)
+ model.set_state_dict(paddle.load(path))
+ return model
+
+ def alexnet(pretrained=False, **kwargs):
+ """
+ AlexNet
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `AlexNet` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.AlexNet(**kwargs)
+
+ return model
+
+ def vgg11(pretrained=False, **kwargs):
+ """
+ VGG11
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ stop_grad_layers: int=0. Parameters in blocks whose index is larger than `stop_grad_layers` will be set to `param.trainable=False`.
+ Returns:
+ model: nn.Layer. Specific `VGG11` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.VGG11(**kwargs)
+
+ return model
+
+ def vgg13(pretrained=False, **kwargs):
+ """
+ VGG13
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ stop_grad_layers: int=0. Parameters in blocks whose index is larger than `stop_grad_layers` will be set to `param.trainable=False`.
+ Returns:
+ model: nn.Layer. Specific `VGG13` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.VGG13(**kwargs)
+
+ return model
+
+ def vgg16(pretrained=False, **kwargs):
+ """
+ VGG16
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ stop_grad_layers: int=0. Parameters in blocks whose index is larger than `stop_grad_layers` will be set to `param.trainable=False`.
+ Returns:
+ model: nn.Layer. Specific `VGG16` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.VGG16(**kwargs)
+
+ return model
+
+ def vgg19(pretrained=False, **kwargs):
+ """
+ VGG19
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ stop_grad_layers: int=0. Parameters in blocks whose index is larger than `stop_grad_layers` will be set to `param.trainable=False`.
+ Returns:
+ model: nn.Layer. Specific `VGG19` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.VGG19(**kwargs)
+
+ return model
+
+ def resnet18(pretrained=False, **kwargs):
+ """
+ ResNet18
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ input_image_channel: int=3. The number of input image channels
+ data_format: str='NCHW'. The data format of batch input images; should be in ('NCHW', 'NHWC').
+ Returns:
+ model: nn.Layer. Specific `ResNet18` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNet18(**kwargs)
+
+ return model
+
+ def resnet34(pretrained=False, **kwargs):
+ """
+ ResNet34
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ input_image_channel: int=3. The number of input image channels
+ data_format: str='NCHW'. The data format of batch input images; should be in ('NCHW', 'NHWC').
+ Returns:
+ model: nn.Layer. Specific `ResNet34` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNet34(**kwargs)
+
+ return model
+
+ def resnet50(pretrained=False, **kwargs):
+ """
+ ResNet50
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ input_image_channel: int=3. The number of input image channels
+ data_format: str='NCHW'. The data format of batch input images; should be in ('NCHW', 'NHWC').
+ Returns:
+ model: nn.Layer. Specific `ResNet50` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNet50(**kwargs)
+
+ return model
+
+ def resnet101(pretrained=False, **kwargs):
+ """
+ ResNet101
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ input_image_channel: int=3. The number of input image channels
+ data_format: str='NCHW'. The data format of batch input images; should be in ('NCHW', 'NHWC').
+ Returns:
+ model: nn.Layer. Specific `ResNet101` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNet101(**kwargs)
+
+ return model
+
+ def resnet152(pretrained=False, **kwargs):
+ """
+ ResNet152
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ input_image_channel: int=3. The number of input image channels
+ data_format: str='NCHW'. The data format of batch input images; should be in ('NCHW', 'NHWC').
+ Returns:
+ model: nn.Layer. Specific `ResNet152` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNet152(**kwargs)
+
+ return model
+
+ def squeezenet1_0(pretrained=False, **kwargs):
+ """
+ SqueezeNet1_0
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `SqueezeNet1_0` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.SqueezeNet1_0(**kwargs)
+
+ return model
+
+ def squeezenet1_1(pretrained=False, **kwargs):
+ """
+ SqueezeNet1_1
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `SqueezeNet1_1` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.SqueezeNet1_1(**kwargs)
+
+ return model
+
+ def densenet121(pretrained=False, **kwargs):
+ """
+ DenseNet121
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ dropout: float=0. Probability of setting units to zero.
+ bn_size: int=4. The number of channels per group.
+ Returns:
+ model: nn.Layer. Specific `DenseNet121` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DenseNet121(**kwargs)
+
+ return model
+
+ def densenet161(pretrained=False, **kwargs):
+ """
+ DenseNet161
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ dropout: float=0. Probability of setting units to zero.
+ bn_size: int=4. The number of channels per group.
+ Returns:
+ model: nn.Layer. Specific `DenseNet161` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DenseNet161(**kwargs)
+
+ return model
+
+ def densenet169(pretrained=False, **kwargs):
+ """
+ DenseNet169
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ dropout: float=0. Probability of setting units to zero.
+ bn_size: int=4. The number of channels per group.
+ Returns:
+ model: nn.Layer. Specific `DenseNet169` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DenseNet169(**kwargs)
+
+ return model
+
+ def densenet201(pretrained=False, **kwargs):
+ """
+ DenseNet201
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ dropout: float=0. Probability of setting units to zero.
+ bn_size: int=4. The number of channels per group.
+ Returns:
+ model: nn.Layer. Specific `DenseNet201` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DenseNet201(**kwargs)
+
+ return model
+
+ def densenet264(pretrained=False, **kwargs):
+ """
+ DenseNet264
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ dropout: float=0. Probability of setting units to zero.
+ bn_size: int=4. The number of channels per group.
+ Returns:
+ model: nn.Layer. Specific `DenseNet264` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DenseNet264(**kwargs)
+
+ return model
+
+ def inceptionv3(pretrained=False, **kwargs):
+ """
+ InceptionV3
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `InceptionV3` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.InceptionV3(**kwargs)
+
+ return model
+
+ def inceptionv4(pretrained=False, **kwargs):
+ """
+ InceptionV4
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `InceptionV4` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.InceptionV4(**kwargs)
+
+ return model
+
+ def googlenet(pretrained=False, **kwargs):
+ """
+ GoogLeNet
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `GoogLeNet` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.GoogLeNet(**kwargs)
+
+ return model
+
+ def shufflenetv2_x0_25(pretrained=False, **kwargs):
+ """
+ ShuffleNetV2_x0_25
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ShuffleNetV2_x0_25` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ShuffleNetV2_x0_25(**kwargs)
+
+ return model
+
+ def mobilenetv1(pretrained=False, **kwargs):
+ """
+ MobileNetV1
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV1` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV1(**kwargs)
+
+ return model
+
+ def mobilenetv1_x0_25(pretrained=False, **kwargs):
+ """
+ MobileNetV1_x0_25
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV1_x0_25` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV1_x0_25(**kwargs)
+
+ return model
+
+ def mobilenetv1_x0_5(pretrained=False, **kwargs):
+ """
+ MobileNetV1_x0_5
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV1_x0_5` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV1_x0_5(**kwargs)
+
+ return model
+
+ def mobilenetv1_x0_75(pretrained=False, **kwargs):
+ """
+ MobileNetV1_x0_75
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV1_x0_75` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV1_x0_75(**kwargs)
+
+ return model
+
+ def mobilenetv2_x0_25(pretrained=False, **kwargs):
+ """
+ MobileNetV2_x0_25
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV2_x0_25` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV2_x0_25(**kwargs)
+
+ return model
+
+ def mobilenetv2_x0_5(pretrained=False, **kwargs):
+ """
+ MobileNetV2_x0_5
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV2_x0_5` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV2_x0_5(**kwargs)
+
+ return model
+
+ def mobilenetv2_x0_75(pretrained=False, **kwargs):
+ """
+ MobileNetV2_x0_75
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV2_x0_75` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV2_x0_75(**kwargs)
+
+ return model
+
+ def mobilenetv2_x1_5(pretrained=False, **kwargs):
+ """
+ MobileNetV2_x1_5
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV2_x1_5` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV2_x1_5(**kwargs)
+
+ return model
+
+ def mobilenetv2_x2_0(pretrained=False, **kwargs):
+ """
+ MobileNetV2_x2_0
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV2_x2_0` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV2_x2_0(**kwargs)
+
+ return model
+
+ def mobilenetv3_large_x0_35(pretrained=False, **kwargs):
+ """
+ MobileNetV3_large_x0_35
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_large_x0_35` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_large_x0_35(**kwargs)
+
+ return model
+
+ def mobilenetv3_large_x0_5(pretrained=False, **kwargs):
+ """
+ MobileNetV3_large_x0_5
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_large_x0_5` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_large_x0_5(**kwargs)
+
+ return model
+
+ def mobilenetv3_large_x0_75(pretrained=False, **kwargs):
+ """
+ MobileNetV3_large_x0_75
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_large_x0_75` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_large_x0_75(**kwargs)
+
+ return model
+
+ def mobilenetv3_large_x1_0(pretrained=False, **kwargs):
+ """
+ MobileNetV3_large_x1_0
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_large_x1_0` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_large_x1_0(**kwargs)
+
+ return model
+
+ def mobilenetv3_large_x1_25(pretrained=False, **kwargs):
+ """
+ MobileNetV3_large_x1_25
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_large_x1_25` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_large_x1_25(**kwargs)
+
+ return model
+
+ def mobilenetv3_small_x0_35(pretrained=False, **kwargs):
+ """
+ MobileNetV3_small_x0_35
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_small_x0_35` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_small_x0_35(**kwargs)
+
+ return model
+
+ def mobilenetv3_small_x0_5(pretrained=False, **kwargs):
+ """
+ MobileNetV3_small_x0_5
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_small_x0_5` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_small_x0_5(**kwargs)
+
+ return model
+
+ def mobilenetv3_small_x0_75(pretrained=False, **kwargs):
+ """
+ MobileNetV3_small_x0_75
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_small_x0_75` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_small_x0_75(**kwargs)
+
+ return model
+
+ def mobilenetv3_small_x1_0(pretrained=False, **kwargs):
+ """
+ MobileNetV3_small_x1_0
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_small_x1_0` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_small_x1_0(**kwargs)
+
+ return model
+
+ def mobilenetv3_small_x1_25(pretrained=False, **kwargs):
+ """
+ MobileNetV3_small_x1_25
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `MobileNetV3_small_x1_25` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.MobileNetV3_small_x1_25(**kwargs)
+
+ return model
+
+ def resnext101_32x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt101_32x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt101_32x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt101_32x4d(**kwargs)
+
+ return model
+
+ def resnext101_64x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt101_64x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt101_64x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt101_64x4d(**kwargs)
+
+ return model
+
+ def resnext152_32x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt152_32x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt152_32x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt152_32x4d(**kwargs)
+
+ return model
+
+ def resnext152_64x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt152_64x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt152_64x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt152_64x4d(**kwargs)
+
+ return model
+
+ def resnext50_32x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt50_32x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt50_32x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt50_32x4d(**kwargs)
+
+ return model
+
+ def resnext50_64x4d(pretrained=False, **kwargs):
+ """
+ ResNeXt50_64x4d
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `ResNeXt50_64x4d` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.ResNeXt50_64x4d(**kwargs)
+
+ return model
+
+ def darknet53(pretrained=False, **kwargs):
+ """
+ DarkNet53
+ Args:
+ pretrained: bool=False. If `True` load pretrained parameters, `False` otherwise.
+ kwargs:
+ class_dim: int=1000. Output dim of last fc layer.
+ Returns:
+ model: nn.Layer. Specific `DarkNet53` model depends on args.
+ """
+ kwargs.update({'pretrained': pretrained})
+ model = backbone.DarkNet53(**kwargs)
+
+ return model
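+
+ # A hedged usage sketch for these hub entrypoints; the repo spec, branch and
+ # source below are illustrative assumptions, not taken from this patch:
+ #     import paddle
+ #     model = paddle.hub.load('PaddlePaddle/PaddleClas:develop', 'resnet50',
+ #                             source='github', pretrained=True)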
diff --git a/src/PaddleClas/paddleclas.py b/src/PaddleClas/paddleclas.py
new file mode 100644
index 0000000..bfad193
--- /dev/null
+++ b/src/PaddleClas/paddleclas.py
@@ -0,0 +1,572 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+__dir__ = os.path.dirname(__file__)
+sys.path.append(os.path.join(__dir__, ""))
+sys.path.append(os.path.join(__dir__, "deploy"))
+
+from typing import Union, Generator
+import argparse
+import shutil
+import textwrap
+import tarfile
+import requests
+import warnings
+from functools import partial
+from difflib import SequenceMatcher
+
+import cv2
+import numpy as np
+from tqdm import tqdm
+from prettytable import PrettyTable
+
+from deploy.python.predict_cls import ClsPredictor
+from deploy.utils.get_image_list import get_image_list
+from deploy.utils import config
+
+from ppcls.arch.backbone import *
+from ppcls.utils.logger import init_logger
+
+# for building model with loading pretrained weights from backbone
+init_logger()
+
+__all__ = ["PaddleClas"]
+
+BASE_DIR = os.path.expanduser("~/.paddleclas/")
+BASE_INFERENCE_MODEL_DIR = os.path.join(BASE_DIR, "inference_model")
+BASE_IMAGES_DIR = os.path.join(BASE_DIR, "images")
+BASE_DOWNLOAD_URL = "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/{}_infer.tar"
+MODEL_SERIES = {
+ "AlexNet": ["AlexNet"],
+ "DarkNet": ["DarkNet53"],
+ "DeiT": [
+ "DeiT_base_distilled_patch16_224", "DeiT_base_distilled_patch16_384",
+ "DeiT_base_patch16_224", "DeiT_base_patch16_384",
+ "DeiT_small_distilled_patch16_224", "DeiT_small_patch16_224",
+ "DeiT_tiny_distilled_patch16_224", "DeiT_tiny_patch16_224"
+ ],
+ "DenseNet": [
+ "DenseNet121", "DenseNet161", "DenseNet169", "DenseNet201",
+ "DenseNet264"
+ ],
+ "DLA": [
+ "DLA46_c", "DLA60x_c", "DLA34", "DLA60", "DLA60x", "DLA102", "DLA102x",
+ "DLA102x2", "DLA169"
+ ],
+ "DPN": ["DPN68", "DPN92", "DPN98", "DPN107", "DPN131"],
+ "EfficientNet": [
+ "EfficientNetB0", "EfficientNetB0_small", "EfficientNetB1",
+ "EfficientNetB2", "EfficientNetB3", "EfficientNetB4", "EfficientNetB5",
+ "EfficientNetB6", "EfficientNetB7"
+ ],
+ "ESNet": ["ESNet_x0_25", "ESNet_x0_5", "ESNet_x0_75", "ESNet_x1_0"],
+ "GhostNet":
+ ["GhostNet_x0_5", "GhostNet_x1_0", "GhostNet_x1_3", "GhostNet_x1_3_ssld"],
+ "HarDNet": ["HarDNet39_ds", "HarDNet68_ds", "HarDNet68", "HarDNet85"],
+ "HRNet": [
+ "HRNet_W18_C", "HRNet_W30_C", "HRNet_W32_C", "HRNet_W40_C",
+ "HRNet_W44_C", "HRNet_W48_C", "HRNet_W64_C", "HRNet_W18_C_ssld",
+ "HRNet_W48_C_ssld"
+ ],
+ "Inception": ["GoogLeNet", "InceptionV3", "InceptionV4"],
+ "MixNet": ["MixNet_S", "MixNet_M", "MixNet_L"],
+ "MobileNetV1": [
+ "MobileNetV1_x0_25", "MobileNetV1_x0_5", "MobileNetV1_x0_75",
+ "MobileNetV1", "MobileNetV1_ssld"
+ ],
+ "MobileNetV2": [
+ "MobileNetV2_x0_25", "MobileNetV2_x0_5", "MobileNetV2_x0_75",
+ "MobileNetV2", "MobileNetV2_x1_5", "MobileNetV2_x2_0",
+ "MobileNetV2_ssld"
+ ],
+ "MobileNetV3": [
+ "MobileNetV3_small_x0_35", "MobileNetV3_small_x0_5",
+ "MobileNetV3_small_x0_75", "MobileNetV3_small_x1_0",
+ "MobileNetV3_small_x1_25", "MobileNetV3_large_x0_35",
+ "MobileNetV3_large_x0_5", "MobileNetV3_large_x0_75",
+ "MobileNetV3_large_x1_0", "MobileNetV3_large_x1_25",
+ "MobileNetV3_small_x1_0_ssld", "MobileNetV3_large_x1_0_ssld"
+ ],
+ "PPLCNet": [
+ "PPLCNet_x0_25", "PPLCNet_x0_35", "PPLCNet_x0_5", "PPLCNet_x0_75",
+ "PPLCNet_x1_0", "PPLCNet_x1_5", "PPLCNet_x2_0", "PPLCNet_x2_5"
+ ],
+ "RedNet": ["RedNet26", "RedNet38", "RedNet50", "RedNet101", "RedNet152"],
+ "RegNet": ["RegNetX_4GF"],
+ "Res2Net": [
+ "Res2Net50_14w_8s", "Res2Net50_26w_4s", "Res2Net50_vd_26w_4s",
+ "Res2Net200_vd_26w_4s", "Res2Net101_vd_26w_4s",
+ "Res2Net50_vd_26w_4s_ssld", "Res2Net101_vd_26w_4s_ssld",
+ "Res2Net200_vd_26w_4s_ssld"
+ ],
+ "ResNeSt": ["ResNeSt50", "ResNeSt50_fast_1s1x64d"],
+ "ResNet": [
+ "ResNet18", "ResNet18_vd", "ResNet34", "ResNet34_vd", "ResNet50",
+ "ResNet50_vc", "ResNet50_vd", "ResNet50_vd_v2", "ResNet101",
+ "ResNet101_vd", "ResNet152", "ResNet152_vd", "ResNet200_vd",
+ "ResNet34_vd_ssld", "ResNet50_vd_ssld", "ResNet50_vd_ssld_v2",
+ "ResNet101_vd_ssld", "Fix_ResNet50_vd_ssld_v2", "ResNet50_ACNet_deploy"
+ ],
+ "ResNeXt": [
+ "ResNeXt50_32x4d", "ResNeXt50_vd_32x4d", "ResNeXt50_64x4d",
+ "ResNeXt50_vd_64x4d", "ResNeXt101_32x4d", "ResNeXt101_vd_32x4d",
+ "ResNeXt101_32x8d_wsl", "ResNeXt101_32x16d_wsl",
+ "ResNeXt101_32x32d_wsl", "ResNeXt101_32x48d_wsl",
+ "Fix_ResNeXt101_32x48d_wsl", "ResNeXt101_64x4d", "ResNeXt101_vd_64x4d",
+ "ResNeXt152_32x4d", "ResNeXt152_vd_32x4d", "ResNeXt152_64x4d",
+ "ResNeXt152_vd_64x4d"
+ ],
+ "ReXNet":
+ ["ReXNet_1_0", "ReXNet_1_3", "ReXNet_1_5", "ReXNet_2_0", "ReXNet_3_0"],
+ "SENet": [
+ "SENet154_vd", "SE_HRNet_W64_C_ssld", "SE_ResNet18_vd",
+ "SE_ResNet34_vd", "SE_ResNet50_vd", "SE_ResNeXt50_32x4d",
+ "SE_ResNeXt50_vd_32x4d", "SE_ResNeXt101_32x4d"
+ ],
+ "ShuffleNetV2": [
+ "ShuffleNetV2_swish", "ShuffleNetV2_x0_25", "ShuffleNetV2_x0_33",
+ "ShuffleNetV2_x0_5", "ShuffleNetV2_x1_0", "ShuffleNetV2_x1_5",
+ "ShuffleNetV2_x2_0"
+ ],
+ "SqueezeNet": ["SqueezeNet1_0", "SqueezeNet1_1"],
+ "SwinTransformer": [
+ "SwinTransformer_large_patch4_window7_224_22kto1k",
+ "SwinTransformer_large_patch4_window12_384_22kto1k",
+ "SwinTransformer_base_patch4_window7_224_22kto1k",
+ "SwinTransformer_base_patch4_window12_384_22kto1k",
+ "SwinTransformer_base_patch4_window12_384",
+ "SwinTransformer_base_patch4_window7_224",
+ "SwinTransformer_small_patch4_window7_224",
+ "SwinTransformer_tiny_patch4_window7_224"
+ ],
+ "Twins": [
+ "pcpvt_small", "pcpvt_base", "pcpvt_large", "alt_gvt_small",
+ "alt_gvt_base", "alt_gvt_large"
+ ],
+ "VGG": ["VGG11", "VGG13", "VGG16", "VGG19"],
+ "VisionTransformer": [
+ "ViT_base_patch16_224", "ViT_base_patch16_384", "ViT_base_patch32_384",
+ "ViT_large_patch16_224", "ViT_large_patch16_384",
+ "ViT_large_patch32_384", "ViT_small_patch16_224"
+ ],
+ "Xception": [
+ "Xception41", "Xception41_deeplab", "Xception65", "Xception65_deeplab",
+ "Xception71"
+ ]
+}
+
+
+class ImageTypeError(Exception):
+ """ImageTypeError.
+ """
+
+ def __init__(self, message=""):
+ super().__init__(message)
+
+
+class InputModelError(Exception):
+ """InputModelError.
+ """
+
+ def __init__(self, message=""):
+ super().__init__(message)
+
+
+def init_config(model_name,
+ inference_model_dir,
+ use_gpu=True,
+ batch_size=1,
+ topk=5,
+ **kwargs):
+ imagenet1k_map_path = os.path.join(
+ os.path.abspath(__dir__), "ppcls/utils/imagenet1k_label_list.txt")
+ cfg = {
+ "Global": {
+ "infer_imgs": kwargs["infer_imgs"]
+ if "infer_imgs" in kwargs else False,
+ "model_name": model_name,
+ "inference_model_dir": inference_model_dir,
+ "batch_size": batch_size,
+ "use_gpu": use_gpu,
+ "enable_mkldnn": kwargs["enable_mkldnn"]
+ if "enable_mkldnn" in kwargs else False,
+ "cpu_num_threads": kwargs["cpu_num_threads"]
+ if "cpu_num_threads" in kwargs else 1,
+ "enable_benchmark": False,
+ "use_fp16": kwargs["use_fp16"] if "use_fp16" in kwargs else False,
+ "ir_optim": True,
+ "use_tensorrt": kwargs["use_tensorrt"]
+ if "use_tensorrt" in kwargs else False,
+ "gpu_mem": kwargs["gpu_mem"] if "gpu_mem" in kwargs else 8000,
+ "enable_profile": False
+ },
+ "PreProcess": {
+ "transform_ops": [{
+ "ResizeImage": {
+ "resize_short": kwargs["resize_short"]
+ if "resize_short" in kwargs else 256
+ }
+ }, {
+ "CropImage": {
+ "size": kwargs["crop_size"]
+ if "crop_size" in kwargs else 224
+ }
+ }, {
+ "NormalizeImage": {
+ "scale": 0.00392157,
+ "mean": [0.485, 0.456, 0.406],
+ "std": [0.229, 0.224, 0.225],
+ "order": ''
+ }
+ }, {
+ "ToCHWImage": None
+ }]
+ },
+ "PostProcess": {
+ "main_indicator": "Topk",
+ "Topk": {
+ "topk": topk,
+ "class_id_map_file": imagenet1k_map_path
+ }
+ }
+ }
+ if "save_dir" in kwargs:
+ if kwargs["save_dir"] is not None:
+ cfg["PostProcess"]["SavePreLabel"] = {
+ "save_dir": kwargs["save_dir"]
+ }
+ if "class_id_map_file" in kwargs:
+ if kwargs["class_id_map_file"] is not None:
+ cfg["PostProcess"]["Topk"]["class_id_map_file"] = kwargs[
+ "class_id_map_file"]
+
+ cfg = config.AttrDict(cfg)
+ config.create_attr_dict(cfg)
+ return cfg
+
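+# A minimal sketch of calling the helper above (the model name and keyword
+# overrides are illustrative; any model listed in MODEL_SERIES is handled the
+# same way):
+#     cfg = init_config("ResNet50", None, use_gpu=False, topk=5, resize_short=256)
+#     assert cfg.Global.batch_size == 1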
+
+def args_cfg():
+ def str2bool(v):
+ return v.lower() in ("true", "t", "1")
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--infer_imgs",
+ type=str,
+ required=True,
+ help="The image(s) to be predicted.")
+ parser.add_argument(
+ "--model_name", type=str, help="The model name to be used.")
+ parser.add_argument(
+ "--inference_model_dir",
+ type=str,
+ help="The directory of model files. Valid when model_name not specifed."
+ )
+ parser.add_argument(
+ "--use_gpu", type=str, default=True, help="Whether use GPU.")
+ parser.add_argument("--gpu_mem", type=int, default=8000, help="")
+ parser.add_argument(
+ "--enable_mkldnn",
+ type=str2bool,
+ default=False,
+ help="Whether use MKLDNN. Valid when use_gpu is False")
+ parser.add_argument("--cpu_num_threads", type=int, default=1, help="")
+ parser.add_argument(
+ "--use_tensorrt", type=str2bool, default=False, help="")
+ parser.add_argument("--use_fp16", type=str2bool, default=False, help="")
+ parser.add_argument(
+ "--batch_size", type=int, default=1, help="Batch size. Default by 1.")
+ parser.add_argument(
+ "--topk",
+ type=int,
+ default=5,
+ help="Return topk score(s) and corresponding results. Default by 5.")
+ parser.add_argument(
+ "--class_id_map_file",
+ type=str,
+ help="The path of file that map class_id and label.")
+ parser.add_argument(
+ "--save_dir",
+ type=str,
+ help="The directory to save prediction results as pre-label.")
+ parser.add_argument(
+ "--resize_short",
+ type=int,
+ default=256,
+ help="Resize according to short size.")
+ parser.add_argument(
+ "--crop_size", type=int, default=224, help="Centor crop size.")
+
+ args = parser.parse_args()
+ return vars(args)
+
+
+def print_info():
+ """Print list of supported models in formatted.
+ """
+ table = PrettyTable(["Series", "Name"])
+ try:
+ sz = os.get_terminal_size()
+ width = sz.columns - 30 if sz.columns > 50 else 10
+ except OSError:
+ width = 100
+ for series in MODEL_SERIES:
+ names = textwrap.fill(" ".join(MODEL_SERIES[series]), width=width)
+ table.add_row([series, names])
+ width = len(str(table).split("\n")[0])
+ print("{}".format("-" * width))
+ print("Models supported by PaddleClas".center(width))
+ print(table)
+ print("Powered by PaddlePaddle!".rjust(width))
+ print("{}".format("-" * width))
+
+
+def get_model_names():
+ """Get the model names list.
+ """
+ model_names = []
+ for series in MODEL_SERIES:
+ model_names += (MODEL_SERIES[series])
+ return model_names
+
+
+def similar_architectures(name="", names=[], thresh=0.1, topk=10):
+ """Find the most similar topk model names.
+ """
+ scores = []
+ for idx, n in enumerate(names):
+ if n.startswith("__"):
+ continue
+ score = SequenceMatcher(None, n.lower(), name.lower()).quick_ratio()
+ if score > thresh:
+ scores.append((idx, score))
+ scores.sort(key=lambda x: x[1], reverse=True)
+ similar_names = [names[s[0]] for s in scores[:min(topk, len(scores))]]
+ return similar_names
+
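+# Illustrative example: similar_architectures("ResNet_50", get_model_names())
+# is expected to rank "ResNet50" and its close variants first.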
+
+def download_with_progressbar(url, save_path):
+ """Download from url with progressbar.
+ """
+ if os.path.isfile(save_path):
+ os.remove(save_path)
+ response = requests.get(url, stream=True)
+ total_size_in_bytes = int(response.headers.get("content-length", 0))
+ block_size = 1024 # 1 Kibibyte
+ progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
+ with open(save_path, "wb") as file:
+ for data in response.iter_content(block_size):
+ progress_bar.update(len(data))
+ file.write(data)
+ progress_bar.close()
+ if total_size_in_bytes == 0 or progress_bar.n != total_size_in_bytes or not os.path.isfile(
+ save_path):
+ raise Exception(
+ f"Something went wrong while downloading file from {url}")
+
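+# Illustrative call (the local target path is an assumption, not part of this patch):
+#     download_with_progressbar(BASE_DOWNLOAD_URL.format("ResNet50"),
+#                               "/tmp/ResNet50_infer.tar")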
+
+def check_model_file(model_name):
+ """Check the model files exist and download and untar when no exist.
+ """
+ storage_directory = partial(os.path.join, BASE_INFERENCE_MODEL_DIR,
+ model_name)
+ url = BASE_DOWNLOAD_URL.format(model_name)
+
+ tar_file_name_list = [
+ "inference.pdiparams", "inference.pdiparams.info", "inference.pdmodel"
+ ]
+ model_file_path = storage_directory("inference.pdmodel")
+ params_file_path = storage_directory("inference.pdiparams")
+ if not os.path.exists(model_file_path) or not os.path.exists(
+ params_file_path):
+ tmp_path = storage_directory(url.split("/")[-1])
+ print(f"download {url} to {tmp_path}")
+ os.makedirs(storage_directory(), exist_ok=True)
+ download_with_progressbar(url, tmp_path)
+ with tarfile.open(tmp_path, "r") as tarObj:
+ for member in tarObj.getmembers():
+ filename = None
+ for tar_file_name in tar_file_name_list:
+ if tar_file_name in member.name:
+ filename = tar_file_name
+ if filename is None:
+ continue
+ file = tarObj.extractfile(member)
+ with open(storage_directory(filename), "wb") as f:
+ f.write(file.read())
+ os.remove(tmp_path)
+ if not os.path.exists(model_file_path) or not os.path.exists(
+ params_file_path):
+ raise Exception(
+ f"Something went wrong while praparing the model[{model_name}] files!"
+ )
+
+ return storage_directory()
+
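+# After a successful check_model_file("ResNet50") (an illustrative model name),
+# the returned directory is expected to look like
+# ~/.paddleclas/inference_model/ResNet50/ and to contain inference.pdmodel,
+# inference.pdiparams and inference.pdiparams.info.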
+
+class PaddleClas(object):
+ """PaddleClas.
+ """
+
+ print_info()
+
+ def __init__(self,
+ model_name: str=None,
+ inference_model_dir: str=None,
+ use_gpu: bool=True,
+ batch_size: int=1,
+ topk: int=5,
+ **kwargs):
+ """Init PaddleClas with config.
+
+ Args:
+ model_name (str, optional): The model name supported by PaddleClas. If specified, overrides the config. Defaults to None.
+ inference_model_dir (str, optional): The directory containing the model file and params file to be used. If specified, overrides the config. Defaults to None.
+ use_gpu (bool, optional): Whether to use GPU. If specified, overrides the config. Defaults to True.
+ batch_size (int, optional): The batch size for prediction. If specified, overrides the config. Defaults to 1.
+ topk (int, optional): Return the top k prediction results with the highest score. Defaults to 5.
+ """
+ super().__init__()
+ self._config = init_config(model_name, inference_model_dir, use_gpu,
+ batch_size, topk, **kwargs)
+ self._check_input_model()
+ self.cls_predictor = ClsPredictor(self._config)
+
+ def get_config(self):
+ """Get the config.
+ """
+ return self._config
+
+ def _check_input_model(self):
+ """Check input model name or model files.
+ """
+ candidate_model_names = get_model_names()
+ input_model_name = self._config.Global.get("model_name", None)
+ inference_model_dir = self._config.Global.get("inference_model_dir",
+ None)
+ if input_model_name is not None:
+ similar_names = similar_architectures(input_model_name,
+ candidate_model_names)
+ similar_names_str = ", ".join(similar_names)
+ if input_model_name not in candidate_model_names:
+ err = f"{input_model_name} is not provided by PaddleClas. \nMaybe you want: [{similar_names_str}]. \nIf you want to use your own model, please specify inference_model_dir!"
+ raise InputModelError(err)
+ self._config.Global.inference_model_dir = check_model_file(
+ input_model_name)
+ return
+ elif inference_model_dir is not None:
+ model_file_path = os.path.join(inference_model_dir,
+ "inference.pdmodel")
+ params_file_path = os.path.join(inference_model_dir,
+ "inference.pdiparams")
+ if not os.path.isfile(model_file_path) or not os.path.isfile(
+ params_file_path):
+ err = f"There is no model file or params file in this directory: {inference_model_dir}"
+ raise InputModelError(err)
+ return
+ else:
+ err = f"Please specify the model name supported by PaddleClas or directory contained model files(inference.pdmodel, inference.pdiparams)."
+ raise InputModelError(err)
+ return
+
+ def predict(self, input_data: Union[str, np.ndarray],
+ print_pred: bool=False) -> Generator[list, None, None]:
+ """Predict input_data.
+
+ Args:
+ input_data (Union[str, np.ndarray]):
+ When the type is str, it is the path of an image, a directory containing images, or the URL of an image on the Internet.
+ When the type is np.ndarray, it is the image data whose channel order is RGB.
+ print_pred (bool, optional): Whether to print the prediction results. Defaults to False.
+
+ Raises:
+ ImageTypeError: Illegal input_data.
+
+ Yields:
+ Generator[list, None, None]:
+ The prediction results of input_data, yielded batch by batch (batch_size images per batch). For each image,
+ the prediction result is packed as a dict that includes the top-k "class_ids", "scores" and "label_names".
+ The format of a batch prediction result is as follows: [{"class_ids": [...], "scores": [...], "label_names": [...]}, ...]
+ """
+
+ if isinstance(input_data, np.ndarray):
+ yield self.cls_predictor.predict(input_data)
+ elif isinstance(input_data, str):
+ if input_data.startswith("http") or input_data.startswith("https"):
+ image_storage_dir = partial(os.path.join, BASE_IMAGES_DIR)
+ if not os.path.exists(image_storage_dir()):
+ os.makedirs(image_storage_dir())
+ image_save_path = image_storage_dir("tmp.jpg")
+ download_with_progressbar(input_data, image_save_path)
+ warnings.warn(
+ f"Image to be predicted from Internet: {input_data}, has been saved to: {image_save_path}"
+ )
+ input_data = image_save_path
+ image_list = get_image_list(input_data)
+
+ batch_size = self._config.Global.get("batch_size", 1)
+ topk = self._config.PostProcess.Topk.get('topk', 1)
+
+ img_list = []
+ img_path_list = []
+ cnt = 0
+ for idx, img_path in enumerate(image_list):
+ img = cv2.imread(img_path)
+ if img is None:
+ warnings.warn(
+ f"Image file failed to read and has been skipped. The path: {img_path}"
+ )
+ continue
+ img = img[:, :, ::-1]
+ img_list.append(img)
+ img_path_list.append(img_path)
+ cnt += 1
+
+ if cnt % batch_size == 0 or (idx + 1) == len(image_list):
+ preds = self.cls_predictor.predict(img_list)
+
+ if print_pred and preds:
+ for idx, pred in enumerate(preds):
+ pred_str = ", ".join(
+ [f"{k}: {pred[k]}" for k in pred])
+ print(
+ f"filename: {img_path_list[idx]}, top-{topk}, {pred_str}"
+ )
+
+ img_list = []
+ img_path_list = []
+ yield preds
+ else:
+ err = "Please input legal image! The type of image supported by PaddleClas are: NumPy.ndarray and string of local path or Ineternet URL"
+ raise ImageTypeError(err)
+ return
+
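+# A minimal Python API sketch (the model name and image path are illustrative):
+#     clas = PaddleClas(model_name="ResNet50")
+#     for batch in clas.predict("./demo.jpg", print_pred=True):
+#         pass  # each yielded item is a list of dicts with class_ids/scores/label_names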
+
+# for CLI
+def main():
+ """Function API used for commad line.
+ """
+ cfg = args_cfg()
+ clas_engine = PaddleClas(**cfg)
+ res = clas_engine.predict(cfg["infer_imgs"], print_pred=True)
+ for _ in res:
+ pass
+ print("Predict complete!")
+ return
+
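+# Equivalent command-line usage once installed via the accompanying setup.py
+# (flag values and the image path are illustrative):
+#     paddleclas --model_name=ResNet50 --infer_imgs=./demo.jpg --topk=5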
+
+if __name__ == "__main__":
+ main()
diff --git a/src/PaddleClas/requirements.txt b/src/PaddleClas/requirements.txt
new file mode 100644
index 0000000..79f548c
--- /dev/null
+++ b/src/PaddleClas/requirements.txt
@@ -0,0 +1,11 @@
+prettytable
+ujson
+opencv-python==4.4.0.46
+pillow
+tqdm
+PyYAML
+visualdl >= 2.2.0
+scipy
+scikit-learn==0.23.2
+gast==0.3.3
+faiss-cpu==1.7.1.post2
diff --git a/src/PaddleClas/setup.py b/src/PaddleClas/setup.py
new file mode 100644
index 0000000..57045d3
--- /dev/null
+++ b/src/PaddleClas/setup.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from io import open
+from setuptools import setup
+
+with open('requirements.txt', encoding="utf-8-sig") as f:
+ requirements = f.readlines()
+
+
+def readme():
+ with open(
+ 'docs/en/inference_deployment/whl_deploy_en.md',
+ encoding="utf-8-sig") as f:
+ README = f.read()
+ return README
+
+
+setup(
+ name='paddleclas',
+ packages=['paddleclas'],
+ package_dir={'paddleclas': ''},
+ include_package_data=True,
+ entry_points={
+ "console_scripts": ["paddleclas= paddleclas.paddleclas:main"]
+ },
+ version='0.0.0',
+ install_requires=requirements,
+ license='Apache License 2.0',
+ description='Awesome Image Classification toolkits based on PaddlePaddle ',
+ long_description=readme(),
+ long_description_content_type='text/markdown',
+ url='https://github.com/PaddlePaddle/PaddleClas',
+ download_url='https://github.com/PaddlePaddle/PaddleClas.git',
+ keywords=[
+ 'A treasure chest for image classification powered by PaddlePaddle.'
+ ],
+ classifiers=[
+ 'Intended Audience :: Developers',
+ 'Operating System :: OS Independent',
+ 'Natural Language :: Chinese (Simplified)',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.2',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7', 'Topic :: Utilities'
+ ], )
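+
+# A hedged build/install sketch for this layout (the exact wheel filename may differ):
+#     python setup.py bdist_wheel
+#     pip install dist/paddleclas-0.0.0-py3-none-any.whl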