2020 List of Publications
roman"'>�n �w�</span></b></span></span></span></a><a name="_toc24465890"></a><span style='mso-bookmark:_toc24465890'><span style='mso-bookmark:_toc66800735'><b style='mso-bidi-font-weight:normal'><span lang=en-us style='font-size:12.0pt;line-height:110%'>books / chapters</span></b></span></span><b style='mso-bidi-font-weight:normal'><span lang=en-us style='font-size:12.0pt; line-height:110%'><o:p></o:p></span></b></p> <p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt; mso-char-indent-count:0;mso-list:l0 level1 lfo2'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt; line-height:110%'>jun wan, guodong guo, sergio escalera, hugo jair escalante, stan z. li (eds.), multi-modal face presentation attack detection, synthesis lectures on computer vision, isbn:9781681739229, morgan &amp; claypool publishers, 2020.<o:p></o:p></span></p> <p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt; mso-char-indent-count:0;mso-list:l0 level1 lfo2'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>2.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt; line-height:110%'>thomas moeslund, sergio escalera, gholamreza anbarjafari, kamal nasrollahi,&nbsp;jun wan&nbsp;(eds.), statistical machine learning for human behaviour analysis, isbn:978-3-03936-228-8, mdpi, 2020.<o:p></o:p></span></p> <p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt; mso-char-indent-count:0;mso-list:l0 level1 lfo2'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>3.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt; line-height:110%'>yi li, huaibo huang, ran he, tieniu tan, heterogeneous facial analysis and synthesis, isbn: 978-981-13-9148-4, springer, 2020<o:p></o:p></span></p> <p class=af0><b><span lang=en-us style='font-size:12.0pt;line-height:110%'><o:p>&nbsp;</o:p></span></b></p> <p class=af0 style='mso-outline-level:2'><a name="_toc66800736"></a><a name="_toc24098142"></a><a name="_toc532562227"></a><a name="_toc532391196"><span style='mso-bookmark:_toc532562227'><span style='mso-bookmark:_toc24098142'><span style='mso-bookmark:_toc66800736'><b><span style='font-size:12.0pt; line-height:110%;font-family:�[so;mso-ascii-font-family:"times new roman"; mso-hansi-font-family:"times new roman"'>�ve� rir</span></b></span></span></span></a><a name="_toc24465891"></a><a name="_toc24360999"></a><a name="_toc532562397"></a><span style='mso-bookmark:_toc532562397'><span style='mso-bookmark:_toc24360999'><span style='mso-bookmark:_toc24465891'><span style='mso-bookmark:_toc66800736'><b><span lang=en-us style='font-size:12.0pt;line-height:110%'>international journals</span></b></span></span></span></span><b><span lang=en-us style='font-size:12.0pt;line-height:110%'><o:p></o:p></span></b></p> <p class=af0 
Computer Vision

1. Ruijin Chen, Wei Gao. Color-guided depth map super-resolution using a dual-branch multi-scale residual network with channel interaction. Sensors, 20(6), 1560, 2020.
2. Wei Wang, Wei Gao, Hainan Cui, Zhanyi Hu. Reconstruction of lines and planes of urban buildings with angle regularization. ISPRS Journal of Photogrammetry and Remote Sensing, 165, pp. 54-66, 2020.
3. Q. Dong*, B. Liu and Z. Hu. Non-uniqueness phenomenon of object representation in modelling IT cortex by deep convolutional neural network (DCNN). Frontiers in Computational Neuroscience, 14:35, 2020.
4. L. Chang, L. Jin, L. Weng, W. Chao, X. Wang, X. Deng, and Q. Dong*. Face-sketch learning with human sketch-drawing order enforcement. Science China Information Sciences, 63, 219103:1-219103:3, 2020.
5. Hainan Cui, Tianxin Shi, Jun Zhang, Pengfei Xu, Yiping Meng, Shuhan Shen. View-graph construction framework for robust and efficient structure-from-motion. Pattern Recognition, 2020.
6. Yu Chen, Shuhan Shen, Yisong Chen, Guoping Wang. Graph-based parallel large scale structure from motion. Pattern Recognition, 107: 107537, 2020.
7. Hongmin Liu, Xincheng Tang, Shuhan Shen. Depth-map completion for large indoor scene reconstruction. Pattern Recognition, 99: 107112, 2020.
8. Lingjie Zhu, Shuhan Shen, Xiang Gao, Zhanyi Hu. Urban scene vectorized modeling based on contour deformation. ISPRS International Journal of Geo-Information, 9(3), 162, 2020.
9. Xiang Gao, Shuhan Shen, Lingjie Zhu, Tianxin Shi, Zhiheng Wang, and Zhanyi Hu. Complete scene reconstruction by merging images and laser scans. IEEE Transactions on Circuits and Systems for Video Technology, 30(10): 3688-3701, 2020.
10. Fulin Tang, Yihong Wu, Xiaohui Hou, Haibin Ling. 3D mapping and 6D pose computation for real time augmented reality on cylindrical objects. IEEE Transactions on Circuits and Systems for Video Technology, vol. 30, issue 9, pp. 2887-2899, 2020.
11. Shan Zhang, Xiaoshan Yang, Yanxia Liu, Changsheng Xu. Asymmetric multi-stage CNNs for small-scale pedestrian detection. Neurocomputing, vol. 409, pp. 12-26, 2020.
12. Yuyang Zhang, Shibiao Xu, Baoyuan Wu, Jian Shi, Weiliang Meng and Xiaopeng Zhang. Unsupervised multi-view constrained convolutional network for accurate depth estimation. IEEE Transactions on Image Processing, 29: 7019-7031, 2020.
13. Wei Ma, Chaofan Gong, Shibiao Xu, Xiaopeng Zhang. Multi-scale spatial context-based semantic edge detection. Information Fusion, 64: 238-251, 2020.
14. Wei Ma, Mana Zheng, Wenguang Ma, Shibiao Xu, Xiaopeng Zhang. Learning across views for stereo image completion. IET Computer Vision, 14(7): 482-492, 2020.
15. Yong Zhao, Yuqi Cheng, Xishan Zhang, Shibiao Xu, Shuhui Bu, Hongkai Jiang, Pengcheng Han, Ke Li, Gang Wan. Real-time orthophoto mosaicing on mobile devices for sequential aerial images with low overlap. Remote Sensing, 12(22): 3739, 2020.
16. Jianbo Liu, Ying Wang, Yongcheng Liu, Shiming Xiang and Chunhong Pan. 3D PostureNet: a unified framework for skeleton-based posture recognition. Pattern Recognition Letters, vol. 140, pp. 143-149, 2020.
17. Xiyan Liu, Gaofeng Meng, Bin Fan, Shiming Xiang, Chunhong Pan. Geometric rectification of document images using adversarial gated unwarping network. Pattern Recognition, vol. 108, pp. 107576, 2020.
18. Jin Gao, Qiang Wang, Junliang Xing, Haibin Ling, Weiming Hu, and Stephen Maybank. Tracking-by-fusion via Gaussian process regression extended to transfer learning. IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), vol. 42, no. 4, pp. 939-955, April 2020.
19. Weiming Hu, Xinchu Shi, Zongwei Zhou, Junliang Xing, Haibin Ling, and Stephen Maybank. Dual L1-normalized context aware tensor power iteration and its applications to multi-object tracking and multi-graph matching. International Journal of Computer Vision (IJCV), vol. 128, no. 2, pp. 360-392, February 2020.
20. Weiming Hu, Jun Gao, Bing Li, Ou Wu, Junping Du, and Stephen Maybank. Anomaly detection using local kernel density estimation and context-based regression. IEEE Transactions on Knowledge and Data Engineering (TKDE), vol. 32, no. 2, pp. 218-233, February 2020.
21. Xinwei Huang, Bing Li, Shuai Li, Wenjuan Li, Weihua Xiong, Xuanwu Yin, Weiming Hu, and Hong Qin. Multi-cue semi-supervised color constancy with limited training samples. IEEE Transactions on Image Processing (TIP), vol. 29, pp. 7875-7888, 2020.
22. Wenjuan Li, Bing Li, Chunfeng Yuan, Haohao Wu, Yangxi Li, Weiming Hu, and Fangshi Wang. Anisotropic convolution for image classification. IEEE Transactions on Image Processing (TIP), vol. 29, no. 1, pp. 5584-5595, December 2020.
23. Hao Yang, Chunfeng Yuan, Li Zhang, Yunda Sun, Weiming Hu, Stephen J. Maybank. STA-CNN: convolutional spatial-temporal attention learning for action recognition. IEEE Transactions on Image Processing (TIP), vol. 29, no. 1, pp. 5783-5793, December 2020.
24. Guan Luo, Jiutong Wei, Weiming Hu, and Stephen J. Maybank. Tangent Fisher vector on matrix manifolds for action recognition. IEEE Transactions on Image Processing (TIP), vol. 29, no. 1, pp. 3052-3064, 2020.
25. Songyan Liu, Haiyun Guo, Jian-Guo Hu, Xu Zhao, Chaoyang Zhao, Tong Wang, Yousong Zhu, Jinqiao Wang, Ming Tang. A novel data augmentation scheme for pedestrian detection with attribute preserving GAN. Neurocomputing, 401: 123-132, 2020.
26. Xiang Wu, Ran He, Yibo Hu, Zhenan Sun. Learning an evolutionary embedding via massive knowledge distillation. International Journal of Computer Vision, vol. 128, no. 8-9, pp. 2089-2106, 2020.
27. Yi Li, Huaibo Huang, Jie Cao, Ran He, Tieniu Tan. Disentangled representation learning of makeup portraits in the wild. International Journal of Computer Vision, vol. 128, no. 8-9, pp. 2166-2184, 2020.
28. Qi Li, Zhenan Sun, Ran He, Tieniu Tan. A general framework for deep supervised discrete hashing. International Journal of Computer Vision, vol. 128, no. 8-9, pp. 2204-2222, 2020.
29. Xin Zheng, Yanqing Guo, Huaibo Huang, Yi Li, Ran He. A survey of deep facial attribute analysis. International Journal of Computer Vision, vol. 128, no. 8-9, pp. 2002-2034, 2020.
30. Jie Cao, Yibo Hu, Hongwen Zhang, Ran He, Zhenan Sun. Towards high fidelity face frontalization in the wild. International Journal of Computer Vision, vol. 128, no. 5, pp. 1485-1504, 2020.
31. Wei Xue, Hong Ai, Tianyu Sun, Chunfeng Song, Yan Huang, Liang Wang. Frame-GAN: increasing the frame rate of gait videos with generative adversarial networks. Neurocomputing, vol. 380, pp. 95-104, 2020.

Image/Video Processing and Analysis
32. Xingjia Pan, Fan Tang, Weiming Dong, Yang Gu, Zhichao Song, Yiping Meng, Pengfei Xu, Oliver Deussen, Changsheng Xu. Self-supervised feature augmentation for large image object detection. IEEE Transactions on Image Processing, 29: 6745-6758, 2020.
33. Feifei Zhang, Tianzhu Zhang, Qirong Mao, Changsheng Xu. Geometry guided pose-invariant facial expression recognition. IEEE Transactions on Image Processing, 29: 4445-4460, 2020.
34. Feifei Zhang, Tianzhu Zhang, Qirong Mao, Changsheng Xu. A unified deep model for joint facial expression recognition, face synthesis, and face alignment. IEEE Transactions on Image Processing, 29: 6574-6589, 2020.
35. Lei Shi, Yifan Zhang, Jian Cheng, Hanqing Lu. Skeleton-based action recognition with multi-stream adaptive graph convolutional networks. IEEE Transactions on Image Processing (TIP), vol. 29, pp. 9532-9545, 2020.
36. Chunjie Zhang, Jian Cheng, Qi Tian. Multi-view image classification with visual, semantic and view consistency. IEEE Transactions on Image Processing (TIP), vol. 29, pp. 617-627, 2020.
37. Yousong Zhu, Xu Zhao, Chaoyang Zhao, Jinqiao Wang, Hanqing Lu. Food det: detecting foods in refrigerator with supervised transformer network. Neurocomputing, 379: 162-171, 2020.
38. Linyu Zheng, Yingying Chen, Ming Tang, Jinqiao Wang, Hanqing Lu. Siamese deformable cross-correlation network for real-time visual tracking. Neurocomputing, 401: 36-47, 2020.
39. Xiaomei Zhang, Yingying Chen, Bingke Zhu, Jinqiao Wang, Ming Tang. Semantic-spatial fusion network for human parsing. Neurocomputing, 402: 375-383, 2020.
40. Kai Niu, Yan Huang, Wanli Ouyang, Liang Wang. Improving description-based person re-identification by multi-granularity image-text alignments. IEEE Transactions on Image Processing, vol. 29, pp. 5542-5556, 2020.
41. Jingyu Liu, Wei Wang, Liang Wang, Ming-Hsuan Yang. Attribute-guided attention for referring expression generation and comprehension. IEEE Transactions on Image Processing, vol. 29, pp. 5244-5258, 2020.
42. Zhen Jia, Zhang Zhang, Liang Wang, Caifeng Shan, Tieniu Tan. Deep unbiased embedding transfer for zero-shot learning. IEEE Transactions on Image Processing, vol. 29, pp. 1958-1971, 2020.
43. Yuqi Zhang, Yongzhen Huang, Shiqi Yu, Liang Wang. Cross-view gait recognition by discriminative feature learning. IEEE Transactions on Image Processing, vol. 29, pp. 1001-1015, 2020.
44. Fei Liu, Shubo Zhou, Yunlong Wang, Guangqi Hou, Zhenan Sun, Tieniu Tan. Binocular light-field: imaging theory and occlusion-robust depth perception application. IEEE Transactions on Image Processing, vol. 29, pp. 1628-1640, 2020.
45. Yuxi Wang, Zhaoxiang Zhang, Wangli Hao, Chunfeng Song. Multi-domain image-to-image translation via a unified circular framework. IEEE Transactions on Image Processing, vol. 30, pp. 670-684, 2020.
46. Yuxi Wang, Zhaoxiang Zhang, Wangli Hao, Chunfeng Song. Attention guided multiple source and target domain adaptation. IEEE Transactions on Image Processing, vol. 30, pp. 892-906, 2020.
47. Wangli Hao, Ian Max Andolina, Wei Wang, Zhaoxiang Zhang. Biologically inspired visual computing: the state of the art. Frontiers of Computer Science, vol. 15, no. 1, 2020.
48. Yunlong Wang, Fei Liu, Kunbo Zhang, Zilei Wang, Zhenan Sun, Tieniu Tan. High-fidelity view synthesis for light field imaging with extended pseudo 4DCNN. IEEE Transactions on Computational Imaging, vol. 6, pp. 830-842, 2020.
49. Weining Wang, Qi Li, Liang Wang. Robust object tracking via information theoretic measures. International Journal of Automation and Computing, vol. 17, no. 5, pp. 652-666, 2020.

Artificial Intelligence

50. Feifei Zhao, Yi Zeng, Aike Guo, Haifeng Su, Bo Xu. A neural algorithm for Drosophila linear and nonlinear decision-making. Scientific Reports, vol. 10, pp. 18660, 2020.
51. Qian Zhang, Yi Zeng and Taoyi Yang. Computational investigation of contributions from different subtypes of interneurons in prefrontal cortex for information maintenance. Scientific Reports, vol. 10, pp. 4617, 2020.
52. Feifei Zhao, Qingqun Kong, Yi Zeng, Bo Xu. A brain-inspired visual fear responses model for UAV emergent obstacle dodging. IEEE Transactions on Cognitive and Developmental Systems, vol. 12, no. 1, pp. 124-132, 2020.
53. Qian Liang, Yi Zeng and Bo Xu. Temporal-sequential learning with a brain-inspired spiking neural network and its application to musical memory. Frontiers in Computational Neuroscience, vol. 14, pp. 51, 2020.
54. Bin Fan, Qingqun Kong, Xinchao Wang, Zhiheng Wang, Shiming Xiang, Chunhong Pan, and Pascal Fua. A brain-inspired model of theory of mind. Frontiers in Neurorobotics, vol. 14, no. 60, pp. 1-17, 2020.
55. Dongcheng Zhao, Yi Zeng, Tielin Zhang, Mengting Shi, and Feifei Zhao. GLSNN: a multi-layer spiking neural network based on global feedback alignment and local STDP plasticity. Frontiers in Computational Neuroscience, vol. 14, pp. 576841, 2020.

Pattern Recognition

56. Xuelin Ma, Shuang Qiu, Huiguang He. Multi-channel EEG recording during motor imagery of different joints from the same limb. Scientific Data, 2020.
57. Jiezhen Xing, Shuang Qiu, Xuelin Ma, Chenyao Wu, Jinpeng Li, Shengpei Wang, Huiguang He. A CNN-based comparing network for the detection of steady-state visual evoked potential responses. Neurocomputing, vol. 403, pp. 452-461, 2020.
58. Jinpeng Li, Shuang Qiu, Changde Du, Yixin Wang, Huiguang He. Domain adaptation for EEG emotion recognition based on latent representation similarity. IEEE Transactions on Cognitive and Developmental Systems, vol. 12, no. 2, pp. 344-353, 2020.
59. Xuelin Ma, Shuang Qiu, Wei Wei, Shengpei Wang, Huiguang He. Deep channel-correlation network for motor imagery decoding from same limb. IEEE Transactions on Neural Systems and Rehabilitation Engineering, vol. 28, no. 1, pp. 297-306, 2020.
60. Weize Quan, Kai Wang, Dong-Ming Yan, Xiaopeng Zhang, Dennis Pellerin. Learn with diversity and from harder samples: improving the generalization of CNN-based detection of computer-generated images. Forensic Science International: Digital Investigation, 2020.
61. Ruisong Zhang, Weize Quan, Lubin Fan, Liming Hu, Dong-Ming Yan. Distinguishing computer-generated images from natural images using channel and pixel correlation. Journal of Computer Science and Technology, 2020.
62. Xiaolong Yang, Xiaohong Jia, Mengke Yuan, Dong-Ming Yan. Real-time facial pose estimation and tracking by coarse-to-fine iterative optimization. Tsinghua Science and Technology, 2020.
63. Xiaohan Liu, Lei Ma, Jianwei Guo, Dong-Ming Yan. Parallel computation of 3D clipped Voronoi diagrams. 2020.
64. Realistic procedural plant modeling from multiple view images. IEEE Transactions on Visualization and Computer Graphics, 2020.
65. Yuxin Chen, Gaoqun Ma, Chunfeng Yuan, Bing Li, Hui Zhang, Fangshi Wang, and Weiming Hu. Graph convolutional network with structure pooling and joint-wise channel attention for action recognition. Pattern Recognition, vol. 103, pp. 107321: 1-13, July 2020.
66. Zongwei Zhou, Wenhan Luo, Qiang Wang, Junliang Xing, and Weiming Hu. Distractor-aware discrimination learning for online multiple object tracking. Pattern Recognition, vol. 107, pp. 1-10, November 2020.
67. Qiang Chen, Peisong Wang, Anda Cheng, Wanguo Wang, Yifan Zhang, Jian Cheng. Robust one-stage object detection with location-aware classifiers. Pattern Recognition (PR), vol. 105, pp. 107334, September 2020.
68. Yifan Zhang, Lei Shi, Yi Wu, Ke Cheng, Jian Cheng, Hanqing Lu. Gesture recognition based on deep deformable 3D convolutional neural networks. Pattern Recognition, 107: 107416, 2020.
69. Jun Fu, Jing Liu, Yong Li, Yongjun Bao, Weipeng Yan, Zhiwei Fang, Hanqing Lu. Contextual deconvolution network for semantic segmentation. Pattern Recognition, 101: 107152, 2020.
70. Wen-Hao He, Xu-Yao Zhang, Fei Yin, Zhenbo Luo, Jean-Marc Ogier, Cheng-Lin Liu. Real-time multi-scale scene text detection with scale-based region proposal network. Pattern Recognition, vol. 98, pp. 107026, 2020.
71. Chee-Kheng Ch'ng, Chee Seng Chan, Cheng-Lin Liu. Total-Text: towards orientation robustness in scene text detection. International Journal on Document Analysis and Recognition, vol. 23, pp. 31-52, 2020.
72. Xu-Yao Zhang, Cheng-Lin Liu, Ching Y. Suen. Towards robust pattern recognition: a review. Proceedings of the IEEE, vol. 108(6), pp. 894-922, 2020.
73. Jun-Yu Ye, Yan-Ming Zhang, Qing Yang, Cheng-Lin Liu. Contextual stroke classification in online handwritten documents with edge graph attention networks. SN Computer Science, vol. 1, pp. 163, 2020.
74. Yunxue Shao, Cheng-Lin Liu. Teaching machines to write like humans using L-attributed grammar. Engineering Applications of Artificial Intelligence, vol. 90, pp. 103489, 2020.
75. Wujiaheimaiti Simayi, Mayire Ibrahim, Xu-Yao Zhang, Cheng-Lin Liu, Askar Hamdulla. A benchmark for unconstrained online handwritten Uyghur word recognition. International Journal on Document Analysis and Recognition, vol. 23, pp. 205-218, 2020.
76. Zhuo Chen, Fei Yin, Xu-Yao Zhang, Qing Yang, Cheng-Lin Liu. Multilingual handwritten text recognition via multi-task learning of recurrent neural networks. Pattern Recognition, vol. 108, pp. 107555, 2020.
77. Jin-Wen Wu, Fei Yin, Yan-Ming Zhang, Xu-Yao Zhang, Cheng-Lin Liu. Handwritten mathematical expression recognition via paired adversarial learning. International Journal of Computer Vision, vol. 128, pp. 2386-2401, 2020.
78. Ran He, Jie Cao, Lingxiao Song, Zhenan Sun, Tieniu Tan. Adversarial cross-spectral face completion for NIR-VIS face recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 42, no. 5, pp. 1025-1037, 2020.
79. Yan Huang, Qi Wu, Wei Wang, Liang Wang. Image and sentence matching via semantic concepts and order learning. IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 42, no. 3, pp. 636-650, 2020.
80. Ya Jing, Junbo Wang, Wei Wang, Liang Wang, Tieniu Tan. Relational graph neural network for situation recognition. Pattern Recognition, vol. 108, 2020.
81. Chenyang Si, Ya Jing, Wei Wang, Liang Wang, Tieniu Tan. Skeleton-based action recognition with hierarchical spatial reasoning and temporal stack learning network. Pattern Recognition, vol. 107, 2020.
82. Kai Niu, Yan Huang, Liang Wang. Re-ranking image-text matching by adaptive metric fusion. Pattern Recognition, vol. 104, 2020.
83. Weining Wang, Yan Huang, Liang Wang. Long video question answering: a matching-guided attention model. Pattern Recognition, vol. 102, 2020.
84. Peipei Li, Yibo Hu, Xiang Wu, Ran He, Zhenan Sun. Deep label refinement for age estimation. Pattern Recognition, vol. 100, 2020.
85. Xin Zheng, Huaibo Huang, Yanqing Guo, Bo Wang, Ran He. BLAN: bi-directional ladder attentive network for facial attribute prediction. Pattern Recognition, vol. 100, 2020.
86. Zhihang Li, Yibo Hu, Ran He, Zhenan Sun. Learning disentangling and fusing networks for face completion under structured occlusions. Pattern Recognition, vol. 99, 2020.
87. Junbo Wang, Wei Wang, Liang Wang, Zhiyong Wang, David Dagan Feng, Tieniu Tan. Learning visual relationship and context-aware attention for image captioning. Pattern Recognition, vol. 98, 2020.
88. Caiyong Wang, Yunlong Wang, Yunfan Liu, Zhaofeng He, Ran He, Zhenan Sun. ScleraSegNet: an attention assisted U-Net model for accurate sclera segmentation. IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM), vol. 2, no. 1, pp. 40-54, 2020.
89. Caiyong Wang, Jawad Muhammad, Yunlong Wang, Zhaofeng He, Zhenan Sun. Towards complete and accurate iris segmentation using deep multi-task attention network for non-cooperative iris recognition. IEEE Transactions on Information Forensics and Security, vol. 15, pp. 2944-2959, 2020.
90. Yunlian Sun, Jinhui Tang, Zhenan Sun, Massimo Tistarelli. Facial age and expression synthesis using ordinal ranking adversarial networks. IEEE Transactions on Information Forensics and Security, vol. 15, pp. 2960-2972, 2020.
91. Yunlian Sun, Jinhui Tang, Xiangbo Shu, Zhenan Sun, Massimo Tistarelli. Facial age synthesis with label distribution-guided generative adversarial network. IEEE Transactions on Information Forensics and Security, vol. 15, pp. 2679-2691, 2020.

Machine Learning

92. Jinpeng Li, Shuang Qiu, Yuanyuan Shen, Chenglin Liu, Huiguang He. Multisource transfer learning for cross-subject EEG emotion recognition. IEEE Transactions on Cybernetics, vol. 50, no. 7, pp. 3281-3293, 2020.
93. Dan Li, Changde Du, Huiguang He. Semi-supervised cross-modal image generation with generative adversarial networks. Pattern Recognition, vol. 100, pp. 107085, 2020.
94. Wenjing Li, Shuhao Zhang, Ge Yang. Dynamic organization of intracellular organelle networks. Wiley Interdisciplinary Reviews: Systems Biology and Medicine, e1505, 2020.
95. Liangyong Yu, Ran Li, Xiangrui Zeng, Hongyi Wang, Jie Jin, Ge Yang, Rui Jiang, Min Xu, Few shot domain adaptation for in situ macromolecule structural classification in cryo-electron tomograms, Bioinformatics, btaa671, 2020.
96. Jing Liu, Linlin Li, Yang Yang, Bei Hong, Xi Chen, Qiwei Xie, Hua Han, Automatic reconstruction of mitochondria and endoplasmic reticulum in electron microscopy volumes by deep learning,
Frontiers in Neuroscience, 2020. (Zone 3, Top)
97. Jinqiao Wang, Linyu Zheng, Ming Tang, Jiayi Feng, A comparison of correlation filter-based trackers and Struck trackers, IEEE Trans. Circuits Syst. Video Technol., 30(9): 3106-3118, 2020.
98. Guoqiang Zhong, Tao Li, Wencong Jiao, Li-Na Wang, Junyu Dong, Cheng-Lin Liu, DNA computing inspired deep networks design, Neurocomputing, vol. 382, pp. 140-147, 2020.
99. Zhengya Sun, Cheng-Lin Liu, Jinghao Niu, Wensheng Zhang, Discriminative structure learning of sum-product networks for data stream classification, Neural Networks, vol. 123, pp. 163-175, 2020.
100. Yuan-Yuan Shen, Yan-Ming Zhang, Xu-Yao Zhang, Cheng-Lin Liu, Online semi-supervised learning with learning vector quantization, Neurocomputing, vol. 399, pp. 467-478, 2020.
101. Yanbo Fan, Baoyuan Wu, Ran He, Bao-Gang Hu, Yong Zhang, Siwei Lyu, Groupwise ranking loss for multi-label learning, IEEE Access, vol. 8, pp. 21717-21727, 2020.
102. Jufeng Yang, Xiaoping Wu, Jie Liang, Xiaoxiao Sun, Ming-Ming Cheng, Paul L. Rosin, Self-paced balance learning for clinical skin disease recognition, IEEE Trans. on Neural Networks and Learning Systems, vol. 31, no. 8, pp. 2832-2846, 2020.
103. Zhihang Li, Xu Tang, Xiang Wu, Jingtuo Liu, Ran He, Progressively refined face detection through semantics-enriched representation learning, IEEE Trans. on Information Forensics and Security, vol. 15, pp. 1394-1406, 2020.

Data Mining
104. Qiang Cui, Shu Wu, Qiang Liu, Wen Zhong, Liang Wang, MV-RNN: A multi-view recurrent neural network for sequential recommendation, IEEE Trans. on Knowledge and Data Engineering, vol. 32, no. 2, pp.
317-331, 2020.

Computer Graphics
105. Jianwei Guo, Haiyong Jiang, Bedrich Benes, Oliver Deussen, Xiaopeng Zhang, Dani Lischinski, Hui Huang, Inverse procedural modeling of branching structures by inferring L-systems, ACM Transactions on Graphics (presented at SIGGRAPH 2020), 39(5), Article 155, 2020.
106. Yiqun Wang, Jing Ren, Dong-Ming Yan, Jianwei Guo, Xiaopeng Zhang, Peter Wonka, MGCN: Descriptor learning using multiscale GCNs, ACM Trans. Graph. (Proc. SIGGRAPH), 39(4), 2020, 15 pages.
107. Jianwei Guo, Shibiao Xu, Dong-Ming Yan, Zhanglin Cheng, Marc Jaeger, Xiaopeng Zhang, Realistic procedural plant modeling from multiple view images, IEEE Transactions on Visualization and Computer Graphics, vol. 26, no. 2, pp. 1372-1384, 2020.
108. Zhengda Lu, Jianwei Guo, Jun Xiao, Ying Wang, Xiaopeng Zhang, Dong-Ming Yan, Extracting cycle-aware feature curve networks from 3D models, Computer-Aided Design, 2020.
109. Jianwei Guo, Hanyu Wang, Zhanglin Cheng, Xiaopeng Zhang, Dong-Ming Yan, Learning local shape descriptors for computing non-rigid dense correspondence, Computational Visual Media, 6, 95-112, 2020.
110. Lei You, Jianwei Guo, Yong Pang, Xinyu Song, Xiaopeng Zhang, 3D stem model construction with geometry consistency using terrestrial laser scanning data, International Journal of Remote Sensing, 42(2), 714-737, 2020.
111. Ruisong Zhang, Weize Quan, Baoyuan Wu, Zhifeng Li, Dong-Ming Yan, Pixel-wise dense detector for image inpainting, Computer Graphics Forum (Pacific Graphics), 2020.
112. Hao-Xiang Guo, Xiaohan Liu, Dong-Ming Yan, Yang Liu, Cut-enhanced PolyCube-maps for feature-aware all-hex meshing, ACM Transactions on Graphics (Proc. SIGGRAPH), 2020.
113. Haiyong Jiang, Dong-Ming Yan, Xiaopeng Zhang, Peter Wonka, Selection expressions for procedural modeling, IEEE Transactions on Visualization and Computer Graphics, 2020.
114. Pengfei Wang, Shiqing Xin, Changhe Tu, Dong-Ming Yan, Yuanfeng Zhou, Caiming Zhang,
Robustly computing restricted Voronoi diagrams (RVD) on thin-plate models, Computer-Aided Geometric Design, 2020.

Multimedia Computing
115. Fan Tang, Weiming Dong, Yiping Meng, Chongyang Ma, Fuzhang Wu, Xinrui Li, Tong-Yee Lee, Image retargetability, IEEE Transactions on Multimedia, 22(3): 641-654, 2020.
116. Shaobo Min, Hantao Yao, Hongtao Xie, Zheng-Jun Zha, Yongdong Zhang, Multi-objective matrix normalization for fine-grained visual recognition, IEEE Trans. Image Process., 29: 4996-5009, 2020.
117. Fan Qi, Xiaoshan Yang, Tianzhu Zhang, Changsheng Xu, Discriminative multimodal embedding for event classification, Neurocomputing, vol. 395, pp. 160-169, 2020.
118. Yi Huang, Xiaoshan Yang, Junyu Gao, Jitao Sang, Changsheng Xu, Knowledge-driven egocentric multimodal activity recognition, ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM), vol. 16, no. 4, pp. 133:1-133:21, 2020.
119. Fudong Nian, Teng Li, Bing-Kun Bao, Changsheng Xu, Relative coordinates constraint for face alignment, Neurocomputing, 395: 119-127, 2020.
120. Jinguang Wang, Jun Hu, Shengsheng Qian, Quan Fang, Changsheng Xu, Multimodal graph convolutional networks for high quality content recognition, Neurocomputing, 412: 42-51, 2020.
121. Yingying Zhang, Quan Fang, Shengsheng Qian, Changsheng Xu, Knowledge-aware attentive Wasserstein adversarial dialogue response generation, ACM Trans. Intell. Syst. Technol., 11(4): 37:1-37:20, 2020.
122. Feng Xue, Richang Hong, Xiangnan He, Jianwei Wang, Shengsheng Qian, Changsheng Xu, Knowledge-based topic model for multi-modal social event analysis, IEEE Trans. Multim., 22(8): 2098-2110, 2020.
123. Junyu Gao, Changsheng Xu, CI-GNN: Building a category-instance graph for zero-shot video classification, IEEE Trans. Multim., 22(12): 3088-3100, 2020.
124. Xinhong Ma, Tianzhu Zhang, Changsheng Xu, Multi-level correlation adversarial hashing for cross-modal retrieval, IEEE Trans. Multim., 22(12): 3101-3114, 2020.
125. Xiaowen Huang, Shengsheng Qian, Quan Fang, Jitao Sang, Changsheng Xu, Meta-path augmented sequential recommendation with contextual co-attention network, ACM Trans. Multim. Comput. Commun. Appl., 16(2): 52:1-52:24, 2020.
126. Longteng Guo, Jing Liu, Shichen Lu, Hanqing Lu, Show, tell, and polish: Ruminant decoding for image captioning, IEEE Trans. Multim., 22(8): 2149-2162, 2020.
127. Lingxiang Wu, Min Xu, Jinqiao Wang, Stuart W. Perry, Recall what you see continually using GridLSTM in image captioning, IEEE Trans. Multimedia, 22(3): 808-818, 2020.

Medical Image Analysis
128. Shengpei Wang, Hongwei Wen, Xiaopeng Hu, Peng Xie, Shuang Qiu, Yinfeng Qian, Jiang Qiu, Huiguang He, Transition and dynamic reconfiguration of whole-brain network in major depressive disorder, Molecular Neurobiology, vol. 57, pp. 4031-4044, 2020.
129. Longwei Fang, Zuowei Wang, Zhiqiang Chen, Fengzeng Jian, Shuo Li, Huiguang He, 3D shape reconstruction of lumbar vertebra from two X-ray images and a CT model, IEEE/CAA Journal of Automatica Sinica, vol. 7, no. 4, pp. 1124-1133, 2020.
130. Shuang Qiu, Weibo Yi, Shengpei Wang, Chuncheng Zhang, Jing Zhang, Huiguang He, The lasting effects of low-frequency repetitive transcranial magnetic stimulation on resting state EEG in healthy subjects, IEEE Transactions on Neural Systems and Rehabilitation Engineering, vol. 28, no. 4, pp. 832-841, 2020.
131. Shengpei Wang, Yun Li, Shuang Qiu, Chuncheng Zhang, Guyan Wang, Junfang Xian, Tianzuo Li, Huiguang He, Reorganization of rich-clubs in functional brain networks during propofol-induced unconsciousness and natural sleep, NeuroImage: Clinical, vol. 25, pp. 102188, 2020.
132. Wei Wei, Shuang Qiu, Xuelin Ma, Dan Li, Bo Wang, Huiguang He, Reducing calibration efforts in RSVP tasks with multi-source adversarial domain adaptation, IEEE Transactions on Neural Systems and Rehabilitation Engineering (TNSRE), vol. 28, no. 11, pp. 2344-2355, 2020.
133. Danqian Liu, Weifu Li, Chenyan Ma, Weitong Zheng, Yuanyuan Yao, Chak Foon Tso, Peng Zhong, Xi Chen, Jun Ho Song, Woochul Choi, Se-Bum Paik, Hua Han, Yang Dan, A common hub for sleep and motor control in the substantia nigra, Science, vol. 367(6476), pp. 440-445, 2020. (Zone 1, Top)
134. Li A, Zalesky A, Yue W, Howes O, Yan H, Liu Y, Fan L, Whitaker KJ, Xu K, Rao G, Li J, Liu S, Wang M, Sun Y, Song M, Li P, Chen J, Chen Y, Wang H, Liu W, Li Z, Yang Y, Guo H, Wan P, Lv L, Lu L, Yan J, Song Y, Wang H, Zhang H, Wu H, Ning Y, Du Y, Cheng Y, Xu J, Xu X, Zhang D, Wang X, Jiang T, Liu B. 2020. A neuroimaging biomarker for striatal dysfunction in schizophrenia. Nat Med 26:558-565.
135. Chen J, Shu H, Wang Z, Zhan Y, Liu D, Liu Y, Zhang Z. 2020. Intrinsic connectivity identifies the sensory-motor network as a main cross-network between remitted late-life depression- and amnestic mild cognitive impairment-targeted networks. Brain Imaging Behav 14:1130-1142.
136. Cui Y, Dong J, Yang Y, Yu H, Li W, Liu Y, Si J, Xie S, Sui J, Lv L, Jiang T. 2020. White matter microstructural differences across major depressive disorder, bipolar disorder and schizophrenia: a tract-based spatial statistics study. J Affect Disord 260:281-286.
137. Deng X, Wei X, Zhang Y, Wang B, Zhang D, Yu S, Jiang T, Zhao J. 2020. Impact of AVM location on language cortex right-hemisphere reorganization: a voxel-based lesion-symptom mapping study. Clin Neurol Neurosurg 189:105628.
138. Dou X, Yao H, Feng F, Wang P, Zhou B, Jin D, Yang Z, Li J, Zhao C, Wang L, An N, Liu B, Zhang X, Liu Y. 2020. Characterizing white matter connectivity in Alzheimer's disease and mild cognitive impairment: an automated fiber quantification analysis with two independent datasets. Cortex 129:390-405.
139. Duan Y, Zhang J, Zhuo Z, Ding J, Ju R, Wang J, Ma T, Haller S, Liu Y, Liu Y. 2020. Accelerating brain 3D T1-weighted turbo field echo MRI using compressed sensing-sensitivity encoding (CS-SENSE). Eur J Radiol 131:109255.
140. Grandjean J, Canella C, Anckaerts C, Ayranci G, Bougacha S, Bienert T, Buehlmann D, Coletta L, Gallino D, Gass N, Garin CM, Nadkarni NA, Hubner NS, Karatas M, Komaki Y, Kreitz S, Mandino F, Mechling AE, Sato C, Sauer K, Shah D, Strobelt S, Takata N, Wank I, Wu T, Yahata N, Yeow LY, Yee Y, Aoki I, Chakravarty MM, Chang WT, Dhenain M, von Elverfeldt D, Harsan LA, Hess A, Jiang T, Keliris GA, Lerch JP, Meyer-Lindenberg A, Okano H, Rudin M, Sartorius A, Van der Linden A, Verhoye M, Weber-Fahr W, Wenderoth N, Zerbi V, Gozzi A. 2020. Common functional networks in the mouse brain revealed by multi-centre resting-state fMRI analysis. NeuroImage 205:116278.
141. Guo X, Yao D, Cao Q, Liu L, Zhao Q, Li H, Huang F, Wang Y, Qian Q, Wang Y, Calhoun VD, Johnstone SJ, Sui J, Sun L. 2020. Shared and distinct resting functional connectivity in children and adults with attention-deficit/hyperactivity disorder. Transl Psychiatry 10:65.
142. He B, Cao L, Xia X, Zhang B, Zhang D, You B, Fan L, Jiang T. 2020. Fine-grained topography and modularity of the macaque frontal pole cortex revealed by anatomical connectivity profiles. Neurosci Bull 36:1454-1473.
143. He B, Yang Z, Fan L, Gao B, Li H, Ye C, You B, Jiang T. 2020. MonkeyCBP: a toolbox for connectivity-based parcellation of monkey brain. Front Neuroinform 14:14.
144. He Y, Wu S, Chen C, Fan L, Li K, Wang G, Wang H, Zhou Y. 2020. Organized resting-state functional dysconnectivity of the prefrontal cortex in patients with schizophrenia. Neuroscience 446:14-27.
145. Jiang R, Calhoun VD, Cui Y, Qi S, Zhuo C, Li J, Jung R, Yang J, Du Y, Jiang T, Sui J. 2020. Multimodal data revealed different neurobiological correlates of intelligence between males and females. Brain Imaging Behav 14:1979-1993.
146. Jiang R, Calhoun VD, Fan L, Zuo N, Jung R, Qi S, Lin D, Li J, Zhuo C, Song M, Fu Z, Jiang T, Sui J. 2020. Gender differences in connectome-based predictions of individualized intelligence quotient and sub-domain scores. Cereb Cortex 30:888-900.
147. Jiang R, Zuo N, Ford JM, Qi S, Zhi D, Zhuo C, Xu Y, Fu Z, Bustillo J, Turner JA, Calhoun VD, Sui J. 2020. Task-induced brain connectivity promotes the detection of individual differences in brain-behavior relationships. NeuroImage 207:116370.
148. Jin D, Wang P, Zalesky A, Liu B, Song C, Wang D, Xu K, Yang H, Zhang Z, Yao H, Zhou B, Han T, Zuo N, Han Y, Lu J, Wang Q, Yu C, Zhang X, Zhang X, Jiang T, Zhou Y, Liu Y. 2020. Grab-AD: generalizability and reproducibility of altered brain activity and diagnostic classification in Alzheimer's disease. Hum Brain Mapp 41:3379-3391.
149. Jin D, Zhou B, Han Y, Ren J, Han T, Liu B, Lu J, Song C, Wang P, Wang D, Xu J, Yang Z, Yao H, Yu C, Zhao K, Wintermark M, Zuo N, Zhang X, Zhou Y, Zhang X, Jiang T, Wang Q, Liu Y. 2020. Generalizable, reproducible, and neuroscientifically interpretable imaging biomarkers for Alzheimer's disease. Adv Sci (Weinh) 7:2000675.
150. Kim WS, Shen G, Liu C, Kang NI, Lee KH, Sui J, Chung YC. 2020. Altered amygdala-based functional connectivity in individuals with attenuated psychosis syndrome and first-episode schizophrenia. Sci Rep 10:17711.
151. Li L, Song M, Zhang C, Qian Z, Li Y, Li R, Li C, Yang Z, Zhou D. 2020. Hemangiopericytomas: spatial intracranial location in a voxel-based mapping study. J Neuroimaging 30:370-377.
152. Li M, Li Y, Jin J, Yang Z, Zhang B, Liu Y, Song M, Freakly C, Weber E, Liu F, Jiang T, Crozier S. 2020. A dedicated eight-channel receive RF coil array for monkey brain MRI at 9.4 T. NMR Biomed 33:e4369.
153. Liu CY, Yan S, Hou B, Li ML, Gao S, Li A, Liu B, Xu WH. 2020. Mismatch of cognition and neural networks in asymptomatic middle cerebral artery steno-occlusive disease. Eur J Neurol 27:1062-1065.
154. Liu S, Li A, Liu Y, Li J, Wang M, Sun Y, Qin W, Yu C, Jiang T, Liu B. 2020.
MIR137 polygenic risk is associated with schizophrenia and affects functional connectivity of the dorsolateral prefrontal cortex. Psychol Med 50:1510-1518.
155. Liu S, Li A, Liu Y, Yan H, Wang M, Sun Y, Fan L, Song M, Xu K, Chen J, Chen Y, Wang H, Guo H, Wan P, Lv L, Yang Y, Li P, Lu L, Yan J, Wang H, Zhang H, Wu H, Ning Y, Zhang D, Jiang T, Liu B. 2020. Polygenic effects of schizophrenia on hippocampal grey matter volume and hippocampus-medial prefrontal cortex functional connectivity. Br J Psychiatry 216:267-274.
156. Luo N, Sui J, Abrol A, Chen J, Turner JA, Damaraju E, Fu Z, Fan L, Lin D, Zhuo C, Xu Y, Glahn DC, Rodrigue AL, Banich MT, Pearlson GD, Calhoun VD. 2020. Structural brain architectures match intrinsic functional networks and vary across domains: a study from 15 000 individuals. Cereb Cortex 30:5460-5470.
157. Luo N, Sui J, Abrol A, Lin D, Chen J, Vergara VM, Fu Z, Du Y, Damaraju E, Xu Y, Turner JA, Calhoun VD. 2020. Age-related structural and functional variations in 5,967 individuals across the adult lifespan. Hum Brain Mapp 41:1725-1737.
158. Niu W, Jiang Y, Zhang X, Jiang T, Zhang Y, Yu S. 2020. Changes of effective connectivity in the alpha band characterize differential processing of audiovisual information in cross-modal selective attention. Neurosci Bull 36:1009-1022.
159. Qi S, Abbott CC, Narr KL, Jiang R, Upston J, McClintock SM, Espinoza R, Jones T, Zhi D, Sun H, Yang X, Sui J, Calhoun VD. 2020. Electroconvulsive therapy treatment responsive multimodal brain networks. Hum Brain Mapp 41:1775-1785.
160. Qi S, Bustillo J, Turner JA, Jiang R, Zhi D, Fu Z, DeRamus TP, Vergara V, Ma X, Yang X, Stevens M, Zhuo C, Xu Y, Calhoun VD, Sui J. 2020. The relevance of transdiagnostic shared networks to the severity of symptoms and cognitive deficits in schizophrenia: a multimodal brain imaging fusion study. Transl Psychiatry 10:149.
161. Qi S, Morris R, Turner JA, Fu Z, Jiang R, DeRamus TP, Zhi D, Calhoun VD, Sui J. 2020. Common and unique multimodal covarying patterns in autism spectrum disorder subtypes. Mol Autism 11:90.
162. Qin Q, Tang Y, Dou X, Qu Y, Xing Y, Yang J, Chu T, Liu Y, Jia J. 2020. Default mode network integrity changes contribute to cognitive deficits in subcortical vascular cognitive impairment, no dementia. Brain Imaging Behav.
163. Rao S, Luo N, Sui J, Xu Q, Zhang F. 2020. Effect of the SIRT1 gene on regional cortical grey matter density in the Han Chinese population. Br J Psychiatry 216:254-258.
164. Song M. 2020. Imaging three-dimensional microvascular networks of brain with synchrotron radiation microangiography. Neurosci Bull 36:331-332.
165. Song M, Yang Y, Yang Z, Cui Y, Yu S, He J, Jiang T. 2020. Prognostic models for prolonged disorders of consciousness: an integrative review. Cell Mol Life Sci 77:3945-3961.
166. Sui J, Jiang R, Bustillo J, Calhoun V. 2020. Neuroimaging-based individualized prediction of cognition and behavior for mental disorders and health: methods and promises. Biol Psychiatry 88:818-828.
167. Sui J, Li X, Bell RP, Towe SL, Gadde S, Chen NK, Meade CS. 2020. Structural and functional brain abnormalities in HIV disease revealed by multimodal MRI fusion: association with cognitive function. Clin Infect Dis.
168. Sui J, Liu M, Lee JH, Zhang J, Calhoun V. 2020. Deep learning methods and applications in neuroimaging. J Neurosci Methods 339:108718.
169. Sun H, Jiang R, Qi S, Narr KL, Wade BS, Upston J, Espinoza R, Jones T, Calhoun VD, Abbott CC, Sui J. 2020. Preliminary prediction of individual response to electroconvulsive therapy using whole-brain functional magnetic resonance imaging data. NeuroImage Clin 26:102080.
170. Tao Q, Yang Y, Yu H, Fan L, Luan S, Zhang L, Zhao H, Lv L, Jiang T, Song X. 2020. Anatomical connectivity-based strategy for targeting transcranial magnetic stimulation as antidepressant therapy. Front Psychiatry 11:236.
171. Wang L, Li J, Zhang S, Zhang X, Zhang Q, Chan MF, Yang R, Sui J. 2020. Multi-task autoencoder based classification-regression model for patient-specific VMAT QA. Phys Med Biol 65:235023.
172. Wang P, Zhou B, Yao H, Xie S, Feng F, Zhang Z, Guo Y, An N, Zhou Y, Zhang X, Liu Y. 2020. Aberrant hippocampal functional connectivity is associated with fornix white matter integrity in Alzheimer's disease and mild cognitive impairment. J Alzheimers Dis 75:1153-1168.
173. Wu D, Chen J, Hussain M, Wu L, Shi J, Wu C, Ma Y, Zhang M, Yang Q, Fu Y, Duan Y, Ma C, Yan F, Zhu Z, He X, Yao T, Song M, Zhi X, Wang C, Cai L, Li C, Li S, Zhang Y, Ding Y, Ji X. 2020. Selective intra-arterial brain cooling improves long-term outcomes in a non-human primate model of embolic stroke: efficacy depending on reperfusion status. J Cereb Blood Flow Metab 40:1415-1426.
174. Wu D, Fan L, Song M, Wang H, Chu C, Yu S, Jiang T. 2020. Hierarchy of connectivity-function relationship of the human cortex revealed through predicting activity across functional domains. Cereb Cortex 30:4607-4616.
175. Wu D, Jiang T. 2020. Schizophrenia-related abnormalities in the triple network: a meta-analysis of working memory studies. Brain Imaging Behav 14:971-980.
176. Wu D, Li X, Jiang T. 2020. Reconstruction of behavior-relevant individual brain activity: an individualized fMRI study. Sci China Life Sci 63:410-418.
177. Xing Y, Zhu Z, Du Y, Zhang J, Qu Q, Sun L, Li Y, Guo Y, Peng G, Liu Y, Yu Y, Qiao Y, Xie B, Shi X, Lu J, Jia J, Tang Y. 2020. The efficacy of cognitive training in patients with amnestic mild cognitive impairment (COG-REAGENT): protocol for a multi-center randomized controlled trial. J Alzheimers Dis 75:779-787.
178. Xu Q, Liu F, Qin W, Jiang T, Yu C. 2020. Multiscale neurobiological correlates of human neuroticism. Hum Brain Mapp 41:4730-4743.
179. Yang Y, Zhang L, Guo D, Zhang L, Yu H, Liu Q, Su X, Shao M, Song M, Zhang Y, Ding M, Lu Y, Liu B, Li W, Yue W, Fan X, Yang G, Lv L. 2020. Association of DTNBP1 with schizophrenia: findings from two independent samples of Han Chinese population. Front Psychiatry 11:446.
180. Zhao J, Huang J, Zhi D, Yan W, Ma X, Yang X, Li X, Ke Q, Jiang T, Calhoun VD, Sui J. 2020. Functional network connectivity (FNC)-based generative adversarial network (GAN) and its applications in classification of mental disorders. J Neurosci Methods 341:108756.
181. Zhi D, Wu W, Xiao B, Qi S, Jiang R, Yang X, Yang J, Xiao W, Liu C, Long H, Calhoun VD, Long L, Sui J. 2020. NR4A1 methylation associated multimodal neuroimaging patterns impaired in temporal lobe epilepsy.
Front Neurosci 14:727.
182. Zuo N, Hu T, Liu H, Sui J, Liu Y, Jiang T. 2020. Gray matter-based age prediction characterizes different regional patterns. Neurosci Bull.
183. Zuo N, Salami A, Liu H, Yang Z, Jiang T. 2020. Functional maintenance in the multiple demand network characterizes superior fluid intelligence in aging. Neurobiol Aging 85:145-153.

Biology Image Analysis
184. Chi Xiao, Xi Chen, Qiwei Xie, Guoqing Li, Hao Xiao, Jingdong Song, Hua Han. Virus identification in electron microscopy images by residual mixed attention network. Computer Methods and Programs in Biomedicine, vol. 198, 2020. (Zone 2, TOP)
185. Qiwei Xie, Yanfei Liu, Hui Huang, Bei Hong, Jinxin Wang, Hua Han, Yue Liu. An innovative method for screening and evaluating the degree of diabetic retinopathy and possible drug treatment options based on artificial algorithms. Pharmacological Research, vol.
159, 2020. (Zone 2, TOP)

Materials Science
186. Jie Zhao, Lijun Shen, Fang Liu, Pan Zhao, Qi Huang, Hua Han, Lianmao Peng, Xuelei Liang. Quality metrology of carbon nanotube thin films and its application for carbon nanotube-based electronics. Nano Research, vol. 13, pp. 1749-1755, 2020. (Zone 1, TOP)
187. Zi Wang, Lina Zhang, Weifu Li, Zijun Qin, Zexin Wang, Zihang Li, Liming Tan, Lilong Zhu, Feng Liu, Hua Han, Liang Jiang. A high-throughput approach to explore the multi-component alloy space: a case study of nickel-based superalloys. Journal of Alloys and Compounds, vol. 828,
158100, 2020. (Zone 1, TOP)

Speech and Language Technology
188. Jiajun Zhang, Long Zhou, Yang Zhao and Chengqing Zong. Synchronous bidirectional inference for neural sequence generation. Artificial Intelligence, 281 (2020) 103234, pp. 1-19.
189. Shaonan Wang, Jiajun Zhang, Haiyan Wang, Nan Lin and Chengqing Zong. Fine-grained neural decoding with distributed word representations. Information Sciences, 507 (2020): 256-272.
190. Kexin Wang, Yu Zhou, Jiajun Zhang, Shaonan Wang, and Chengqing Zong. Structurally-comparative hinge loss for dependency-based neural text representation. ACM Transactions on Asian and Low-Resource Language Information Processing (TALLIP), vol. 19, no.
4, article 58, May 2020.
191. Qianlong Du, Chengqing Zong and Keh-Yih Su. Conducting natural language inference with word-pair-dependency and local context. ACM Transactions on Asian and Low-Resource Language Information Processing (TALLIP), vol. 19, no. 3, article 47, pp. 1-23, April 2020.
192. Cunhang Fan, Jianhua Tao, Bin Liu, Jiangyan Yi, Zhengqi Wen and Xuefei Liu. End-to-end post-filter for speech separation with deep attention fusion features. IEEE/ACM Transactions on Audio, Speech and Language Processing, vol. 28, pp. 1303-1314, 2020-03.
193. Bocheng Zhao, Jianhua Tao, Minghao Yang, Zhengkun Tian, Cunhang Fan, Ye Bai. Deep imitator: handwriting calligraphy imitation via deep attention networks. Pattern Recognition, vol. 104, August 2020.
194. Yongwei Li, Ken-Ichi Sakakibara, Masato Akagi. Simultaneous estimation of glottal source waveforms and vocal tract shapes from speech signals based on ARX-LF model. Journal of Signal Processing Systems, vol. 92, no. 8, pp. 831-838, 2020.
195. Ye Bai, Jiangyan Yi, Jianhua Tao, Zhengqi Wen, Cunhang Fan. A public Chinese dataset for language model adaptation. Journal of Signal Processing Systems for Signal, Image and Video Technology, vol. 92, no.
8, pp. 839-851, special issue: SI, 2020-08.
196. Zheng Lian, Ya Li, Jianhua Tao, Jian Huang, Mingyue Niu. Expression analysis based on face regions in real-world conditions. International Journal of Automation and Computing, vol. 17, no. 1, pp. 96-107, 2020-02.
197. Ziping Zhao, Zhongtian Bao, Zixing Zhang, Nicholas Cummins, Haishuai Wang, Jianhua Tao, Björn Schuller. Automatic assessment of depression from speech via a hierarchical attention transfer network and attention autoencoder. IEEE Journal of Selected Topics in Signal Processing, vol. 14, no. 2, pp. 423-434, 2020-02.

National Journals
1. Chengqing Zong. Prospects for human language technology (人类语言技术展望). Communications of CAAI (中国人工智能学会通讯), 2020,
Vol. 10, No. 1, pp. 1-5.
2. Qianlong Du, Chengqing Zong, Keh-Yih Su. Using context … (利用上下文…). 2020, 2(1): 26-35.
3. Xiaomian Kang, Chengqing Zong. Neural machine translation incorporating discourse structural position encoding (融合篇章结构位置编码的神经机器翻译). Chinese Journal of Intelligent Science and Technology (智能科学与技术学报), 2020, 2(2): 144-152.
4. Tian …, Jiajun Zhang. A method for detecting machine translation outputs based on pre-trained models (基于预训练模型的机器翻译译文检测方法) [J].
Technology Intelligence Engineering (情报工程), 2020, 6(5): 15-26.
5. Shuo Wang, …, Heping Li, Yihong Wu. Relative pose calibration of a range sensor and a camera based on coplanar circles (基于共面圆的距离传感器与相机的相对位姿标定). Acta Automatica Sinica (自动化学报), vol. 46, no. 6, pp. 1154-1165, 2020.
6. Haitao Yu, Xiaoshan Yang, Changsheng Xu. An adversarial video generation method based on multimodal input (基于多模态输入的对抗式视频生成方法) [J]. Journal of Computer Research and Development (计算机研究与发展), 57(7): 1522-1530, 2020.
7. …, Xiaopeng Zhang.
Element recognition and layout semantic analysis of residential floor plans based on CNN (基于CNN的住宅平面图元素识别与布局语义分析). Chinese Journal of Stereology and Image Analysis (中国体视学与图像分析), 2020, 25(2): 174-182.
8. M. Yuan, L. Dai, D.-M. Yan, L. Zhang, X.-P. Zhang. Fast and error-bounded space-variant bilateral filtering. Journal of Computer Science and Technology, 2020, 34(3): 550-568.
9. Kang Liu. A survey on neural relation extraction. Science China Technological Sciences, pp. 1971-1989, 2020-10.
10. Yunze Gao, Yingying Chen, Jinqiao Wang, Hanqing Lu. Progressive rectification network for irregular text recognition. Sci. China Inf. Sci.
63(2): 120101 (2020).
11. Jianhua Tao, Ruibo Fu, Jiangyan Yi, Chenglong Wang, Tao Wang. The development and challenges of speech forgery and forgery detection (语音伪造与鉴伪的发展与挑战). Journal of Cyber Security (信息安全学报), vol. 5, no. 2, pp. 28-38, 2020-04.
12. Wei Wang, Jing Dong, Ziwen He, Zhenan Sun. An overview of visual adversarial example generation techniques (视觉对抗样本生成技术概述). Journal of Cyber Security (信息安全学报), vol. 5, no. 2, pp.
39-48, 2020.

International Conferences

Computer Vision
1. Sheng Han, Wei Gao*, Yiming Wan, Yihong Wu. Scene-unified image translation for visual localization. Abu Dhabi National Exhibition Center (ADNEC), Abu Dhabi, United Arab Emirates (UAE), 10.25-10.28, pp. 2266-2270, ICIP 2020.
2. Yiming Wan, Wei Gao*, Sheng Han, Yihong Wu. Boosting image-based localization via randomly geometric data augmentation.
Abu Dhabi National Exhibition Center (ADNEC), Abu Dhabi, United Arab Emirates (UAE), 10.25-10.28, pp. 688-692, ICIP 2020.
3. Yiming Wan, Wei Gao*, Sheng Han, Yihong Wu. Dynamic object-aware monocular visual odometry with local and global information aggregation. Abu Dhabi National Exhibition Center (ADNEC), Abu Dhabi, United Arab Emirates (UAE), 10.25-10.28, pp. 603-607, ICIP 2020.
4. B. Liu, Q. Dong*, and Z. Hu. Zero-shot learning from adversarial feature residual to compact visual feature. AAAI 2020, USA, pp. 11547-11554.
5. Mengqi Rong, Shuhan Shen, Zhanyi Hu. 3D semantic labeling of photogrammetry meshes based on active learning. International Conference on Pattern Recognition (ICPR), Milan, Italy, 2020.
6. Xiangwei Dang, Xingdong Liang, Yanlei Li, Zheng Rong. Moving objects elimination towards enhanced dynamic SLAM fusing LiDAR and mmW-radar.
2020 IEEE MTT-S International Conference on Microwaves for Intelligent Mobility (ICMIM), 2020.
7. Xingjia Pan, Yuqiang Ren, Kekai Sheng, Weiming Dong, Haolei Yuan, Xiaowei Guo, Chongyang Ma, Changsheng Xu. Dynamic refinement network for oriented and densely packed object detection. IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) 2020: 11204-11213.
8. Yiming Li, Xiaoshan Yang, Changsheng Xu. Structured neural motifs: scene graph parsing via enhanced context. Multimedia Modeling (MMM), pp. 175-188, Daejeon, South Korea, January 5-8, 2020.
9. Xuecheng Ning, Xiaoshan Yang, Changsheng Xu. Multi-hop interactive cross-modal retrieval. Multimedia Modeling (MMM), pp. 681-693, Daejeon, South Korea, January 5-8, 2020.
10. Lin Chen, Yong Zhao, Shibiao Xu, Shuhui Bu, Pengcheng Han, Gang Wan. DenseFusion: large-scale online dense pointcloud and DSM mapping for UAVs. IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2020.
11. Yuyang Zhang, Jinge Wang, Shibiao Xu, Xiao Liu and Xiaopeng Zhang.
MLIFeat: multi-level information fusion based deep local features. Asian Conference on Computer Vision (ACCV), 2020.
12. Jianbo Liu, Yongcheng Liu, Ying Wang, Véronique Prinet, Shiming Xiang and Chunhong Pan. Decoupled representation learning for skeleton-based gesture recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5751-5760, 2020.
13. Xin Zhang, Chunlei Huo, and Chunhong Pan. View-angle invariant object monitoring without image registration. In Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 2283-2287, 2020.
14. Jin Gao, Weiming Hu, and Yan Lu. Recursive least-squares estimator-aided online learning for visual tracking. IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7386-7395, 2020.
15. Ziqi Zhang, Yaya Shi, Chunfeng Yuan, Bing Li, Peijin Wang, Weiming Hu, and Zhengjun Zha. Object relational graph with teacher-recommended learning for video captioning. IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp.
13278-13288, 2020.
16. Zhipeng Zhang, Houwen Peng, Jianlong Fu, Bing Li, and Weiming Hu. Ocean: object-aware anchor-free tracking. European Conference on Computer Vision (ECCV), vol. 21, pp. 771-787, 2020.
17. Yufan Liu, Minglang Qiao, Mai Xu, Bing Li, Weiming Hu, and Ali Borji. Learning to predict salient faces: a novel visual-audio saliency model. European Conference on Computer Vision (ECCV), vol. 20, pp. 413-429, 2020.
18. Shaoru Wang, Yongchao Gong, Junliang Xing, Lichao Huang, Chang Huang, and Weiming Hu. RDSNet: a new deep architecture for reciprocal object detection and instance segmentation. AAAI Conference on Artificial Intelligence (AAAI), pp. 12208-12215, 2020.
19. Zhenbang Li, Qiang Wang, Jin Gao, Bing Li, and Weiming Hu. End-to-end temporal feature aggregation for Siamese trackers. IEEE International Conference on Image Processing, pp. 2056-2060, 2020.
20. Zhenbang Li, Qiang Wang, Jin Gao, Bing Li, and Weiming Hu. Globally spatial-temporal perception: a long-term tracking system. IEEE International Conference on Image Processing, pp.
2066-2070, 2020.
21. Zongwei Zhou, Yangxi Li, Jin Gao, Junliang Xing, Liang Li, and Weiming Hu. Anchor-free one-stage online multi-object tracking. Chinese Conference on Pattern Recognition and Computer Vision (PRCV), vol. 2, pp. 55-68, 2020.
22. Longteng Guo, Jing Liu, Xinxin Zhu, Peng Yao, Shichen Lu, Hanqing Lu. Normalized and geometry-aware self-attention network for image captioning. CVPR 2020: 10324-10333.
23. Ke Cheng, Yifan Zhang, Xiangyu He, Weihan Chen, Jian Cheng, Hanqing Lu. Skeleton-based action recognition with shift graph convolutional network. CVPR 2020: 180-189.
24. Xiaomei Zhang, Yingying Chen, Bingke Zhu, Jinqiao Wang and Ming Tang. Part-aware context network for human parsing. CVPR 2020.
25. Shuai Zheng, Zhenfeng Zhu, Xingxing Zhang, Zhizhe Liu, Jian Cheng, Yao Zhao. Distribution-induced bidirectional generative adversarial network for graph representation learning.
cvpr 2020<o:p></o:p></span></p> <p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8; tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family:"times new roman"; mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>26.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>xiangyu he, zitao mo, ke cheng, weixiang xu, qinghao hu, peisong wang, qingshan liu, jian cheng. proxybnn: learning binarized neural networks via proxy matrices. eccv 2020<o:p></o:p></span></p> <p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8; tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family:"times new roman"; mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>27.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>ke cheng, yifan zhang, congqi cao, lei shi, jian cheng, hanqing lu:decoupling gcn with dropgraph module for skeleton-based action recognition. eccv (24) 2020: 536-553<o:p></o:p></span></p> <p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8; tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family:"times new roman"; mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>28.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>xin wen, biying li, haiyun guo, zhiwei liu, guosheng hu, ming tang, jinqiao wang: adaptive variance based label distribution learning for facial age estimation. eccv (23) 2020: 379-395<o:p></o:p></span></p> <p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8; tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family:"times new roman"; mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>29.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>tong wang, yousong zhu, chaoyang zhao, wei zeng, yaowei wang, jinqiao wang, and ming tang. large batch optimization for object detection: training coco in 12 minutes. 
eccv 2020<o:p></o:p></span></p> <p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8; tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family:"times new roman"; mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>30.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>lu zhou, yingying chen, yunze gao, jinqiao wang, and hanqing lu</span><span style='font-size:12.0pt; line-height:110%;font-family:�[so;mso-ascii-font-family:"times new roman"; mso-hansi-font-family:"times new roman"'> �</span><span lang=en-us style='font-size:12.0pt;line-height:110%'>occlusion-aware siamese network for human pose estimation.eccv 2020<o:p></o:p></span></p> <p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8; tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family:"times new roman"; mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>31.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>xiaomei zhang, yingying chen, bingke zhu, jinqiao wang, and ming tang. blended grammar network for human parsing. eccv 2020<o:p></o:p></span></p> <p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8; tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family:"times new roman"; mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>32.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>kuan zhu, haiyun guo, zhiwei liu, ming tang, and jinqiao wang. identity-guided human semantic parsing for person re-identification. eccv 2020<o:p></o:p></span></p> <p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8; tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family:"times new roman"; mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>33.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>linyu zheng, ming tang, yingying chen, jinqiao wang, and hanqing lu. learning feature embeddings for discriminant model based tracking. 
34. Lei Shi, Yifan Zhang, Jian Cheng and Hanqing Lu. Decoupled Spatial-Temporal Attention Network for Skeleton-Based Action-Gesture Recognition. ACCV 2020
35. Zerui Chen, Yan Huang, Hongyuan Yu, Bin Xue, Ke Han, Yiru Guo, Liang Wang. Towards Part-Aware Monocular 3D Human Pose Estimation: An Architecture Search Approach. Proc. European Conference on Computer Vision, pp. 715-732, August 2020, Glasgow, UK.
36. Weilun Chen, Zhaoxiang Zhang, Xiaolin Hu, Baoyuan Wu. Boosting Decision-Based Black-Box Adversarial Attacks with Random Sign Flip. Proc. European Conference on Computer Vision, pp. 276-293, August 2020, Glasgow, UK.
37. Jie Cao, Huaibo Huang, Yi Li, Ran He, Zhenan Sun. Informative Sample Mining Network for Multi-Domain Image-to-Image Translation. Proc. European Conference on Computer Vision, pp. 404-419, August 2020, Glasgow, UK.
38. Yibo Hu, Xiang Wu, Ran He. TF-NAS: Rethinking Three Search Freedoms of Latency-Constrained Differentiable Neural Architecture Search. Proc. European Conference on Computer Vision, pp. 123-139, August 2020, Glasgow, UK.
39. Chenyang Si, Xuecheng Nie, Wei Wang, Liang Wang, Tieniu Tan, Jiashi Feng. Adversarial Self-Supervised Learning for Semi-Supervised 3D Action Recognition. Proc. European Conference on Computer Vision, pp. 35-51, August 2020, Glasgow, UK.
40. Junsong Fan, Zhaoxiang Zhang, Tieniu Tan. Employing Multi-Estimations for Weakly-Supervised Semantic Segmentation. Proc. European Conference on Computer Vision, pp. 332-348, August 2020, Glasgow, UK.
41. Ke Han, Yan Huang, Zerui Chen, Liang Wang, Tieniu Tan. Prediction and Recovery for Adaptive Low-Resolution Person Re-Identification. Proc. European Conference on Computer Vision, pp. 193-209, August 2020, Glasgow, UK.
42. Peipei Li, Huaibo Huang, Yibo Hu, Xiang Wu, Ran He, Zhenan Sun. Hierarchical Face Aging through Disentangled Latent Characteristics. Proc. European Conference on Computer Vision, pp. 86-101, August 2020, Glasgow, UK.
43. Kunbo Zhang, Zhenteng Shen, Yunlong Wang, and Zhenan Sun. All-in-Focus Iris Camera with a Great Capture Volume. Proc. International Joint Conference on Biometrics, September 2020, Houston, USA.
44. Leyuan Wang, Kunbo Zhang, Min Ren, Yunlong Wang, Zhenan Sun. Recognition Oriented Iris Image Quality Assessment in the Feature Space. Proc. International Joint Conference on Biometrics, September 2020, Houston, USA.
45. Yu Tian, Kunbo Zhang, Leyuan Wang, and Zhenan Sun. Face Anti-Spoofing by Learning Polarization Cues in a Real-World Scenario. Proc. International Conference on Advances in Image Processing (ICAIP), November 2020, Chengdu, China.

Pattern Recognition
46. Qiongyi Zhou, Changde Du, Dan Li, Haibao Wang, Jian K. Liu, Huiguang He. Simultaneous Neural Spike Encoding and Decoding Based on Cross-Modal Dual Deep Generative Model. International Joint Conference on Neural Networks (IJCNN), 2020.
47. Wei Wei, Shuang Qiu, Xuelin Ma, Dan Li, Chuncheng Zhang, Huiguang He. A Transfer Learning Framework for RSVP-Based Brain Computer Interface. IEEE Engineering in Medicine and Biology Society, 2020.
48. Chenyao Wu, Shuang Qiu, Jiezhen Xing, Huiguang He. A CNN-Based Compare Network for Classification of SSVEPs in Human Walking. IEEE Engineering in Medicine and Biology Society, 2020.
49. Guirong Bai, Shizhu He, Kang Liu, Jun Zhao, Zaiqing Nie. Pre-trained Language Model Based Active Learning for Sentence Matching. The 28th International Conference on Computational Linguistics (COLING 2020), 1495-1504, Barcelona (online), 12.8-12.13, 2020.
50. Xinyu Zuo, Yubo Chen, Kang Liu and Jun Zhao. KnowDis: Knowledge Enhanced Data Augmentation for Event Causality Detection via Distant Supervision. The 28th International Conference on Computational Linguistics (COLING 2020), 1544-1550, Barcelona (online), 12.8-12.13, 2020.
51. Jian Liu, Dianbo Sui, Kang Liu, Jun Zhao. Graph-Based Knowledge Integration for Question Answering over Dialogue. The 28th International Conference on Computational Linguistics (COLING 2020), 1544-1550, Barcelona (online), 12.8-12.13, 2020.
52. Pei Chen, Hang Yang, Kang Liu, Ruihong Huang, Yubo Chen, Taifeng Wang, Jun Zhao. Reconstructing Event Regions for Event Extraction via Graph Attention Networks. Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Language Processing (AACL 2020), 811-820, Suzhou, China, 11.6-11.9, 2020.
53. Pengfei Cao, Yubo Chen, Jun Zhao, Taifeng Wang. Incremental Event Detection via Knowledge Consolidation Networks. Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP 2020), 707-717, Dominican Republic (online), 11.16-11.20, 2020.
54. Zhixing Tian, Yuanzhe Zhang, Kang Liu, Jun Zhao, Yantao Jia, Zhicheng Sheng. Scene Restoring for Narrative Machine Reading Comprehension. The 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP 2020), 3063-3073, Dominican Republic (online), 11.16-11.20, 2020.
55. Jian Liu, Yubo Chen, Kang Liu, Yantao Jia, Zhicheng Sheng. How Does Context Matter? On the Robustness of Event Detection with Context-Selective Mask Generalization. Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2523-2532, Dominican Republic (online), 11.16-11.20, 2020.
56. Jian Liu, Yubo Chen, Kang Liu, Wei Bi, Xiaojiang Liu. Event Extraction as Machine Reading Comprehension. Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), 1641-1651, Dominican Republic (online), 11.16-11.20, 2020.
57. Dianbo Sui, Yubo Chen, Jun Zhao, Yantao Jia, Yuantao Xie, Weijian Sun. FedED: Federated Learning via Ensemble Distillation for Medical Relation Extraction. Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2118-2128, Dominican Republic (online), 11.16-11.20, 2020.
58. Zhixing Tian, Yuanzhe Zhang, Xinwei Feng, Wenbin Jiang, Yajuan Lyu, Kang Liu, Jun Zhao. Capturing Sentence Relations for Answer Sentence Selection with Multi-Perspective Graph Encoding. The Thirty-Fourth AAAI Conference on Artificial Intelligence (AAAI 2020), 9032-9039, New York, 2.7-2.12, 2020.
59. Jian Liu, Yubo Chen, Jun Zhao. Knowledge Enhanced Event Causality Identification with Mention Masking Generalizations. Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence (IJCAI 2020), 3608-3614, Japan (online), 6.11-6.17, 2020.
60. Pengfei Cao, Chenwei Yan, Xiangling Fu, Yubo Chen, Kang Liu, Jun Zhao, Shengping Liu, Weifeng Chong. Clinical-Coder: Assigning Interpretable ICD-10 Codes to Chinese Clinical Notes. Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL 2020) Demo, 294-301, Seattle (online), 7.5-7.10, 2020.
61. Pengfei Cao, Yubo Chen, Kang Liu, Jun Zhao, Shengping Liu, Weifeng Chong. HyperCore: Hyperbolic and Co-graph Representation for Automatic ICD Coding. Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL 2020), 3105-3114, Seattle (online), 7.5-7.10, 2020.
62. Yuanzhe Zhang, Zhongtao Jiang, Tao Zhang, Shiwan Liu, Jiarun Cao, Kang Liu, Shengping Liu, Jun Zhao. MIE: A Medical Information Extractor towards Medical Dialogues. Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL 2020), 6460-6469, Seattle (online), 7.5-7.10, 2020.
63. Yu Zhao, Anxiang Zhang, Ruobing Xie, Kang Liu, Xiaojie Wang. Connecting Embeddings for Knowledge Graph Entity Typing. Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL 2020), 6419-6428, Seattle (online), 7.5-7.10, 2020.
64. Jiaxing Wang, Haoli Bai, Jiaxiang Wu, Xupeng Shi, Junzhou Huang, Irwin King, Michael Lyu, Jian Cheng. Revisiting Parameter Sharing for Automatic Neural Channel Number Search. NeurIPS 2020
65. Peisong Wang, Qiang Chen, Xiangyu He, Jian Cheng. Towards Accurate Post-Training Network Quantization via Bit-Split and Stitching. ICML 2020
66. Gang Li, Peisong Wang, Zejian Liu, Cong Leng, Jian Cheng. Hardware Acceleration of CNN with One-Hot Quantization of Weights and Activations. DATE 2020
67. Cong Wang, Cheng-Lin Liu. Scene Text Recognition by Attention Network with Gated Embedding. International Joint Conference on Neural Networks (IJCNN), Glasgow, UK, July 19-24, 2020.
68. Xiao-Hui Li, Fei Yin, Cheng-Lin Liu. Page Segmentation Using Convolutional Neural Network and Graphical Model. IAPR Workshop on Document Analysis Systems (DAS), Wuhan, China, July 26-29, 2020.
69. Guo-Wang Xie, Fei Yin, Xu-Yao Zhang, Cheng-Lin Liu. Dewarping Document Image by Displacement Flow Estimation with Fully Convolutional Network. IAPR Workshop on Document Analysis Systems (DAS), Wuhan, China, July 26-29, 2020.
70. Ming-Chao Xu, Fei Yin, Cheng-Lin Liu. SRR-GAN: Super-Resolution Recognition GAN for Low-Resolved Text Images. International Conference on Frontiers of Handwriting Recognition (ICFHR), Dortmund, Germany, September 7-10, 2020.
71. Zhen-Xing Wang, Qiu-Feng Wang, Fei Yin, Cheng-Lin Liu. Weakly Supervised Learning for Over-Segmentation Based Handwritten Chinese Text Recognition. International Conference on Frontiers of Handwriting Recognition (ICFHR), Dortmund, Germany, September 7-10, 2020.
72. Da-Han Wang, Fei Yin, Jin-Wen Wu, Yu-Pei Yan, Zhi-Cai Huang, Gui-Yun Chen, Yao Wang, Cheng-Lin Liu. ICFHR 2020 Competition on Offline Recognition and Spotting of Handwritten Mathematical Expressions - OffRaSHME. International Conference on Frontiers of Handwriting Recognition (ICFHR), Dortmund, Germany, September 7-10, 2020.
73. Longfei Xie, Xu-Yao Zhang. Gate-Fusion Transformer for Multimodal Sentiment Analysis. International Conference on Pattern Recognition and Artificial Intelligence (ICPRAI), October 19-23, 2020.
74. Caiyong Wang, Yunlong Wang, Boqiang Xu, Yong He, Zhiwei Dong, Zhenan Sun. A Lightweight Multi-Label Segmentation Network for Mobile Iris Biometrics. Proc. IEEE International Conference on Acoustics, Speech, and Signal Processing, pp. 1006-1010, May 2020, Barcelona, Spain.
75. Qi Zhang, Zhen Lei, Zhaoxiang Zhang, Stan Z. Li. Context-Aware Attention Network for Image-Text Retrieval. Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp. 3533-3542, June 2020, USA.
76. Wenkai Dong, Zhaoxiang Zhang, Chunfeng Song, Tieniu Tan. Instance Guided Proposal Network for Person Search. Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp. 2582-2591, June 2020, USA.
77. Zhihang Li, Teng Xi, Jiankang Deng, Gang Zhang, Shengzhao Wen, Ran He. GP-NAS: Gaussian Process Based Neural Architecture Search. Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp. 11930-11939, June 2020, USA.
78. Junran Peng, Xingyuan Bu, Ming Sun, Zhaoxiang Zhang, Tieniu Tan, Junjie Yan. Large-Scale Object Detection in the Wild from Imbalanced Multi-Labels. Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp. 9706-9715, June 2020, USA.
79. Ya Jing, Wei Wang, Liang Wang, Tieniu Tan. Cross-Modal Cross-Domain Moment Alignment Network for Person Search. Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp. 10675-10683, June 2020, USA.
80. Boyan Duan, Chaoyou Fu, Yi Li, Xingguang Song, Ran He. Cross-Spectral Face Hallucination via Disentangling Independent Factors. Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp. 7927-7935, June 2020, USA.
81. Wenkai Dong, Zhaoxiang Zhang, Chunfeng Song, Tieniu Tan. Bi-Directional Interaction Network for Person Search. Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp. 2836-2845, June 2020, USA.
82. Wentao Jiang, Si Liu, Chen Gao, Jie Cao, Ran He, Jiashi Feng, Shuicheng Yan. PSGAN: Pose and Expression Robust Spatial-Aware GAN for Customizable Makeup Transfer. Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp. 5193-5201, June 2020, USA.
83. Junsong Fan, Zhaoxiang Zhang, Chunfeng Song, Tieniu Tan. Learning Integral Objects with Intra-Class Discriminator for Weakly-Supervised Semantic Segmentation. Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp. 4282-4291, June 2020, USA.

Artificial Intelligence
84. Kekai Sheng, Weiming Dong, Menglei Chai, Guohui Wang, Peng Zhou, Feiyue Huang, Bao-Gang Hu, Rongrong Ji, Chongyang Ma. Revisiting Image Aesthetic Assessment via Self-Supervised Feature Learning. The Thirty-Fourth AAAI Conference on Artificial Intelligence (AAAI 2020): 5709-5716
85. Jiaxing Wang, Jiaxiang Wu, Haoli Bai, Jian Cheng. M-NAS: Meta Neural Architecture Search. AAAI 2020
86. Peisong Wang, Xiangyu He, Gang Li, Tianli Zhao, Jian Cheng. Sparsity-Inducing Binarized Neural Networks. AAAI 2020
87. Lu Zhou, Yingying Chen, Jinqiao Wang, Hanqing Lu. Progressive Bi-C3D Pose Grammar for Human Pose Estimation. AAAI 2020
88. Ya Jing, Chenyang Si, Junbo Wang, Wei Wang, Liang Wang, Tieniu Tan. Pose-Guided Multi-Granularity Attention Network for Text-Based Person Search. Proc. AAAI Conference on Artificial Intelligence, pp. 11189-11196, February 2020, New York, USA.
89. Qi Li*, Yunfan Liu*, Zhenan Sun. Age Progression and Regression with Spatial Attention Modules. Proc. AAAI Conference on Artificial Intelligence, pp. 11378-11385, February 2020, New York, USA.
90. Linjiang Huang, Yan Huang, Wanli Ouyang and Liang Wang. Part-Level Graph Convolutional Network for Skeleton-Based Action Recognition. Proc. AAAI Conference on Artificial Intelligence, pp. 11045-11052, February 2020, New York, USA.
91. Linjiang Huang, Yan Huang, Wanli Ouyang and Liang Wang. Relational Prototypical Network for Weakly Supervised Temporal Action Localization. Proc. AAAI Conference on Artificial Intelligence, pp. 11053-11060, February 2020, New York, USA.
92. Min Ren, Yunlong Wang, Zhenan Sun, Tieniu Tan. Dynamic Graph Representation for Occlusion Handling in Biometrics. Proc. AAAI Conference on Artificial Intelligence, pp. 11940-11947, February 2020, New York, USA.
93. Yanbei Liu, Xiao Wang, Shu Wu, Zhitao Xiao. Independence Promoted Graph Disentangled Networks. Proc. AAAI Conference on Artificial Intelligence, pp. 4916-4923, February 2020, New York, USA.
94. Junsong Fan, Zhaoxiang Zhang, Tieniu Tan, Chunfeng Song, Jun Xiao. CIAN: Cross-Image Affinity Net for Weakly Supervised Semantic Segmentation. Proc. AAAI Conference on Artificial Intelligence, pp. 10762-10769, February 2020, New York, USA.

Machine Learning
95. Changde Du, Changying Du, Lijie Huang, Huiguang He. Conditional Generative Neural Decoding with Structured CNN Feature Prediction. AAAI, pp. 2629-2636, 2020.
96. Zhengxiong Luo, Yan Huang, Shang Li, Liang Wang, and Tieniu Tan. Unfolding the Alternating Optimization for Blind Super Resolution. Conference on Neural Information Processing Systems, December 2020, Vancouver, Canada.
97. Hao Zhu, Chaoyou Fu, Qianyi Wu, Wayne Wu, Chen Qian, Ran He. AOT: Appearance Optimal Transport Based Identity Swapping for Forgery Detection. Proc. Conference on Neural Information Processing Systems, December 2020, Vancouver, Canada.
98. Bojing Feng, Wenfang Xue, Bindang Xue, Zeyu Liu. Every Corporation Owns Its Image: Corporate Credit Ratings via Convolutional Neural Networks. Proc. International Conference on Computer and Communications, pp. 1578-1583, December 2020, Chengdu, China.

Data Mining
99. Xiaohan Li, Mengqi Zhang, Shu Wu, Zheng Liu, Liang Wang, Philip S. Yu. Dynamic Graph Collaborative Filtering. Proc. IEEE International Conference on Data Mining, November 2020, Sorrento, Italy.
100. Yufeng Zhang, Xueli Yu, Zeyu Cui, Shu Wu, Zhongzhen Wen, Liang Wang. Every Document Owns Its Structure: Inductive Text Classification via Graph Neural Networks. Proc. Conference of the Association for Computational Linguistics, pp. 334-339, July 2020 (online).
101. Yanqiao Zhu, Yichen Xu, Feng Yu, Qiang Liu, Shu Wu, and Liang Wang. Deep Graph Contrastive Representation Learning. Proc. ICML 2020 Workshop on Graph Representation Learning and Beyond, July 2020, Vienna, Austria.

Computer Graphics
102. Mingxin Yang, Jianwei Guo, Juntao Ye, Xiaopeng Zhang. Detailed 3D Face Reconstruction from Single Images via Self-Supervised Attribute Learning. In SIGGRAPH Asia 2020 Posters, pp. 1-2, 2020.
103. Zhongqi Wu, Chuanqing Zhuang, Jun Xiao, and Jianwei Guo. Deep Specular Highlight Removal for Single Real-World Image. In SIGGRAPH Asia 2020 Posters (SA '20), December 4-13, 2020. ACM, New York, NY, USA, 2 pages.
104. Zeyu Shen, Mingyang Zhao, Xiaohong Jia, Dong-Ming Yan. Using Convex Hull for Fast and Accurate Ellipse Detection. SIGGRAPH Posters 2020
105. Youxin Pang, Mengke Yuan, Qiang Fu, Dong-Ming Yan. Reflection Removal via Realistic Training Data Generation. SIGGRAPH Posters 2020

Multimedia Computing
106. Yingying Deng, Fan Tang, Weiming Dong, Wen Sun, Feiyue Huang, Changsheng Xu. Arbitrary Style Transfer via Multi-Adaptation Network. ACM Multimedia 2020: 2719-2727
107. Minxuan Lin, Yingying Deng, Fan Tang, Weiming Dong, Changsheng Xu. Multi-Attribute Guided Painting Generation. The 2nd IEEE Workshop on Artificial Intelligence for Art Creation (AIART) 2020: 400-403
Multimedia Computing

106. Yingying Deng, Fan Tang, Weiming Dong, Wen Sun, Feiyue Huang, Changsheng Xu. Arbitrary Style Transfer via Multi-Adaptation Network. ACM Multimedia 2020: 2719-2727.
107. Minxuan Lin, Yingying Deng, Fan Tang, Weiming Dong, Changsheng Xu. Multi-Attribute Guided Painting Generation. The 2nd IEEE Workshop on Artificial Intelligence for Art Creation (AIArt) 2020: 400-403.
108. Yukun Zuo, Hantao Yao, Changsheng Xu. Category-Level Adversarial Self-Ensembling for Domain Adaptation. ICME 2020: 1-6.
109. Shaobo Min, Hongtao Xie, Hantao Yao*, Xuran Deng, Zheng-Jun Zha, Yongdong Zhang. Hierarchical Granularity Transfer Learning. NeurIPS 2020.
110. Shaobo Min, Hantao Yao, Hongtao Xie, Chaoqun Wang, Zheng-Jun Zha, Yongdong Zhang. Domain-Aware Visual Bias Eliminating for Generalized Zero-Shot Learning. CVPR 2020: 12661-12670.
111. Yanfei Han, Quan Fang, Jun Hu, Shengsheng Qian, Changsheng Xu. GAEAT: Graph Auto-Encoder Attention Networks for Knowledge Graph Completion. CIKM 2020: 2053-2056.
112. Chunyang Li, Zhineng Chen, Caiyan Jia, Hongyun Bao, Changsheng Xu. AutoSoccer: An Automatic Soccer Live Broadcasting Generator. ICME Workshops 2020: 1-2.
113. Youze Wang, Shengsheng Qian, Jun Hu, Quan Fang, Changsheng Xu. Fake News Detection via Knowledge-Driven Multimodal Graph Convolutional Networks. ICMR 2020: 540-547.
114. Jun Hu, Quan Fang, Shengsheng Qian, Changsheng Xu. Multi-Modal Attentive Graph Pooling Model for Community Question Answer Matching. ACM Multimedia 2020: 3505-3513.
115. Yingying Zhang, Quan Fang, Shengsheng Qian, Changsheng Xu. Multi-Modal Multi-Relational Feature Aggregation Network for Medical Knowledge Representation Learning. ACM Multimedia 2020: 3956-3965.
116. Chengpeng Fu, Jinqiang Wang, Jitao Sang, Jian Yu, Changsheng Xu. Beyond Literal Visual Modeling: Understanding Image Metaphor Based on Literal-Implied Concept Mapping. MMM (1) 2020: 111-123.
117. Chengcheng Ma, Weiliang Meng, Baoyuan Wu, Shibiao Xu and Xiaopeng Zhang. Efficient Joint Gradient Based Attack Against SOR Defense for 3D Point Cloud Classification. Proceedings of the 28th ACM International Conference on Multimedia (MM '20), Seattle, WA, USA. ACM 2020: 1819-1827.
118. Yanyuan Qiao, Zheng Yu, Jing Liu. RankVQA: Answer Re-ranking for Visual Question Answering. ICME 2020: 1-6.
119. Lei Shi, Yifan Zhang, Wanguo Wang, Jian Cheng, Hanqing Lu. Rethinking the PID Optimizer for Stochastic Optimization of Deep Networks. ICME 2020: 1-6.
120. Linyu Zheng, Ming Tang, Yingying Chen, Jinqiao Wang, Hanqing Lu. High-Speed and Accurate Scale Estimation for Visual Tracking with Gaussian Process Regression. ICME 2020: 1-6.
121. Fei Liu, Jing Liu, Xinxin Zhu, Richang Hong, Hanqing Lu. Dual Hierarchical Temporal Convolutional Network with QA-Aware Dynamic Normalization for Video Story Question Answering. ACM Multimedia 2020: 4253-4261.
122. Fei Zhao, Ting Zhang, Chao Ma, Ming Tang, Jinqiao Wang, Xiaobo Wang. Siamese Attentive Graph Tracking. ACM Multimedia 2020: 1542-1550.
123. Xiaoqing Liang, Xu Zhao, Chaoyang Zhao, Nanfei Jiang, Ming Tang, Jinqiao Wang. Task Decoupled Knowledge Distillation for Lightweight Face Detectors. ACM Multimedia 2020: 2184-2192.
124. Longteng Guo, Jing Liu, Xinxin Zhu, Xingjian He, Jie Jiang, Hanqing Lu. Non-Autoregressive Image Captioning with Counterfactuals-Critical Multi-Agent Learning. IJCAI 2020: 767-773.
125. Kai Niu, Yan Huang, Liang Wang. Textual Dependency Embedding for Person Search by Language. Proc. ACM Multimedia Conference, pp. 4032-4040, October 2020, Seattle, USA.
126. Yifan Song, Zhang Zhang, Caifeng Shan, Liang Wang. Stronger, Faster and More Explainable: A Graph Convolutional Baseline for Skeleton-Based Action Recognition. Proc. ACM Multimedia Conference, pp. 1625-1633, October 2020, Seattle, USA.
127. Peipei Li, Yinglu Liu, Hailin Shi, Xiang Wu, Yibo Hu, Ran He, Zhenan Sun. Dual-Structure Disentangling Variational Generation for Data-Limited Face Parsing. Proc. ACM Multimedia Conference, pp. 556-564, October 2020, Seattle, USA.
128. Boqiang Xu, Lingxiao He, Xingyu Liao, Wu Liu, Zhenan Sun, Tao Mei. Black Re-ID: A Head-Shoulder Descriptor for the Challenging Problem of Person Re-Identification. Proc. ACM Multimedia Conference, pp. 673-681, October 2020, Seattle, USA.

Image/Video Processing and Analysis

129. Yingying Zhang, Junyu Gao, Xiaoshan Yang, Chang Liu, Yan Li, Changsheng Xu. Find Objects and Focus on Highlights: Mining Object Semantics for Video Highlight Detection via Graph Neural Networks. AAAI, pp. 12902-12909, New York, NY, USA, February 7-12, 2020.
130. Feifei Zhang, Mingliang Xu, Qirong Mao, Changsheng Xu. Joint Attribute Manipulation and Modality Alignment Learning for Composing Text and Image to Image Retrieval. ACM Multimedia 2020: 3367-3376.
131. Jie Jiang, Jing Liu, Jun Fu, Xinxin Zhu, Hanqing Lu. Point Set Attention Network for Semantic Segmentation. ICIP 2020: 2186-2190.

Medical Image Analysis

132. Shuang Qiu, Shengpei Wang, Weibo Yi, Chuncheng Zhang, Huiguang He. Changes of Resting-State EEG Microstates Induced by Low-Frequency Repetitive Transcranial Magnetic Stimulation. IEEE Engineering in Medicine and Biology Society, 2020.
133. Chuncheng Zhang, Shuang Qiu, Shengpei Wang, Wei Wei, Huiguang He. Temporal Dynamics on Decoding Target Stimuli in Rapid Serial Visual Presentation Using Magnetoencephalography. IEEE Engineering in Medicine and Biology Society, 2020.
134. Yuanhao Guo, Fons J. Verbeek, Ge Yang. Probabilistic Inference for Camera Calibration in Light Microscopy under Circular Motion. Proc. IEEE International Symposium on Biomedical Imaging (ISBI), pp. 149-153, 2020.
135. D. Yao, J. Sui, E. Yang, P.T. Yap, D. Shen, and M. Liu. Temporal-Adaptive Graph Convolutional Network for Automated Identification of Major Depressive Disorder Using Resting-State fMRI. International Workshop on Machine Learning in Medical Imaging, pp. 1-10, 2020.
136. Shuang Gao, Vince D. Calhoun, and Jing Sui. Multi-Modal Component Subspace-Similarity-Based Multi-Kernel SVM for Schizophrenia Classification. In Medical Imaging 2020: Computer-Aided Diagnosis, vol. 11314, Proceedings of SPIE, H. K. Hahn and M. A. Mazurowski, Eds., 2020.

Biology Image Analysis

137. Yanan Lv, Xi Chen, Chang Shu, Hua Han. Robust Global Optimized Affine Registration Method for Microscopic Images of Biological Tissue. IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2020), pp. 1070-1074, Barcelona, Spain, May 4-8, 2020.

Speech and Language Technology

138. Haoran Li, Junnan Zhu, Jiajun Zhang, Chengqing Zong and Xiaodong He. Keywords-Guided Abstractive Sentence Summarization. The Thirty-Fourth AAAI Conference on Artificial Intelligence (AAAI), New York, USA, Feb. 7-12, 2020, pp. 8196-8203.
139. Yuchen Liu, Jiajun Zhang, Hao Xiong, Long Zhou, Zhongjun He, Hua Wu, Haifeng Wang and Chengqing Zong. Synchronous Speech Recognition and Speech-to-Text Translation with Interactive Decoding. The Thirty-Fourth AAAI Conference on Artificial Intelligence (AAAI), New York, USA, Feb. 7-12, 2020, pp. 8417-8424.
140. Shaonan Wang, Jiajun Zhang, Nan Lin and Chengqing Zong. Probing Brain Activation Patterns by Dissociating Semantics and Syntax in Sentences. The Thirty-Fourth AAAI Conference on Artificial Intelligence (AAAI), New York, USA, Feb. 7-12, 2020, pp. 9201-9208.
141. Junnan Zhu, Yu Zhou, Jiajun Zhang, Haoran Li, Chengqing Zong and Changliang Li. Multimodal Summarization with Guidance of Multimodal Reference. The Thirty-Fourth AAAI Conference on Artificial Intelligence (AAAI), New York, USA, Feb. 7-12, 2020, pp. 9749-9756.
142. Yang Zhao, Jiajun Zhang, Yu Zhou and Chengqing Zong. Knowledge Graphs Enhanced Neural Machine Translation. The 29th International Joint Conference on Artificial Intelligence and the 17th Pacific Rim International Conference on Artificial Intelligence (IJCAI-PRICAI 2020), Yokohama, Japan, July 11-17, 2020, pp. 4039-4045.
143. Junnan Zhu, Yu Zhou, Jiajun Zhang, and Chengqing Zong. Attend, Translate and Summarize: An Efficient Method for Neural Cross-Lingual Summarization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL), Online, July 5-10, 2020, pp. 1309-1321.
144. Xiaomian Kang, Yang Zhao, Jiajun Zhang and Chengqing Zong. Dynamic Context Selection for Document-Level Neural Machine Translation via Reinforcement Learning. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), November 16-20, 2020, pp. 2242-2254.
145. Jinghui Yan, Yining Wang, Lu Xiang, Yu Zhou and Chengqing Zong. A Knowledge-Driven Generative Model for Multi-Implication Chinese Medical Procedure Entity Normalization. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), November 16-20, 2020, pp. 1490-1499.
146. Yang Zhao, Lu Xiang, Junnan Zhu, Jiajun Zhang, Yu Zhou and Chengqing Zong. Knowledge Graph Enhanced Neural Machine Translation via Multi-Task Learning on Sub-Entity Granularity. Proceedings of the 28th International Conference on Computational Linguistics (COLING 2020), Barcelona, Spain (Online), December 8-13, 2020, pp. 4495-4505.
147. Jian Sun, Yu Zhou, Chengqing Zong. Dual Attention Network for Cross-Lingual Entity Alignment. The 28th International Conference on Computational Linguistics (COLING 2020), Barcelona, Spain (Online), December 8-13, 2020, pp. 3190-3201.
148. Jingyuan Sun, Shaonan Wang, Jiajun Zhang, and Chengqing Zong. Distill and Replay for Continual Language Learning. The 28th International Conference on Computational Linguistics (COLING 2020), Online, December 8-13, 2020, pp. 3569-3579.
149. Qian Wang, Jiajun Zhang, Lemao Liu, Guoping Huang and Chengqing Zong. Touch Editing: A Flexible One-Time Interaction Approach for Translation. In Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics (AACL) and the 10th International Joint Conference on Natural Language Processing (IJCNLP), December 4-7, 2020, pp. 1-11.
150. Zhengkun Tian, Jiangyan Yi, Ye Bai, Jianhua Tao, Shuai Zhang, Zhengqi Wen. Synchronous Transformers for End-to-End Speech Recognition. ICASSP 2020, pp. 7884-7888, May 4-8, 2020, Barcelona, Spain.
151. Ruibo Fu, Jianhua Tao, Zhengqi Wen, Jiangyan Yi, Tao Wang. Focusing on Attention: Prosody Transfer and Adaptative Optimization Strategy for Multi-Speaker End-to-End Speech Synthesis. ICASSP 2020, pp. 6709-6713, May 4-8, 2020, Barcelona, Spain.
152. Jian Huang, Jianhua Tao, Bin Liu, Zheng Lian, Mingyue Niu. Multimodal Transformer Fusion for Continuous Emotion Recognition. ICASSP 2020, pp. 3507-3511, May 4-8, 2020, Barcelona, Spain.
153. Donna Erickson, Shigeto Kawahara, Albert Rilliard, Ryoko Hayashi, Toshiyuki Sadanobu, Yongwei Li, Hayato Daikuhara, João de Moraes, Kerrie Obert. Cross Cultural Differences in Arousal and Valence Perceptions of Voice Quality. 10th International Conference on Speech Prosody, pp. 720-724, Tokyo, Japan, 2020.
154. Feihu Che, Dawei Zhang, Jianhua Tao, Mingyue Niu, Bocheng Zhao. ParamE: Regarding Neural Network Parameters as Relation Embeddings for Knowledge Graph Completion. AAAI 2020, pp. 2774-2781, New York, USA, 2020.
155. Licai Sun, Zheng Lian, Bin Liu, Jianhua Tao, Mingyue Niu. Multi-Modal Continuous Dimensional Emotion Recognition. 1st International Multimodal Sentiment Analysis in Real-Life Media Challenge and Workshop (MuSe 2020), pp. 27-34, October 12-16, 2020, Seattle, USA.
156. Cunhang Fan, Jianhua Tao, Bin Liu, Jiangyan Yi and Zhengqi Wen. Gated Recurrent Fusion of Spatial and Spectral Features for Multi-Channel Speech Separation with Deep Embedding Representations. Interspeech 2020, pp. 3321-3325, Shanghai, China, 2020.
157. Cunhang Fan, Jianhua Tao, Bin Liu, Jiangyan Yi and Zhengqi Wen. Joint Training for Simultaneous Speech Denoising and Dereverberation with Deep Embedding Representations. Interspeech 2020, pp. 4536-4540, Shanghai, China, 2020.
158. Ye Bai, Jiangyan Yi, Jianhua Tao, Zhengkun Tian, Zhengqi Wen, Shuai Zhang. Listen Attentively, and Spell Once: Whole Sentence Generation via a Non-Autoregressive Architecture for Low-Latency Speech Recognition. Proc. Interspeech 2020, pp. 3381-3385, Shanghai, China, 2020.
159. Yongwei Li, Jianhua Tao, Bin Liu, Donna Erickson, Masato Akagi. Comparison of Glottal Source Parameter Values in Emotional Vowels. Interspeech 2020, pp. 4103-4107, Shanghai, China, 2020.
160. Zhengkun Tian, Jiangyan Yi, Jianhua Tao, Ye Bai, Shuai Zhang, Zhengqi Wen. Spike-Triggered Non-Autoregressive Transformer for End-to-End Speech Recognition. Interspeech 2020, pp. 4395-4399, Shanghai, China, 2020.
161. Ruibo Fu, Jianhua Tao, Zhengqi Wen, Jiangyan Yi, Chunyu Qiang, Tao Wang. Dynamic Soft Windowing and Language Dependent Style Token for Code-Switching End-to-End Speech Synthesis. Interspeech 2020, pp. 2937-2941, Shanghai, China, 2020.
162. Jiangyan Yi, Jianhua Tao, Zhengkun Tian, Ye Bai, Cunhang Fan. Focal Loss for Punctuation Prediction. Interspeech 2020, pp. 721-725, Shanghai, China, 2020.
163. Tao Wang, Jianhua Tao, Ruibo Fu, Jiangyan Yi, Zhengqi Wen. Spoken Content and Voice Factorization for Few-Shot Speaker Adaptation. Interspeech 2020, pp. 796-800, Shanghai, China, 2020.
164. Tao Wang, Xuefei Liu, Jianhua Tao, Jiangyan Yi, Ruibo Fu, Zhengqi Wen. Non-Autoregressive End-to-End TTS with Coarse-to-Fine Decoding. Interspeech 2020, pp. 3984-3988, Shanghai, China, 2020.
165. Tao Wang, Jianhua Tao, Ruibo Fu, Jiangyan Yi, Zhengqi Wen, Chunyu Qiang. Bi-Level Speaker Supervision for One-Shot Speech Synthesis. Interspeech 2020, pp. 3989-3993, Shanghai, China, 2020.
166. Jian Huang, Jianhua Tao, Bin Liu, Zheng Lian. Learning Utterance-Level Representations with Label Smoothing for Speech Emotion Recognition. Interspeech 2020, pp. 4079-4083, Shanghai, China, 2020.
167. Zheng Lian, Jianhua Tao, Bin Liu, Jian Huang, Zhanlei Yang, Rongjun Li. Context-Dependent Domain Adversarial Neural Network for Multimodal Emotion Recognition. Interspeech 2020, pp. 394-398, Shanghai, China, 2020.
168. Zheng Lian, Jianhua Tao, Bin Liu, Jian Huang, Zhanlei Yang, Rongjun Li. Conversational Emotion Recognition Using Self-Attention Mechanisms and Graph Neural Networks. Interspeech 2020, pp. 2347-2351, Shanghai, China, 2020.
169. Zheng Lian, Zhengqi Wen, Xinyong Zhou, Songbai Pu, Shengkai Zhang, Jianhua Tao. ARVC: An Auto-Regressive Voice Conversion System Without Parallel Training Data. Interspeech 2020, pp. 4706-4710, Shanghai, China, 2020.
170. Ziping Zhao, Qifei Li, Nicholas Cummins, Bin Liu, Haishuai Wang, Jianhua Tao, Björn W. Schuller. Hybrid Network Feature Extraction for Depression Assessment from Speech. Interspeech 2020, pp. 4956-4960, Shanghai, China, 2020.
171. Ruibo Fu, Jianhua Tao, Zhengqi Wen, Jiangyan Yi, Tao Wang, Chunyu Qiang. Dynamic Speaker Representations Adjustment and Decoder Factorization for Speaker Adaptation in End-to-End Speech Synthesis. Interspeech 2020, pp. 4701-4705, Shanghai, China, 2020.
172. Guanjun Li, Shan Liang, Shuai Nie, Wenju Liu, Zhanlei Yang, Longshuai Xiao. Deep Neural Network-Based Generalized Sidelobe Canceller for Robust Multi-Channel Speech Recognition. Interspeech 2020, Shanghai, China, October 25-29, 2020, pp. 51-55.
173. Guanjun Li, Shan Liang, Shuai Nie, Wenju Liu, Zhanlei Yang, Longshuai Xiao. Microphone Array Post-Filter for Target Speech Enhancement Without a Prior Information of Point Interferers. Interspeech 2020, Shanghai, China, October 25-29, 2020, pp. 3306-3310.

National Conferences

1. Ke Tian and Jiajun Zhang. Quality Estimation for Machine Translation with Multi-Granularity Interaction. The 16th China Conference on Machine Translation, Springer, Singapore, October 11, 2020, pp. 1-11.
2. Xinyu Zuo, Yubo Chen, Kang Liu and Jun Zhao. Towards Causal Explanation Detection with Pyramid Salient-Aware Network. The 19th China National Conference on Computational Linguistics (CCL 2020), pp. 904-915, Online, October 30-November 1, 2020.
3. Pengfei Cao, Yubo Chen, Kang Liu, Jun Zhao. Chinese Named Entity Recognition via Adaptive Multi-Pass Memory Network with Hierarchical Tagging Mechanism. The 19th China National Conference on Computational Linguistics (CCL 2020), pp. 144-158, Online, October 30-November 1, 2020.
4. Zhucong Li, Baoli Zhang, Yubo Chen, Kang Liu, Jun Zhao and Shengping Liu. Multi-Specialty Domain Adaptation for Chinese Medical Named Entity Recognition. China Conference on Knowledge Graph and Semantic Computing (CCKS 2020), Nanchang, November 12-15, 2020.
5. [Chinese-language paper; authors and title garbled beyond recovery in the source]. China Conference on Knowledge Graph and Semantic Computing (CCKS 2020), Nanchang, China, November 12-15, 2020.
6. Hang Yang, Yubo Chen, Kang Liu, and Jun Zhao. Meta Learning for Event Argument Extraction via Domain-Specific Information Enhanced. China Conference on Knowledge Graph and Semantic Computing (CCKS 2020), Nanchang, November 12-15, 2020.
7. [Chinese-language paper; authors and title garbled beyond recovery in the source]. China Conference on Knowledge Graph and Semantic Computing (CCKS 2020), Nanchang, China, November 12-15, 2020.
8. [Chinese-language paper; authors and title garbled beyond recovery in the source]. China Conference on Knowledge Graph and Semantic Computing (CCKS 2020), Nanchang, China, November 12-15, 2020.
9. Yinyu Lan, Shizhu He, Kang Liu, Jun Zhao, Xiangrong Zeng, Shengping Liu. Path-Based Knowledge Reasoning with Textual Semantic Information for Medical Knowledge Graph Completion. The China Conference on Health Information Processing (CHIP 2020), Luzhou (Online), November 28-29, 2020.
10. Cheng Yan, Yuanzhe Zhang, Kang Liu, Jun Zhao, Yafei Shi, Shengping Liu. Enhancing Unsupervised Medical Entity Linking with Multi-Instance Learning. The China Conference on Health Information Processing (CHIP 2020), Luzhou (Online), November 28-29, 2020.
11. Jianhua Yang, Yan Huang, Linjiang Huang, Yunbo Wang, Zhanyu Ma, Liang Wang, Global Context Enhanced Multi-Modal Fusion for Referring Image Segmentation, Proc. Chinese Conference on Pattern Recognition and Computer Vision (PRCV), pp. 434-446, October 2020, Nanjing, China.

开放课题客座发表论文 Publications of External Projects

国际刊物 International Journals
1. Shiming Ge, Chunhui Zhang, Shikun Li, Dan Zeng, and Dacheng Tao. Cascaded Correlation Refinement for Robust Deep Tracking.
IEEE Transactions on Neural Networks and Learning Systems (TNNLS), 2020, 1-13.
2. Shiming Ge, Chenyu Li, Shengwei Zhao, and Dan Zeng. Occluded Face Recognition in the Wild by Identity-Diversity Inpainting. IEEE Transactions on Circuits and Systems for Video Technology (TCSVT), 2020, 3387-3397.
3. Congqi Cao, Jiakang Li, Runping Xi and Yanning Zhang, Club Ideas and Exertions: Aggregating Local Predictions for Action Recognition, IEEE Transactions on Circuits and Systems for Video Technology, 2020. DOI: 10.1109/TCSVT.2020.3017203.
4. Qian Sun, Qiaojun Li, Lei Chen, Jianning Quan, Lvjie Li, Pattern Recognition Based on Pulse Scanning Imaging and Convolutional Neural Network for Vibrational Events in Φ-OTDR, Optik, vol. 219, 165205, 2020.
5. Chen Shanxiong, Han Xu, Gao Weizhe, Liu Xuxin, Mo Bofeng, A Classification Method of Oracle Materials Based on Local Convolutional Neural Network Framework, IEEE Computer Graphics and Applications, vol. 40, no. 3, pp. 32-44, 2020.
6. Shiyu Zhu, Chen Shanxiong, Xihua Peng, Hailing Xiong, A Signal Reconstruction Method of Wireless Sensor Network Based on Compressed Sensing, EURASIP Journal on Wireless Communications and Networking, vol. 4, no. 1, pp. 1-27, 2020. (SCI)
7. Wen-Ze Shao*, Yuan-Yuan Liu, Lu-Yue Ye, Li-Qian Wang, Qi Ge, Bing-Kun Bao, Hai-Bo Li. DeblurGAN+: Revisiting Blind Motion Deblurring Using Conditional Adversarial Networks [J]. Signal Processing, 2020, 168: 107338. (SCI)
8. Xiang-Jun Shen*, Si-Xing Liu, Bing-Kun Bao, Chunhong Pan, Zheng-Jun Zha, Jianping Fan. A Generalized Least-Squares Approach Regularized with Graph Embedding for Dimensionality Reduction [J]. Pattern Recognition, 2020, 98: 107023. (SCI)
9. Xi Shao, Xuan Zhang, Guijin Tang, Bingkun Bao*. Scene Recognition Based on Recurrent Memorized Attention Network [J]. Electronics, 2020, 9(12): 2038. (SCI)
10. X. Liao*, K. Li, X. Zhu, K. J. Ray Liu. Robust Detection of Image Operator Chain with Two-Stream Convolutional Neural Network [J]. IEEE Journal of Selected Topics in Signal Processing, 14(5): 955-968, 2020.
[Top journal in signal processing; CAS Q1]
11. X. Liao*, J. Yin, M. Chen, Z. Qin. Adaptive Payload Distribution in Multiple Images Steganography Based on Image Texture Features [J]. IEEE Transactions on Dependable and Secure Computing, DOI: 10.1109/TDSC.2020.3004708, 2020. [Top journal in information security; CCF Class A journal]
12. X. Liao*, Y. Yu, B. Li, Z. Li, Z. Qin. A New Payload Partition Strategy in Color Image Steganography [J]. IEEE Transactions on Circuits and Systems for Video Technology, 30(3): 685-696, 2020.
[Authoritative journal in multimedia; selected as an ESI highly cited hot paper]
13. J. Yang, X. Liao*. An Embedding Strategy on Fusing Multiple Image Features for Data Hiding in Multiple Images [J]. Journal of Visual Communication and Image Representation, 71: 102822-102827, 2020. [SCI, IF: 2.479]
14. L. Peng, X. Liao*, M. Chen. Resampling Parameter Estimation via Dual-Filtering Based Convolutional Neural Network [J]. Multimedia Systems, DOI: 10.1007/s00530-020-00697-y, 2020. [SCI, IF: 1.563]
15. Xiaoke Hao, Yongjin Bao, Yingchun Guo, Ming Yu, Daoqiang Zhang, Shannon L. Risacher, Andrew J. Saykin, Xiaohui Yao, Li Shen, and the Alzheimer's Disease Neuroimaging Initiative, Multi-Modal Neuroimaging Feature Selection with Consistent Metric Constraint for Diagnosis of Alzheimer's Disease, Medical Image Analysis, vol. 60, no. 101625, pp. 1-13, 2020.
16. Baiying Lei, Yujia Zhao, Zhongwei Huang, Xiaoke Hao, Feng Zhou, Ahmed Elazab, Jing Qin, Haijun Lei, Adaptive Sparse Learning Using Multi-Template for Neurodegenerative Disease Diagnosis, Medical Image Analysis, vol. 61, no. 101632, pp.
1-13, 2020.
17. Aihua Zheng, Hongchao Li, Bo Jiang, Wei-Shi Zheng, Bin Luo. Joint Graph Regularized Dictionary Learning and Sparse Ranking for Multi-Modal Multi-Shot Person Re-Identification. Pattern Recognition, vol. 104, 2020: 107352. (SCI Q2)
18. Aihua Zheng, Xianmin Lin, Jiacheng Dong, Wenzhong Wang, Jin Tang, Bin Luo. Multi-Scale Attention Vehicle Re-Identification. Neural Computing and Applications, vol. 32, pp. 17489-17503, 2020. (SCI Q2)
19. Aihua Zheng, Naipeng Ye, Chenglong Li*, Xiao Wang, Jin Tang. Multi-Modal Foreground Detection via Inter- and Intra-Modality-Consistent Low-Rank Separation. Neurocomputing, vol. 371, pp. 27-38, 2020.
(SCI Q2)
20. Cairong Zhao, Xinbi Lv, Zhang Zhang, Wangmeng Zuo, Jun Wu and Duoqian Miao, Deep Fusion Feature Representation Learning with Hard Mining Center-Triplet Loss for Person Re-Identification, IEEE Transactions on Multimedia, 2020.
21. Zhenbing Zhao, Hongyu Qi, Yincheng Qi, Ke Zhang, Yongjie Zhai, Wenqing Zhao. Detection Method Based on Automatic Visual Shape Clustering for Pin-Missing Defect in Transmission Lines [J]. IEEE Transactions on Instrumentation and Measurement, 2020, 69(9): 6080-6091. (SCI Q2: 000559518800016)
22. Zhenbing Zhao, Hongyu Qi, Xiaoqing Fan, Guozhi Xu, Yincheng Qi, Yongjie Zhai, Ke Zhang. Image Representation Method Based on Relative Layer Entropy for Insulator Recognition [J]. Entropy, 2020, 22, 419; DOI: 10.3390/e22040419. (SCI Q3: 000537222600005)
23. Yun-Peng Xiao, Yu-Kun Lai, Fang-Lue Zhang, Chunpeng Li, Lin Gao. A Survey on Deep Geometry Learning: From a Representation Perspective.
Computational Visual Media, 2020, 6(2): 113-133.
24. Shuang Liu, Linlin Duan, Zhong Zhang*, Xiaozhong Cao, Tariq S. Durrani, Multimodal Ground-Based Remote Sensing Cloud Classification via Learning Heterogeneous Deep Features, IEEE Transactions on Geoscience and Remote Sensing, vol. 58, no. 11, pp. 7790-7800, Nov. 2020.
25. Shuang Liu, Mei Li, Zhong Zhang*, Xiaozhong Cao, Tariq S. Durrani, Ground-Based Cloud Classification Using Task-Based Graph Convolutional Network, Geophysical Research Letters, 47(5), e2020GL087338, 2020.
26. Shuang Liu, Mei Li, Zhong Zhang*, Baihua Xiao, Tariq S. Durrani, Multi-Evidence and Multi-Modal Fusion Network for Ground-Based Cloud Recognition, Remote Sensing, 2020, 12(3), 464.
27. Shuang Liu, Xiaolong Hao, Ronghua Zhang, Zhong Zhang*, Tariq S. Durrani, Adversarial Erasing Attention for Person Re-Identification in Camera Networks under Complex Environments, IEEE Access, vol. 8, no. 1, pp. 56469-56479, 2020.
28. Tian Q, Sun H, Ma C, et al. Age Estimation via Selecting Discriminated Features and Preserving Geometry.
KSII Transactions on Internet and Information Systems, 2020, 14(4): 1721-1737.
29. Tian Q, Zhang W, Cao M, et al. Moment-Guided Discriminative Manifold Correlation Learning on Ordinal Data. ACM Transactions on Intelligent Systems and Technology, 2020, 11(5): 1-18.
30. Xiang Gao, Jiazheng Luo, Kunqian Li, Zexiao Xie, Hierarchical RANSAC-Based Rotation Averaging, IEEE Signal Processing Letters, vol. 27, pp. 1874-1878, 2020.
31. Qinqin Zhou, Bineng Zhong*, Xiangyuan Lan, Gan Sun, Yulun Zhang, Baochang Zhang, Rongrong Ji. Fine-Grained Spatial Alignment Model for Person Re-Identification with Focal Triplet Loss.
IEEE Transactions on Image Processing, vol. 29, no. 6, pp. 7578-7589, 2020.

国内刊物 National Journals
1. Yaolin Tian, Shanxiong Chen, Fujia Zhao, Xiaoyu Lin, Hailing Xiong, 手写体版面分析和多风… (handwritten text layout analysis and multi-style … fusion; title truncated in the source), 计算机辅助设计与图形学学报 (Journal of Computer-Aided Design & Computer Graphics), 32(7): 1111-1200, 2020.
(EI)
2. Shanxiong Chen, Shiyu Zhu, Fujia Zhao, …, …, Hailing Xiong, 一种双判别器生成对抗网络的古彝文修复方法 (A Dual-Discriminator Generative Adversarial Network Method for Restoring Ancient Yi Texts; some author names illegible in the source), 自动化学报 (Acta Automatica Sinica), 2020.
(EI)
3. Shanxiong Chen, Xu Han, Xiaoyu Lin, …, …, 基于MSER和CNN的彝文古籍文献字符检测方法 (A Character Detection Method for Yi Ancient Documents Based on MSER and CNN), 华南理工大学学报 (Journal of South China University of Technology), 48(6): 123-133, 2020.
(EI)
4. Jun Shen, Xin Liao*, Zheng Qin, Xuchong Liu. 基于卷积神经网络的低嵌入率空域隐写分析方法 (A Spatial-Domain Steganalysis Method for Low Embedding Rates Based on Convolutional Neural Networks) [J]. 软件学报 (Journal of Software), DOI: 10.13328/j.cnki.jos.005980, 2020. [Ranked No. 1 among CCF-recommended Chinese journals]
5. Zhao K, Ding Y, Han Y, Fan Y, Alexander-Bloch AF, Han T, Jin D, Liu B, Lu J, Song C, Wang P, Wang D, Wang Q, Xu K, Yang H, Yao H, Zheng Y, Yu C, Zhou B, Zhang X, Zhou Y, Jiang T, Zhang X, Liu Y. 2020. Independent and Reproducible Hippocampal Radiomic Biomarkers for Multisite Alzheimer's Disease: Diagnosis, Longitudinal Progress and Biological Basis. Science Bulletin.
65: 1103-1113.
6. Aihua Zheng, …, Bo Jiang, …, Jin Tang. 基于局…协同…网络的跨模态行人重识别 (cross-modality person re-identification via a local … collaborative network; parts of the title and author list are illegible in the source). 模式识别与人工智能 (Pattern Recognition and Artificial Intelligence), 2020, 33(10): 867-878. (EI; leading Chinese journal)
7. Zhenbing Zhao, Yongjie Zhai, Ke Zhang, Yinghui Kong, Wenqing Zhao, Yincheng Qi, et al. 电力视觉技术 (Electric Power Vision Technology) [M].
中国电力出版社 (China Electric Power Press), 2020.
8. Zhenbing Zhao, Aixue Jiang, Yincheng Qi, …, Wenqing Zhao. 嵌入…关系模块的SSD模型的输电线路图像金具检测 (fitting detection in transmission line images with an SSD model embedding a … relation module; part of the title is illegible in the source) [J]. 智能系统学报 (CAAI Transactions on Intelligent Systems), 2020, 15(4): 656-662. (一级学报, first-rank Chinese journal)
9. Zhenbing Zhao, Yanxu Li, …, Yongjie Zhai, Ke Zhang, Wenqing Zhao. 结合KL散度和形状约束的Faster R-CNN典型金具检测方法 (A Typical Fitting Detection Method Combining KL Divergence and Shape Constraints with Faster R-CNN) [J]. 高电压技术 (High Voltage Engineering), 2020, 46(9): 3018-3026.
(EI: 20204409425713)
10. Zhenbing Zhao, Yanxu Li, Yincheng Qi, Yinghui Kong, et al. 基于动态焦点损失函数和样本平衡方法的绝缘子缺陷检测方法 (An Insulator Defect Detection Method Based on a Dynamic Focal Loss Function and Sample Balancing) [J]. 电力自动化设备 (Electric Power Automation Equipment), 2020, 40(10): 205-211. (EI: 20204509448008)
11. Zhenbing Zhao, …, Yongjie Zhai, et al. 电力视觉技术的概念、研究现状与展望 (Electric Power Vision Technology: Concept, Research Status and Prospects) [J].
电力科学与工程 (Electric Power Science and Engineering), 2020, 36(01): 1-8.

国际会议 International Conferences
1. Tianzhe Wang, Zetian Jiang, Junchi Yan (corresponding author), Clustering-Aware Multiple Graph Matching via Decayed Pairwise Matching Composition, AAAI 2020.
2. Shiming Ge, Kangkai Zhang, Haolin Liu, et al. Look One and More: Distilling Hybrid Order Relational Knowledge for Cross-Resolution Image Recognition.
In AAAI Conference on Artificial Intelligence (AAAI), 2020: 10845-10852.
3. Ke Cheng, Yifan Zhang, Congqi Cao, Lei Shi, Jian Cheng, and Hanqing Lu, Decoupling GCN with DropGraph Module for Skeleton-Based Action Recognition, in European Conference on Computer Vision (ECCV), 2020.
4. Xu Han, Shanxiong Chen, Shiyu Zhu, Xiaoyu Lin, Fujia Zhao, Dingwang Wang, A Character Detection Method for Ancient Yi Books Based on Connected Components and Regressive Character Segmentation, International Conference on Automated Planning and Scheduling 2020, 13(11): 55-68.
5. Yaolin Tian, Shanxiong Chen, Fujia Zhao, Xiaoyu Lin, Hailing Xiong, The Layout Analysis of Handwriting Characters and the Fusion of Multi-Style Ancient Books Background, International Conference on Automated Planning and Scheduling 2020, 13(11): 111-123.
6. Gehan Hao, Yang Yang, Xue Zhou*, Guanan Wang, and Zhen Lei, Horizontal Flipping Assisted Disentangled Feature Learning for Semi-Supervised Person Re-Identification, accepted to Asian Conference on Computer Vision (ACCV), 2020.
7. Kexin Chen, Xue Zhou*, Chao Liang and Jianxiao Zou, Bidirectional Consistency Constrained Template Update Learning for Siamese Trackers, accepted to IEEE International Conference on Visual Communications and Image Processing (VCIP), 2020.
8. X. Liao*, Z. Huang. A Framework for Parameters Estimation of Image Operator Chain [C]. IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2787-2791, 2020.
[One of the most influential international conferences in signal processing and its applications; CCF Class B conference]
9. J. Lin and L. Dai, X-Net for Single Image Raindrop Removal, in 2020 IEEE International Conference on Image Processing (ICIP), Abu Dhabi, United Arab Emirates, Oct. 2020, pp. 1003-1007, DOI: 10/ghmtnz.
10. Hongchao Li, Chenglong Li, Xianpeng Zhu, Aihua Zheng*, Bin Luo. Multi-Spectral Vehicle Re-Identification: A Challenge. The Thirty-Fourth AAAI Conference on Artificial Intelligence (AAAI 2020), pp. 11345-11353, 2020, New York, USA. (CCF Class A international conference)
11. Hao Zhu, Huaibo Huang, Yi Li, Aihua Zheng and Ran He*. Arbitrary Talking Face Generation via Attentional Audio-Visual Coherence Learning. International Joint Conference on Artificial Intelligence (IJCAI 2020), pp. 2362-2368, 2020, Yokohama, Japan.
(ccf</span><span style='font-size:12.0pt;line-height:110%;font-family: �[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�cp�</span><span lang=en-us style='font-size:12.0pt;line-height:110%'>a</span><span style='font-size:12.0pt;line-height:110%;font-family:�[so;mso-ascii-font-family: "times new roman";mso-hansi-font-family:"times new roman"'>{|�ve�o��</span><span lang=en-us style='font-size:12.0pt;line-height:110%'>)<o:p></o:p></span></p> <p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>12.<span style='font:7.0pt "times new roman"'>&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>xianmin lin, shengwang peng, zhiqi ma, xiaoyi zhou and aihua zheng*. occlusion based discriminative feature mining for vehicle re-identification: international conference of pioneering computer scientists, engineers and educators. (icpcsee) (2020), pp: 246-257, 2020, springer, singapore. (ei)<o:p></o:p></span></p> <p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>13.<span style='font:7.0pt "times new roman"'>&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>shu-yu chen, wanchao su, lin gao, shihong xia, hongbo fu. deepfacedrawing: deep generation of face images from sketches. acm transactions on graphics (siggraph 2020), 2020, 39(4), 72:1-72:16.<o:p></o:p></span></p> <p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>14.<span style='font:7.0pt "times new roman"'>&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>zedu chen, bineng zhong*, guorong li, shengping zhang, rongrong ji. siamese box adaptive network for visual tracking. 2020 ieee international conference on computer vision and pattern recognition, (cvpr) (2020), pp:6668:6677, 2020, seattle washington, usa<o:p></o:p></span></p> <p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>15.<span style='font:7.0pt "times new roman"'>&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>zhihui wang, bing bai, yujun xie, tengfei xing, bineng zhong</span><span lang=en-us style='font-size:12.0pt;line-height:110%;font-family:"cambria math",serif; mso-bidi-font-family:"cambria math"'>"</span><span lang=en-us style='font-size:12.0pt;line-height:110%'>(</span><span style='font-size: 12.0pt;line-height:110%;font-family:�[so;mso-ascii-font-family:"times new roman"; mso-hansi-font-family:"times new roman"'>���\o�</span><span lang=en-us style='font-size:12.0pt;line-height:110%'>), qinqin zhou, yiping meng, bin xu, zhichao song, pengfei xu, runbo hu, hua chai. 
robust and fast vehicle turn-counts at intersections via an integrated solution from detection, tracking and trajectory modeling. ai city challenge</span><span style='font-size:12.0pt;line-height:110%;font-family:�[so;mso-ascii-font-family: "times new roman";mso-hansi-font-family:"times new roman"'> �</span><span lang=en-us style='font-size:12.0pt;line-height:110%'>cvpr 2020 workshop.<o:p></o:p></span></p> <p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>16.<span style='font:7.0pt "times new roman"'>&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>guibiao liao, wei gao*, qiuping jiang, ronggang wang, ge li,  mmnet: multi-stage and multi-scale fusion network for rgb-d salient object detection , acm international conference on multimedia, seattle, wa, usa, 2020. (*corresponding author) <o:p></o:p></span></p> <p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>17.<span style='font:7.0pt "times new roman"'>&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>wei gao*, lvfang tao, linjie zhou, dinghao yang, xiaoyu zhang, zixuan guo,  low-rate image compression with super-resolution learning, ieee/cvf conference on computer vision and pattern recognition (cvpr) workshops, seattle, wa, usa, 2020, pp. 154-155. <o:p></o:p></span></p> <p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>18.<span style='font:7.0pt "times new roman"'>&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt;line-height:110%'>wei gao*,  on the performance evaluation of state-of-the-art rate control algorithms for practical video coding and transmission systems , international conference on video and image processing (icvip), xi an, china, dec. 
25-27, 2020.<o:p></o:p></span></p> <p class=af0 style='mso-outline-level:2'><a name="_toc66800767"></a><a name="_toc24098167"></a><a name="_toc532562254"><span style='mso-bookmark: _toc24098167'><span style='mso-bookmark:_toc66800767'><b style='mso-bidi-font-weight: normal'><span style='font-size:12.0pt;line-height:110%;font-family:�[so; mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�v�qo��</span></b></span></span></a><a name="_toc24465916"></a><a name="_toc24361024"></a><a name="_toc532562424"></a><span style='mso-bookmark:_toc532562424'><span style='mso-bookmark:_toc24361024'><span style='mso-bookmark:_toc24465916'><span style='mso-bookmark:_toc66800767'><b style='mso-bidi-font-weight:normal'><span lang=en-us style='font-size:12.0pt; line-height:110%'>national conference</span></b></span></span></span></span><span style='mso-bookmark:_toc24465916'><span style='mso-bookmark:_toc66800767'><b style='mso-bidi-font-weight:normal'><span lang=en-us style='font-size:12.0pt; line-height:110%'>s</span></b></span></span><b style='mso-bidi-font-weight: normal'><span lang=en-us style='font-size:12.0pt;line-height:110%'><o:p></o:p></span></b></p> <p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt; mso-char-indent-count:0;mso-list:l6 level1 lfo18'><![if !supportlists]><span lang=en-us style='font-size:12.0pt;line-height:110%;mso-fareast-font-family: "times new roman";mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; </span></span></span><![endif]><span lang=en-us style='font-size:12.0pt; line-height:110%'>ziyi chen, chengyang ji, qin shen, wei liu, f xiao-feng qin, aiping wu, tissue-specific deconvolution of immune cell composition by integrating bulk and single-cell transcriptomes, bioinformatics, btz672, https://doi.org/10.1093/bioinformatics/btz672 <o:p></o:p></span></p> </td> </tr> </table> </div> <p class=msonormal align=left style='text-align:left;mso-pagination:widow-orphan'><span lang=en-us style='font-size:12.0pt;font-family:�[so;mso-bidi-font-family:�[so; mso-font-kerning:0pt'><o:p>&nbsp;</o:p></span></p> </div> </body> </html>