-澳门游戏娱乐场棋牌
��<html xmlns:v="urn:schemas-microsoft-com:vml"
xmlns:o="urn:schemas-microsoft-com:office:office"
xmlns:w="urn:schemas-microsoft-com:office:word"
xmlns:m="http://schemas.microsoft.com/office/2004/12/omml"
xmlns="http://www.w3.org/tr/rec-html40">
<head>
<meta http-equiv=content-type content="text/html; charset=unicode">
<meta name=progid content=word.document>
<meta name=generator content="microsoft word 15">
<meta name=originator content="microsoft word 15">
<link rel=file-list href="2019.files/filelist.xml">
<link rel=edit-time-data href="2019.files/editdata.mso">
<!--[if !mso]>
<style>
v\:* {behavior:url(/clunwen/2019.html);}
o\:* {behavior:url(/clunwen/2019.html);}
w\:* {behavior:url(/clunwen/2019.html);}
.shape {behavior:url(/clunwen/2019.html);}
</style>
<![endif]-->
<title>2019���e</title>
<!--[if gte mso 9]><xml>
<o:documentproperties>
<o:author>cd</o:author>
<o:lastauthor>cd</o:lastauthor>
<o:revision>4</o:revision>
<o:totaltime>1</o:totaltime>
<o:created>2020-05-26t02:56:00z</o:created>
<o:lastsaved>2020-05-26t02:58:00z</o:lastsaved>
<o:pages>1</o:pages>
<o:words>15532</o:words>
<o:characters>88533</o:characters>
<o:lines>737</o:lines>
<o:paragraphs>207</o:paragraphs>
<o:characterswithspaces>103858</o:characterswithspaces>
<o:version>16.00</o:version>
</o:documentproperties>
<o:officedocumentsettings>
<o:allowpng/>
</o:officedocumentsettings>
</xml><![endif]-->
<link rel=themedata href="2019.files/themedata.thmx">
<link rel=colorschememapping href="2019.files/colorschememapping.xml">
<!--[if gte mso 9]><xml>
<w:worddocument>
<w:trackmoves>false</w:trackmoves>
<w:trackformatting/>
<w:validateagainstschemas/>
<w:saveifxmlinvalid>false</w:saveifxmlinvalid>
<w:ignoremixedcontent>false</w:ignoremixedcontent>
<w:alwaysshowplaceholdertext>false</w:alwaysshowplaceholdertext>
<w:donotpromoteqf/>
<w:lidthemeother>en-us</w:lidthemeother>
<w:lidthemeasian>zh-cn</w:lidthemeasian>
<w:lidthemecomplexscript>x-none</w:lidthemecomplexscript>
<w:compatibility>
<w:breakwrappedtables/>
<w:splitpgbreakandparamark/>
<w:usefelayout/>
</w:compatibility>
<w:browserlevel>microsoftinternetexplorer4</w:browserlevel>
<m:mathpr>
<m:mathfont m:val="cambria math"/>
<m:brkbin m:val="before"/>
<m:brkbinsub m:val="--"/>
<m:smallfrac m:val="off"/>
<m:dispdef/>
<m:lmargin m:val="0"/>
<m:rmargin m:val="0"/>
<m:defjc m:val="centergroup"/>
<m:wrapindent m:val="1440"/>
<m:intlim m:val="subsup"/>
<m:narylim m:val="undovr"/>
</m:mathpr></w:worddocument>
</xml><![endif]--><!--[if gte mso 9]><xml>
<w:latentstyles deflockedstate="false" defunhidewhenused="false"
defsemihidden="false" defqformat="false" defpriority="99"
latentstylecount="371">
<w:lsdexception locked="false" priority="0" qformat="true" name="normal"/>
<w:lsdexception locked="false" priority="9" qformat="true" name="heading 1"/>
<w:lsdexception locked="false" priority="9" semihidden="true"
unhidewhenused="true" qformat="true" name="heading 2"/>
<w:lsdexception locked="false" priority="9" semihidden="true"
unhidewhenused="true" qformat="true" name="heading 3"/>
<w:lsdexception locked="false" priority="0" semihidden="true"
unhidewhenused="true" qformat="true" name="heading 4"/>
<w:lsdexception locked="false" priority="9" semihidden="true"
unhidewhenused="true" qformat="true" name="heading 5"/>
<w:lsdexception locked="false" priority="9" semihidden="true"
unhidewhenused="true" qformat="true" name="heading 6"/>
<w:lsdexception locked="false" priority="9" semihidden="true"
unhidewhenused="true" qformat="true" name="heading 7"/>
<w:lsdexception locked="false" priority="9" semihidden="true"
unhidewhenused="true" qformat="true" name="heading 8"/>
<w:lsdexception locked="false" priority="9" semihidden="true"
unhidewhenused="true" qformat="true" name="heading 9"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="index 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="index 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="index 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="index 4"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="index 5"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="index 6"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="index 7"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="index 8"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="index 9"/>
<w:lsdexception locked="false" priority="39" semihidden="true"
unhidewhenused="true" name="toc 1"/>
<w:lsdexception locked="false" priority="39" semihidden="true"
unhidewhenused="true" name="toc 2"/>
<w:lsdexception locked="false" priority="39" semihidden="true"
unhidewhenused="true" name="toc 3"/>
<w:lsdexception locked="false" priority="39" semihidden="true"
unhidewhenused="true" name="toc 4"/>
<w:lsdexception locked="false" priority="39" semihidden="true"
unhidewhenused="true" name="toc 5"/>
<w:lsdexception locked="false" priority="39" semihidden="true"
unhidewhenused="true" name="toc 6"/>
<w:lsdexception locked="false" priority="39" semihidden="true"
unhidewhenused="true" name="toc 7"/>
<w:lsdexception locked="false" priority="39" semihidden="true"
unhidewhenused="true" name="toc 8"/>
<w:lsdexception locked="false" priority="39" semihidden="true"
unhidewhenused="true" name="toc 9"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
qformat="true" name="normal indent"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="footnote text"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="annotation text"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="header"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
qformat="true" name="footer"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="index heading"/>
<w:lsdexception locked="false" priority="35" semihidden="true"
unhidewhenused="true" qformat="true" name="caption"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table of figures"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="envelope address"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="envelope return"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="footnote reference"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="annotation reference"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="line number"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="page number"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="endnote reference"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="endnote text"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table of authorities"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="macro"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="toa heading"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list bullet"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list number"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list 4"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list 5"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list bullet 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list bullet 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list bullet 4"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list bullet 5"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list number 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list number 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list number 4"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list number 5"/>
<w:lsdexception locked="false" qformat="true" name="title"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="closing"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="signature"/>
<w:lsdexception locked="false" priority="1" semihidden="true"
unhidewhenused="true" name="default paragraph font"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="body text"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="body text indent"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list continue"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list continue 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list continue 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list continue 4"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="list continue 5"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="message header"/>
<w:lsdexception locked="false" priority="11" qformat="true" name="subtitle"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="salutation"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="date"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="body text first indent"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="body text first indent 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="note heading"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="body text 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="body text 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="body text indent 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="body text indent 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="block text"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
qformat="true" name="hyperlink"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="followedhyperlink"/>
<w:lsdexception locked="false" priority="22" qformat="true" name="strong"/>
<w:lsdexception locked="false" priority="20" qformat="true" name="emphasis"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="document map"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
qformat="true" name="plain text"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="e-mail signature"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html top of form"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html bottom of form"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="normal (web)"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html acronym"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html address"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html cite"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html code"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html definition"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html keyboard"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html preformatted"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html sample"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html typewriter"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="html variable"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="normal table"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="annotation subject"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="no list"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="outline list 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="outline list 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="outline list 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table simple 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table simple 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table simple 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table classic 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table classic 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table classic 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table classic 4"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table colorful 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table colorful 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table colorful 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table columns 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table columns 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table columns 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table columns 4"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table columns 5"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table grid 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table grid 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table grid 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table grid 4"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table grid 5"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table grid 6"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table grid 7"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table grid 8"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table list 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table list 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table list 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table list 4"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table list 5"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table list 6"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table list 7"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table list 8"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table 3d effects 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table 3d effects 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table 3d effects 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table contemporary"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table elegant"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table professional"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table subtle 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table subtle 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table web 1"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table web 2"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table web 3"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="balloon text"/>
<w:lsdexception locked="false" priority="39" name="table grid"/>
<w:lsdexception locked="false" semihidden="true" unhidewhenused="true"
name="table theme"/>
<w:lsdexception locked="false" semihidden="true" name="placeholder text"/>
<w:lsdexception locked="false" priority="1" qformat="true" name="no spacing"/>
<w:lsdexception locked="false" priority="60" name="light shading"/>
<w:lsdexception locked="false" priority="61" name="light list"/>
<w:lsdexception locked="false" priority="62" name="light grid"/>
<w:lsdexception locked="false" priority="63" name="medium shading 1"/>
<w:lsdexception locked="false" priority="64" name="medium shading 2"/>
<w:lsdexception locked="false" priority="65" name="medium list 1"/>
<w:lsdexception locked="false" priority="66" name="medium list 2"/>
<w:lsdexception locked="false" priority="67" name="medium grid 1"/>
<w:lsdexception locked="false" priority="68" name="medium grid 2"/>
<w:lsdexception locked="false" priority="69" name="medium grid 3"/>
<w:lsdexception locked="false" priority="70" name="dark list"/>
<w:lsdexception locked="false" priority="71" name="colorful shading"/>
<w:lsdexception locked="false" priority="72" name="colorful list"/>
<w:lsdexception locked="false" priority="73" name="colorful grid"/>
<w:lsdexception locked="false" priority="60" name="light shading accent 1"/>
<w:lsdexception locked="false" priority="61" name="light list accent 1"/>
<w:lsdexception locked="false" priority="62" name="light grid accent 1"/>
<w:lsdexception locked="false" priority="63" name="medium shading 1 accent 1"/>
<w:lsdexception locked="false" priority="64" name="medium shading 2 accent 1"/>
<w:lsdexception locked="false" priority="65" name="medium list 1 accent 1"/>
<w:lsdexception locked="false" semihidden="true" name="revision"/>
<w:lsdexception locked="false" priority="34" qformat="true"
name="list paragraph"/>
<w:lsdexception locked="false" priority="29" qformat="true" name="quote"/>
<w:lsdexception locked="false" priority="30" qformat="true"
name="intense quote"/>
<w:lsdexception locked="false" priority="66" name="medium list 2 accent 1"/>
<w:lsdexception locked="false" priority="67" name="medium grid 1 accent 1"/>
<w:lsdexception locked="false" priority="68" name="medium grid 2 accent 1"/>
<w:lsdexception locked="false" priority="69" name="medium grid 3 accent 1"/>
<w:lsdexception locked="false" priority="70" name="dark list accent 1"/>
<w:lsdexception locked="false" priority="71" name="colorful shading accent 1"/>
<w:lsdexception locked="false" priority="72" name="colorful list accent 1"/>
<w:lsdexception locked="false" priority="73" name="colorful grid accent 1"/>
<w:lsdexception locked="false" priority="60" name="light shading accent 2"/>
<w:lsdexception locked="false" priority="61" name="light list accent 2"/>
<w:lsdexception locked="false" priority="62" name="light grid accent 2"/>
<w:lsdexception locked="false" priority="63" name="medium shading 1 accent 2"/>
<w:lsdexception locked="false" priority="64" name="medium shading 2 accent 2"/>
<w:lsdexception locked="false" priority="65" name="medium list 1 accent 2"/>
<w:lsdexception locked="false" priority="66" name="medium list 2 accent 2"/>
<w:lsdexception locked="false" priority="67" name="medium grid 1 accent 2"/>
<w:lsdexception locked="false" priority="68" name="medium grid 2 accent 2"/>
<w:lsdexception locked="false" priority="69" name="medium grid 3 accent 2"/>
<w:lsdexception locked="false" priority="70" name="dark list accent 2"/>
<w:lsdexception locked="false" priority="71" name="colorful shading accent 2"/>
<w:lsdexception locked="false" priority="72" name="colorful list accent 2"/>
<w:lsdexception locked="false" priority="73" name="colorful grid accent 2"/>
<w:lsdexception locked="false" priority="60" name="light shading accent 3"/>
<w:lsdexception locked="false" priority="61" name="light list accent 3"/>
<w:lsdexception locked="false" priority="62" name="light grid accent 3"/>
<w:lsdexception locked="false" priority="63" name="medium shading 1 accent 3"/>
<w:lsdexception locked="false" priority="64" name="medium shading 2 accent 3"/>
<w:lsdexception locked="false" priority="65" name="medium list 1 accent 3"/>
<w:lsdexception locked="false" priority="66" name="medium list 2 accent 3"/>
<w:lsdexception locked="false" priority="67" name="medium grid 1 accent 3"/>
<w:lsdexception locked="false" priority="68" name="medium grid 2 accent 3"/>
<w:lsdexception locked="false" priority="69" name="medium grid 3 accent 3"/>
<w:lsdexception locked="false" priority="70" name="dark list accent 3"/>
<w:lsdexception locked="false" priority="71" name="colorful shading accent 3"/>
<w:lsdexception locked="false" priority="72" name="colorful list accent 3"/>
<w:lsdexception locked="false" priority="73" name="colorful grid accent 3"/>
<w:lsdexception locked="false" priority="60" name="light shading accent 4"/>
<w:lsdexception locked="false" priority="61" name="light list accent 4"/>
<w:lsdexception locked="false" priority="62" name="light grid accent 4"/>
<w:lsdexception locked="false" priority="63" name="medium shading 1 accent 4"/>
<w:lsdexception locked="false" priority="64" name="medium shading 2 accent 4"/>
<w:lsdexception locked="false" priority="65" name="medium list 1 accent 4"/>
<w:lsdexception locked="false" priority="66" name="medium list 2 accent 4"/>
<w:lsdexception locked="false" priority="67" name="medium grid 1 accent 4"/>
<w:lsdexception locked="false" priority="68" name="medium grid 2 accent 4"/>
<w:lsdexception locked="false" priority="69" name="medium grid 3 accent 4"/>
<w:lsdexception locked="false" priority="70" name="dark list accent 4"/>
<w:lsdexception locked="false" priority="71" name="colorful shading accent 4"/>
<w:lsdexception locked="false" priority="72" name="colorful list accent 4"/>
<w:lsdexception locked="false" priority="73" name="colorful grid accent 4"/>
<w:lsdexception locked="false" priority="60" name="light shading accent 5"/>
<w:lsdexception locked="false" priority="61" name="light list accent 5"/>
<w:lsdexception locked="false" priority="62" name="light grid accent 5"/>
<w:lsdexception locked="false" priority="63" name="medium shading 1 accent 5"/>
<w:lsdexception locked="false" priority="64" name="medium shading 2 accent 5"/>
<w:lsdexception locked="false" priority="65" name="medium list 1 accent 5"/>
<w:lsdexception locked="false" priority="66" name="medium list 2 accent 5"/>
<w:lsdexception locked="false" priority="67" name="medium grid 1 accent 5"/>
<w:lsdexception locked="false" priority="68" name="medium grid 2 accent 5"/>
<w:lsdexception locked="false" priority="69" name="medium grid 3 accent 5"/>
<w:lsdexception locked="false" priority="70" name="dark list accent 5"/>
<w:lsdexception locked="false" priority="71" name="colorful shading accent 5"/>
<w:lsdexception locked="false" priority="72" name="colorful list accent 5"/>
<w:lsdexception locked="false" priority="73" name="colorful grid accent 5"/>
<w:lsdexception locked="false" priority="60" name="light shading accent 6"/>
<w:lsdexception locked="false" priority="61" name="light list accent 6"/>
<w:lsdexception locked="false" priority="62" name="light grid accent 6"/>
<w:lsdexception locked="false" priority="63" name="medium shading 1 accent 6"/>
<w:lsdexception locked="false" priority="64" name="medium shading 2 accent 6"/>
<w:lsdexception locked="false" priority="65" name="medium list 1 accent 6"/>
<w:lsdexception locked="false" priority="66" name="medium list 2 accent 6"/>
<w:lsdexception locked="false" priority="67" name="medium grid 1 accent 6"/>
<w:lsdexception locked="false" priority="68" name="medium grid 2 accent 6"/>
<w:lsdexception locked="false" priority="69" name="medium grid 3 accent 6"/>
<w:lsdexception locked="false" priority="70" name="dark list accent 6"/>
<w:lsdexception locked="false" priority="71" name="colorful shading accent 6"/>
<w:lsdexception locked="false" priority="72" name="colorful list accent 6"/>
<w:lsdexception locked="false" priority="73" name="colorful grid accent 6"/>
<w:lsdexception locked="false" priority="19" qformat="true"
name="subtle emphasis"/>
<w:lsdexception locked="false" priority="21" qformat="true"
name="intense emphasis"/>
<w:lsdexception locked="false" priority="31" qformat="true"
name="subtle reference"/>
<w:lsdexception locked="false" priority="32" qformat="true"
name="intense reference"/>
<w:lsdexception locked="false" priority="33" qformat="true" name="book title"/>
<w:lsdexception locked="false" priority="37" semihidden="true"
unhidewhenused="true" name="bibliography"/>
<w:lsdexception locked="false" priority="39" semihidden="true"
unhidewhenused="true" qformat="true" name="toc heading"/>
<w:lsdexception locked="false" priority="41" name="plain table 1"/>
<w:lsdexception locked="false" priority="42" name="plain table 2"/>
<w:lsdexception locked="false" priority="43" name="plain table 3"/>
<w:lsdexception locked="false" priority="44" name="plain table 4"/>
<w:lsdexception locked="false" priority="45" name="plain table 5"/>
<w:lsdexception locked="false" priority="40" name="grid table light"/>
<w:lsdexception locked="false" priority="46" name="grid table 1 light"/>
<w:lsdexception locked="false" priority="47" name="grid table 2"/>
<w:lsdexception locked="false" priority="48" name="grid table 3"/>
<w:lsdexception locked="false" priority="49" name="grid table 4"/>
<w:lsdexception locked="false" priority="50" name="grid table 5 dark"/>
<w:lsdexception locked="false" priority="51" name="grid table 6 colorful"/>
<w:lsdexception locked="false" priority="52" name="grid table 7 colorful"/>
<w:lsdexception locked="false" priority="46"
name="grid table 1 light accent 1"/>
<w:lsdexception locked="false" priority="47" name="grid table 2 accent 1"/>
<w:lsdexception locked="false" priority="48" name="grid table 3 accent 1"/>
<w:lsdexception locked="false" priority="49" name="grid table 4 accent 1"/>
<w:lsdexception locked="false" priority="50" name="grid table 5 dark accent 1"/>
<w:lsdexception locked="false" priority="51"
name="grid table 6 colorful accent 1"/>
<w:lsdexception locked="false" priority="52"
name="grid table 7 colorful accent 1"/>
<w:lsdexception locked="false" priority="46"
name="grid table 1 light accent 2"/>
<w:lsdexception locked="false" priority="47" name="grid table 2 accent 2"/>
<w:lsdexception locked="false" priority="48" name="grid table 3 accent 2"/>
<w:lsdexception locked="false" priority="49" name="grid table 4 accent 2"/>
<w:lsdexception locked="false" priority="50" name="grid table 5 dark accent 2"/>
<w:lsdexception locked="false" priority="51"
name="grid table 6 colorful accent 2"/>
<w:lsdexception locked="false" priority="52"
name="grid table 7 colorful accent 2"/>
<w:lsdexception locked="false" priority="46"
name="grid table 1 light accent 3"/>
<w:lsdexception locked="false" priority="47" name="grid table 2 accent 3"/>
<w:lsdexception locked="false" priority="48" name="grid table 3 accent 3"/>
<w:lsdexception locked="false" priority="49" name="grid table 4 accent 3"/>
<w:lsdexception locked="false" priority="50" name="grid table 5 dark accent 3"/>
<w:lsdexception locked="false" priority="51"
name="grid table 6 colorful accent 3"/>
<w:lsdexception locked="false" priority="52"
name="grid table 7 colorful accent 3"/>
<w:lsdexception locked="false" priority="46"
name="grid table 1 light accent 4"/>
<w:lsdexception locked="false" priority="47" name="grid table 2 accent 4"/>
<w:lsdexception locked="false" priority="48" name="grid table 3 accent 4"/>
<w:lsdexception locked="false" priority="49" name="grid table 4 accent 4"/>
<w:lsdexception locked="false" priority="50" name="grid table 5 dark accent 4"/>
<w:lsdexception locked="false" priority="51"
name="grid table 6 colorful accent 4"/>
<w:lsdexception locked="false" priority="52"
name="grid table 7 colorful accent 4"/>
<w:lsdexception locked="false" priority="46"
name="grid table 1 light accent 5"/>
<w:lsdexception locked="false" priority="47" name="grid table 2 accent 5"/>
<w:lsdexception locked="false" priority="48" name="grid table 3 accent 5"/>
<w:lsdexception locked="false" priority="49" name="grid table 4 accent 5"/>
<w:lsdexception locked="false" priority="50" name="grid table 5 dark accent 5"/>
<w:lsdexception locked="false" priority="51"
name="grid table 6 colorful accent 5"/>
<w:lsdexception locked="false" priority="52"
name="grid table 7 colorful accent 5"/>
<w:lsdexception locked="false" priority="46"
name="grid table 1 light accent 6"/>
<w:lsdexception locked="false" priority="47" name="grid table 2 accent 6"/>
<w:lsdexception locked="false" priority="48" name="grid table 3 accent 6"/>
<w:lsdexception locked="false" priority="49" name="grid table 4 accent 6"/>
<w:lsdexception locked="false" priority="50" name="grid table 5 dark accent 6"/>
<w:lsdexception locked="false" priority="51"
name="grid table 6 colorful accent 6"/>
<w:lsdexception locked="false" priority="52"
name="grid table 7 colorful accent 6"/>
<w:lsdexception locked="false" priority="46" name="list table 1 light"/>
<w:lsdexception locked="false" priority="47" name="list table 2"/>
<w:lsdexception locked="false" priority="48" name="list table 3"/>
<w:lsdexception locked="false" priority="49" name="list table 4"/>
<w:lsdexception locked="false" priority="50" name="list table 5 dark"/>
<w:lsdexception locked="false" priority="51" name="list table 6 colorful"/>
<w:lsdexception locked="false" priority="52" name="list table 7 colorful"/>
<w:lsdexception locked="false" priority="46"
name="list table 1 light accent 1"/>
<w:lsdexception locked="false" priority="47" name="list table 2 accent 1"/>
<w:lsdexception locked="false" priority="48" name="list table 3 accent 1"/>
<w:lsdexception locked="false" priority="49" name="list table 4 accent 1"/>
<w:lsdexception locked="false" priority="50" name="list table 5 dark accent 1"/>
<w:lsdexception locked="false" priority="51"
name="list table 6 colorful accent 1"/>
<w:lsdexception locked="false" priority="52"
name="list table 7 colorful accent 1"/>
<w:lsdexception locked="false" priority="46"
name="list table 1 light accent 2"/>
<w:lsdexception locked="false" priority="47" name="list table 2 accent 2"/>
<w:lsdexception locked="false" priority="48" name="list table 3 accent 2"/>
<w:lsdexception locked="false" priority="49" name="list table 4 accent 2"/>
<w:lsdexception locked="false" priority="50" name="list table 5 dark accent 2"/>
<w:lsdexception locked="false" priority="51"
name="list table 6 colorful accent 2"/>
<w:lsdexception locked="false" priority="52"
name="list table 7 colorful accent 2"/>
<w:lsdexception locked="false" priority="46"
name="list table 1 light accent 3"/>
<w:lsdexception locked="false" priority="47" name="list table 2 accent 3"/>
<w:lsdexception locked="false" priority="48" name="list table 3 accent 3"/>
<w:lsdexception locked="false" priority="49" name="list table 4 accent 3"/>
<w:lsdexception locked="false" priority="50" name="list table 5 dark accent 3"/>
<w:lsdexception locked="false" priority="51"
name="list table 6 colorful accent 3"/>
<w:lsdexception locked="false" priority="52"
name="list table 7 colorful accent 3"/>
<w:lsdexception locked="false" priority="46"
name="list table 1 light accent 4"/>
<w:lsdexception locked="false" priority="47" name="list table 2 accent 4"/>
<w:lsdexception locked="false" priority="48" name="list table 3 accent 4"/>
<w:lsdexception locked="false" priority="49" name="list table 4 accent 4"/>
<w:lsdexception locked="false" priority="50" name="list table 5 dark accent 4"/>
<w:lsdexception locked="false" priority="51"
name="list table 6 colorful accent 4"/>
<w:lsdexception locked="false" priority="52"
name="list table 7 colorful accent 4"/>
<w:lsdexception locked="false" priority="46"
name="list table 1 light accent 5"/>
<w:lsdexception locked="false" priority="47" name="list table 2 accent 5"/>
<w:lsdexception locked="false" priority="48" name="list table 3 accent 5"/>
<w:lsdexception locked="false" priority="49" name="list table 4 accent 5"/>
<w:lsdexception locked="false" priority="50" name="list table 5 dark accent 5"/>
<w:lsdexception locked="false" priority="51"
name="list table 6 colorful accent 5"/>
<w:lsdexception locked="false" priority="52"
name="list table 7 colorful accent 5"/>
<w:lsdexception locked="false" priority="46"
name="list table 1 light accent 6"/>
<w:lsdexception locked="false" priority="47" name="list table 2 accent 6"/>
<w:lsdexception locked="false" priority="48" name="list table 3 accent 6"/>
<w:lsdexception locked="false" priority="49" name="list table 4 accent 6"/>
<w:lsdexception locked="false" priority="50" name="list table 5 dark accent 6"/>
<w:lsdexception locked="false" priority="51"
name="list table 6 colorful accent 6"/>
<w:lsdexception locked="false" priority="52"
name="list table 7 colorful accent 6"/>
</w:latentstyles>
</xml><![endif]-->
<style>
<!--
/* font definitions */
@font-face
{font-family:�[so;
panose-1:2 1 6 0 3 1 1 1 1 1;
mso-font-alt:simsun;
mso-font-charset:134;
mso-generic-font-family:auto;
mso-font-pitch:variable;
mso-font-signature:3 680460288 22 0 262145 0;}
@font-face
{font-family:ўso;
panose-1:2 1 6 9 6 1 1 1 1 1;
mso-font-alt:simhei;
mso-font-charset:134;
mso-generic-font-family:modern;
mso-font-pitch:fixed;
mso-font-signature:-2147482945 953122042 22 0 262145 0;}
@font-face
{font-family:"cambria math";
panose-1:2 4 5 3 5 4 6 3 2 4;
mso-font-charset:0;
mso-generic-font-family:roman;
mso-font-pitch:variable;
mso-font-signature:-536869121 1107305727 33554432 0 415 0;}
@font-face
{font-family:i{�~;
panose-1:2 1 6 0 3 1 1 1 1 1;
mso-font-alt:dengxian;
mso-font-charset:134;
mso-generic-font-family:auto;
mso-font-pitch:variable;
mso-font-signature:-1610612033 953122042 22 0 262159 0;}
@font-face
{font-family:"arial unicode ms";
panose-1:2 11 6 4 2 2 2 2 2 4;
mso-font-charset:134;
mso-generic-font-family:swiss;
mso-font-pitch:variable;
mso-font-signature:0 -371195905 63 0 4129279 0;}
@font-face
{font-family:cambria;
panose-1:2 4 5 3 5 4 6 3 2 4;
mso-font-charset:0;
mso-generic-font-family:roman;
mso-font-pitch:variable;
mso-font-signature:-536869121 1107305727 33554432 0 415 0;}
@font-face
{font-family:verdana;
panose-1:2 11 6 4 3 5 4 4 2 4;
mso-font-charset:0;
mso-generic-font-family:swiss;
mso-font-pitch:variable;
mso-font-signature:-1610610945 1073750107 16 0 415 0;}
@font-face
{font-family:�n�[_gb2312;
panose-1:2 1 6 9 3 1 1 1 1 1;
mso-font-charset:134;
mso-generic-font-family:modern;
mso-font-pitch:fixed;
mso-font-signature:1 135135232 16 0 262144 0;}
@font-face
{font-family:calibri;
panose-1:2 15 5 2 2 2 4 3 2 4;
mso-font-charset:0;
mso-generic-font-family:swiss;
mso-font-pitch:variable;
mso-font-signature:-536859905 -1073732485 9 0 511 0;}
@font-face
{font-family:cmbx9;
mso-font-alt:cambria;
mso-font-charset:0;
mso-generic-font-family:roman;
mso-font-pitch:auto;
mso-font-signature:0 0 0 0 0 0;}
@font-face
{font-family:"\@�[so";
panose-1:2 1 6 0 3 1 1 1 1 1;
mso-font-charset:134;
mso-generic-font-family:auto;
mso-font-pitch:variable;
mso-font-signature:3 680460288 22 0 262145 0;}
@font-face
{font-family:"\@i{�~";
panose-1:2 1 6 0 3 1 1 1 1 1;
mso-font-charset:134;
mso-generic-font-family:auto;
mso-font-pitch:variable;
mso-font-signature:-1610612033 953122042 22 0 262159 0;}
@font-face
{font-family:"\@ўso";
panose-1:2 1 6 0 3 1 1 1 1 1;
mso-font-charset:134;
mso-generic-font-family:modern;
mso-font-pitch:fixed;
mso-font-signature:-2147482945 953122042 22 0 262145 0;}
@font-face
{font-family:"\@�n�[_gb2312";
mso-font-charset:134;
mso-generic-font-family:modern;
mso-font-pitch:fixed;
mso-font-signature:1 135135232 16 0 262144 0;}
@font-face
{font-family:"\@arial unicode ms";
panose-1:2 11 6 4 2 2 2 2 2 4;
mso-font-charset:134;
mso-generic-font-family:swiss;
mso-font-pitch:variable;
mso-font-signature:0 -371195905 63 0 4129279 0;}
/* style definitions */
p.msonormal, li.msonormal, div.msonormal
{mso-style-unhide:no;
mso-style-qformat:yes;
mso-style-parent:"";
margin:0cm;
margin-bottom:.0001pt;
text-align:justify;
text-justify:inter-ideograph;
mso-pagination:none;
font-size:10.5pt;
mso-bidi-font-size:11.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:1.0pt;}
h1
{mso-style-priority:9;
mso-style-unhide:no;
mso-style-qformat:yes;
mso-style-link:"h�� 1 w[&{";
mso-style-next:ck�e;
margin-top:17.0pt;
margin-right:0cm;
margin-bottom:16.5pt;
margin-left:0cm;
text-align:justify;
text-justify:inter-ideograph;
line-height:240%;
mso-pagination:lines-together;
page-break-after:avoid;
mso-outline-level:1;
font-size:22.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:22.0pt;
font-weight:bold;}
h2
{mso-style-noshow:yes;
mso-style-priority:9;
mso-style-qformat:yes;
mso-style-link:"h�� 2 w[&{";
mso-style-next:ck�e;
margin-top:13.0pt;
margin-right:0cm;
margin-bottom:13.0pt;
margin-left:0cm;
text-align:justify;
text-justify:inter-ideograph;
line-height:172%;
mso-pagination:lines-together;
page-break-after:avoid;
mso-outline-level:2;
font-size:16.0pt;
font-family:"i{�~ light";
mso-ascii-font-family:"i{�~ light";
mso-ascii-theme-font:major-latin;
mso-fareast-font-family:"i{�~ light";
mso-fareast-theme-font:major-fareast;
mso-hansi-font-family:"i{�~ light";
mso-hansi-theme-font:major-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:major-bidi;
mso-font-kerning:1.0pt;
font-weight:bold;}
h3
{mso-style-noshow:yes;
mso-style-priority:9;
mso-style-qformat:yes;
mso-style-link:"h�� 3 w[&{";
mso-style-next:ck�e;
margin-top:13.0pt;
margin-right:0cm;
margin-bottom:13.0pt;
margin-left:0cm;
text-align:justify;
text-justify:inter-ideograph;
line-height:172%;
mso-pagination:lines-together;
page-break-after:avoid;
mso-outline-level:3;
font-size:16.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:1.0pt;
font-weight:bold;}
h4
{mso-style-noshow:yes;
mso-style-qformat:yes;
mso-style-link:"h�� 4 w[&{1";
mso-style-next:ck�e;
margin-top:14.0pt;
margin-right:0cm;
margin-bottom:14.5pt;
margin-left:0cm;
text-align:justify;
text-justify:inter-ideograph;
line-height:155%;
mso-pagination:lines-together;
page-break-after:avoid;
mso-outline-level:4;
font-size:14.0pt;
font-family:"cambria",serif;
mso-fareast-font-family:�[so;
mso-bidi-font-family:"times new roman";
mso-font-kerning:1.0pt;
font-weight:bold;}
h6
{mso-style-noshow:yes;
mso-style-priority:9;
mso-style-qformat:yes;
mso-style-link:"h�� 6 w[&{";
mso-style-next:ck�e;
margin-top:12.0pt;
margin-right:0cm;
margin-bottom:3.2pt;
margin-left:0cm;
text-align:justify;
text-justify:inter-ideograph;
line-height:132%;
mso-pagination:lines-together;
page-break-after:avoid;
mso-outline-level:6;
font-size:12.0pt;
font-family:"i{�~ light";
mso-ascii-font-family:"i{�~ light";
mso-ascii-theme-font:major-latin;
mso-fareast-font-family:"i{�~ light";
mso-fareast-theme-font:major-fareast;
mso-hansi-font-family:"i{�~ light";
mso-hansi-theme-font:major-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:major-bidi;
mso-font-kerning:1.0pt;
font-weight:bold;}
p.msotoc1, li.msotoc1, div.msotoc1
{mso-style-update:auto;
mso-style-noshow:yes;
mso-style-priority:39;
mso-style-next:ck�e;
margin:0cm;
margin-bottom:.0001pt;
text-align:justify;
text-justify:inter-ideograph;
mso-pagination:none;
font-size:10.5pt;
mso-bidi-font-size:11.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:1.0pt;}
p.msotoc2, li.msotoc2, div.msotoc2
{mso-style-update:auto;
mso-style-noshow:yes;
mso-style-priority:39;
mso-style-next:ck�e;
margin-top:0cm;
margin-right:0cm;
margin-bottom:0cm;
margin-left:21.0pt;
margin-bottom:.0001pt;
mso-para-margin-top:0cm;
mso-para-margin-right:0cm;
mso-para-margin-bottom:0cm;
mso-para-margin-left:2.0gd;
mso-para-margin-bottom:.0001pt;
text-align:justify;
text-justify:inter-ideograph;
mso-pagination:none;
font-size:10.5pt;
mso-bidi-font-size:11.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:1.0pt;}
p.msotoc3, li.msotoc3, div.msotoc3
{mso-style-update:auto;
mso-style-noshow:yes;
mso-style-priority:39;
mso-style-next:ck�e;
margin-top:0cm;
margin-right:0cm;
margin-bottom:0cm;
margin-left:42.0pt;
margin-bottom:.0001pt;
mso-para-margin-top:0cm;
mso-para-margin-right:0cm;
mso-para-margin-bottom:0cm;
mso-para-margin-left:4.0gd;
mso-para-margin-bottom:.0001pt;
text-align:justify;
text-justify:inter-ideograph;
mso-pagination:none;
font-size:10.5pt;
mso-bidi-font-size:11.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:1.0pt;}
p.msonormalindent, li.msonormalindent, div.msonormalindent
{mso-style-name:"ck�e)ۏ\,ck�e���l�)ۏ$nw[ �";
mso-style-noshow:yes;
mso-style-priority:99;
mso-style-qformat:yes;
margin:0cm;
margin-bottom:.0001pt;
text-align:justify;
text-justify:inter-ideograph;
text-indent:21.0pt;
line-height:20.0pt;
mso-line-height-rule:exactly;
mso-pagination:none;
font-size:12.0pt;
mso-bidi-font-size:10.0pt;
font-family:�[so;
mso-hansi-font-family:"times new roman";
mso-bidi-font-family:"times new roman";
mso-font-kerning:1.0pt;}
p.msocommenttext, li.msocommenttext, div.msocommenttext
{mso-style-noshow:yes;
mso-style-priority:99;
mso-style-link:"yb�l�ew[ w[&{";
margin:0cm;
margin-bottom:.0001pt;
mso-pagination:none;
font-size:10.5pt;
mso-bidi-font-size:11.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:1.0pt;}
p.msoheader, li.msoheader, div.msoheader
{mso-style-priority:99;
mso-style-link:"u� w w[&{";
margin:0cm;
margin-bottom:.0001pt;
text-align:center;
mso-pagination:none;
tab-stops:center 207.65pt right 415.3pt;
layout-grid-mode:char;
border:none;
mso-border-bottom-alt:solid windowtext .75pt;
padding:0cm;
mso-padding-alt:0cm 0cm 1.0pt 0cm;
font-size:9.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:1.0pt;}
p.msofooter, li.msofooter, div.msofooter
{mso-style-priority:99;
mso-style-qformat:yes;
mso-style-link:"u�� w[&{";
margin:0cm;
margin-bottom:.0001pt;
mso-pagination:none;
tab-stops:center 207.65pt right 415.3pt;
layout-grid-mode:char;
font-size:9.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:1.0pt;}
p.msocaption, li.msocaption, div.msocaption
{mso-style-noshow:yes;
mso-style-priority:35;
mso-style-qformat:yes;
mso-style-next:ck�e;
margin:0cm;
margin-bottom:.0001pt;
text-align:justify;
text-justify:inter-ideograph;
mso-pagination:none;
font-size:10.0pt;
font-family:"i{�~ light";
mso-ascii-font-family:"i{�~ light";
mso-ascii-theme-font:major-latin;
mso-fareast-font-family:ўso;
mso-hansi-font-family:"i{�~ light";
mso-hansi-theme-font:major-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:major-bidi;
mso-font-kerning:1.0pt;}
span.msocommentreference
{mso-style-noshow:yes;
mso-style-priority:99;
mso-ansi-font-size:10.5pt;
mso-bidi-font-size:10.5pt;}
p.msotitle, li.msotitle, div.msotitle
{mso-style-priority:99;
mso-style-unhide:no;
mso-style-qformat:yes;
mso-style-link:"h�� w[&{1";
margin:0cm;
margin-bottom:.0001pt;
text-align:center;
line-height:15.6pt;
mso-pagination:none;
mso-layout-grid-align:none;
font-size:14.0pt;
mso-bidi-font-size:10.0pt;
font-family:"times new roman",serif;
mso-fareast-font-family:�[so;
font-weight:bold;
mso-bidi-font-weight:normal;}
p.msobodytext, li.msobodytext, div.msobodytext
{mso-style-noshow:yes;
mso-style-priority:99;
mso-style-link:"ck�e�e,g w[&{";
margin-top:0cm;
margin-right:0cm;
margin-bottom:6.0pt;
margin-left:0cm;
text-align:justify;
text-justify:inter-ideograph;
mso-pagination:none;
font-size:10.5pt;
mso-bidi-font-size:11.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:1.0pt;}
p.msodate, li.msodate, div.msodate
{mso-style-noshow:yes;
mso-style-priority:99;
mso-style-link:"�eg w[&{";
mso-style-next:ck�e;
margin-top:0cm;
margin-right:0cm;
margin-bottom:0cm;
margin-left:5.0pt;
margin-bottom:.0001pt;
mso-para-margin-top:0cm;
mso-para-margin-right:0cm;
mso-para-margin-bottom:0cm;
mso-para-margin-left:25.0gd;
mso-para-margin-bottom:.0001pt;
text-align:justify;
text-justify:inter-ideograph;
mso-pagination:none;
font-size:10.5pt;
mso-bidi-font-size:11.0pt;
font-family:i{�~;
mso-ascii-font-family:i{�~;
mso-ascii-theme-font:minor-latin;
mso-fareast-font-family:i{�~;
mso-fareast-theme-font:minor-fareast;
mso-hansi-font-family:i{�~;
mso-hansi-theme-font:minor-latin;
mso-bidi-font-family:"times new roman";
mso-bidi-theme-font:minor-bidi;
mso-font-kerning:1.0pt;}
p.msobodytext3, li.msobodytext3, div.msobodytext3
{mso-style-noshow:yes;
mso-style-priority:99;
mso-style-link:"ck�e�e,g 3 w[&{";
margin:0cm;
margin-bottom:.0001pt;
text-align:center;
mso-pagination:none;
font-size:9.0pt;
mso-bidi-font-size:10.0pt;
font-family:"times new roman",serif;
mso-fareast-font-family:�[so;
mso-font-kerning:1.0pt;}
a:link, span.msohyperlink
{mso-style-noshow:yes;
mso-style-priority:99;
mso-style-qformat:yes;
color:#0563c1;
mso-themecolor:hyperlink;
text-decoration:underline;
text-underline:single;}
a:visited, span.msohyperlinkfollowed
{mso-style-noshow:yes;
mso-style-priority:99;
color:#954f72;
mso-themecolor:followedhyperlink;
text-decoration:underline;
text-underline:single;}
p.msoplaintext, li.msoplaintext, div.msoplaintext
{mso-style-name:"�~�e,g\,nf��ew[";
mso-style-noshow:yes;
mso-style-priority:99;
mso-style-qformat:yes;
mso-style-link:"�~�e,g w[&{1\,nf��ew[ w[&{";
margin:0cm;
margin-bottom:.0001pt;
text-align:justify;
text-justify:inter-ideograph;
mso-pagination:none;
font-size:10.5pt;
mso-bidi-font-size:10.0pt;
font-family:�[so;
mso-hansi-font-family:"courier new";
mso-bidi-font-family:"times new roman";
mso-font-kerning:1.0pt;}
p
{mso-style-noshow:yes;
mso-style-priority:99;
mso-margin-top-alt:auto;
margin-right:0cm;
mso-margin-bottom-alt:auto;
margin-left:0cm;
mso-pagination:widow-orphan;
font-size:12.0pt;
font-family:�[so;
mso-bidi-font-family:�[so;}
pre
{mso-style-noshow:yes;
mso-style-priority:99;
mso-style-link:"html ����
</style>
<!--[if gte mso 10]>
<style>
/* style definitions */
table.msonormaltable
{mso-style-name:nf�h�
<![endif]--><!--[if gte mso 9]><xml>
<o:shapedefaults v:ext="edit" spidmax="2049"/>
</xml><![endif]--><!--[if gte mso 9]><xml>
<o:shapelayout v:ext="edit">
<o:idmap v:ext="edit" data="1"/>
</o:shapelayout></xml><![endif]-->
</head>
<body lang=zh-cn link="#0563c1" vlink="#954f72" style='tab-interval:21.0pt'>
<div class=wordsection1>
<p class=msonormal align=center style='text-align:center;mso-pagination:widow-orphan'><span
lang=en-us style='font-size:12.0pt;font-family:�[so;mso-bidi-font-family:�[so;
mso-font-kerning:0pt;mso-no-proof:yes'><img width=1002 height=120
id="_x0000_i1025" src="../nlpr.jpg"></span><span lang=en-us style='font-size:
12.0pt;font-family:�[so;mso-bidi-font-family:�[so;mso-font-kerning:0pt'><o:p></o:p></span></p>
<div align=center>
<table class=msonormaltable border=1 cellspacing=0 cellpadding=0 width=1117
style='width:838.0pt;border-collapse:collapse;border:none;mso-border-alt:solid windowtext .5pt;
mso-yfti-tbllook:1184;mso-padding-alt:0cm 0cm 0cm 0cm'>
<colgroup><col width="1117" style="width: 838pt"></colgroup>
<tr style='mso-yfti-irow:0;mso-yfti-firstrow:yes;height:15.0pt'>
<td width=1117 style='width:838.0pt;border:solid windowtext 1.0pt;border-bottom:
none;mso-border-top-alt:solid windowtext .5pt;mso-border-left-alt:solid windowtext .5pt;
mso-border-right-alt:solid windowtext .5pt;padding:.75pt .75pt 0cm .75pt;
height:15.0pt'>
<p class=msonormal align=center style='text-align:center;mso-pagination:widow-orphan'><b><span
lang=en-us style='mso-bidi-font-size:10.5pt;font-family:"times new roman",serif;
mso-fareast-font-family:�[so;color:black;mso-font-kerning:0pt'>2019</span></b><b><span
style='mso-bidi-font-size:10.5pt;font-family:�[so;mso-bidi-font-family:"times new roman";
color:black;mso-font-kerning:0pt'>���e�vu_</span></b><b><span lang=en-us
style='mso-bidi-font-size:10.5pt;font-family:"times new roman",serif;
mso-fareast-font-family:�[so;color:black;mso-font-kerning:0pt'> <o:p></o:p></span></b></p>
</td>
</tr>
<tr style='mso-yfti-irow:1;height:15.0pt'>
<td width=1117 style='width:838.0pt;border-top:none;border-left:solid windowtext 1.0pt;
border-bottom:none;border-right:solid windowtext 1.0pt;mso-border-left-alt:
solid windowtext .5pt;mso-border-right-alt:solid windowtext .5pt;padding:
.75pt .75pt 0cm .75pt;height:15.0pt'>
<p class=msonormal align=center style='text-align:center;mso-pagination:widow-orphan'><b><span
lang=en-us style='mso-bidi-font-size:10.5pt;font-family:"times new roman",serif;
mso-fareast-font-family:�[so;color:black;mso-font-kerning:0pt'> list of
publications<o:p></o:p></span></b></p>
</td>
</tr>
<tr style='mso-yfti-irow:2;mso-yfti-lastrow:yes;height:15.0pt'>
<td width=1117 style='width:838.0pt;border:solid windowtext 1.0pt;border-top:
none;mso-border-left-alt:solid windowtext .5pt;mso-border-bottom-alt:solid windowtext .5pt;
mso-border-right-alt:solid windowtext .5pt;padding:.75pt .75pt 0cm .75pt;
height:15.0pt'>
<p class=msonormal style='mso-pagination:widow-orphan'><span lang=en-us
style='mso-bidi-font-size:10.5pt;font-family:"times new roman",serif;
mso-fareast-font-family:�[so;color:black;mso-font-kerning:0pt'> <o:p></o:p></span></p>
<p class=af0 style='mso-outline-level:2'><a name="_toc40189707"
id="_toc532562395"></a><a name="_toc24098141" id="_toc532562225"></a><a
name="_toc532562225" id="_toc532391194"></a><a name="_toc532391194"><span
style='mso-bookmark:_toc532562225'><span style='mso-bookmark:_toc24098141'><span
style='mso-bookmark:_toc40189707'><b style='mso-bidi-font-weight:normal'><span
style='font-size:12.0pt;mso-bidi-font-size:11.0pt;line-height:110%;
font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�n �w�</span></b></span></span></span></a><a
name="_toc24465890"></a><span style='mso-bookmark:_toc24465890'><span
style='mso-bookmark:_toc40189707'><b style='mso-bidi-font-weight:normal'><span
lang=en-us style='font-size:12.0pt;mso-bidi-font-size:11.0pt;line-height:
110%'>books / chapters</span></b></span></span><b style='mso-bidi-font-weight:
normal'><span lang=en-us style='font-size:12.0pt;mso-bidi-font-size:11.0pt;
line-height:110%'><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l0 level1 lfo2'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>sergio escalera, stephane
ayache, jun wan, meysam madadi, umut guclu, xavier baro(eds.), inpainting and
denoising challenges, isbn:978-3-030-25613-5, springer </span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l0 level1 lfo2'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>2.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�[b�^�yw� _�[�o��e,gpencc�c�</span><span
lang=en-us>2019</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�nns'yf[�qhr>y</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l0 level1 lfo2'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>3.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xu-yao zhang, yi-chao, fei
yin, cheng-lin liu, deep learning based handwritten chinese character and
text recognition, in: k. huang, a. hussain, q.-f. wang, r. zhang (eds.), deep
learning: fundamentals, theory and applications, 2019, pp.58-88, springer.</span></p>
<p class=af0><b><span lang=en-us><o:p> </o:p></span></b></p>
<p class=af0 style='mso-outline-level:2'><a name="_toc40189708"></a><a
name="_toc24098142"></a><a name="_toc532562227"></a><a name="_toc532391196"><span
style='mso-bookmark:_toc532562227'><span style='mso-bookmark:_toc24098142'><span
style='mso-bookmark:_toc40189708'><b><span style='font-size:12.0pt;
mso-bidi-font-size:11.0pt;line-height:110%;font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�ve�
rir</span></b></span></span></span></a><a
name="_toc24465891"></a><a name="_toc24360999"></a><a name="_toc532562397"></a><span
style='mso-bookmark:_toc532562397'><span style='mso-bookmark:_toc24360999'><span
style='mso-bookmark:_toc24465891'><span style='mso-bookmark:_toc40189708'><b><span
lang=en-us style='font-size:12.0pt;mso-bidi-font-size:11.0pt;line-height:
110%'>international journals</span></b></span></span></span></span><b><span
lang=en-us style='font-size:12.0pt;mso-bidi-font-size:11.0pt;line-height:
110%'><o:p></o:p></span></b></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189709"></a><a
name="_toc24098143"></a><a name="_toc532562228"></a><a name="_toc532391197"><span
style='mso-bookmark:_toc532562228'><span style='mso-bookmark:_toc24098143'><span
style='mso-bookmark:_toc40189709'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>���{:gɖɉ</span></b></span></span></span></a><a
name="_toc24465892"></a><a name="_toc24361000"></a><a name="_toc532562398"><span
style='mso-bookmark:_toc24361000'><span style='mso-bookmark:_toc24465892'><span
style='mso-bookmark:_toc40189709'><b><span lang=en-us>computer vision</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>fuzhang wu, yan kong, weiming
dong, yanjun wu, gradient-aware blind face inpainting for deep face
verification. neurocomputing, vol. 331, pp. 301-311, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>2.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yucheng zhao, fan tang,
weiming dong, feiyue huang, xiaopeng zhang, joint face alignment and
segmentation via deep multi-task learning, multimedia tools and
applications, vol. 78, no. 10, pp. 13131-13148, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>3.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wangli hao, zhaoxiang zhang,
spatiotemporal distilled dense-connectivity network for video action
recognition, pattern recognition, vol.92, pp. 13-24, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>4.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>guibo zhu, zhaoxiang zhang,
jinqiao wang, yi wu, hanqing lu, dynamic collaborative tracking, ieee transactions
on neural networks and learning systems (tnnls), vol.30, pp. 3035-3046, 2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>5.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xinchu shi, haibin ling, yu
pang, weiming hu, peng chu, and junliang xing, rank-1 tensor approximation
for high-order association in multi-target tracking, international journal
of computer vision (ijcv), vol. 127, no. 8, pp. 1063-1083, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>6.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hainan cui, shuhan shen, wei
gao, hongmin liu, zhiheng wang. efficient and robust large-scale
structure-from-motion via track selection and camera prioritization . isprs
journal of photogrammetry and remote sensing, vol. 156, october 2019,
pp.202-214.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>7.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jianwei li, wei gao, yihong
wu, high-quality 3d reconstruction with depth super-resolution and
completion , ieee access, no. 7, pp. 19370-19381, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>8.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yangdong liu, wei gao, zhanyi
hu, 3d scanning of high dynamic scenes using an rgb-d sensor and an imu on a
mobile device , ieee access, no. 7, pp. 24057-24070, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>9.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wei wang, wei gao, efficient
multi-plane extraction from massive 3d points for modeling large-scale urban
scenes , the visual computer, vol. 35, no. 5, pp. 625-638, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>10.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiang gao, shuhan shen,
zhanyi hu, zhiheng wang. ground and aerial meta-data integration for
localization and reconstruction: a review. pattern recognition letters, 127:
202-214, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>11.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yang zhou, shuhan shen,
zhanyi hu. detail preserved surface reconstruction from point cloud. sensors,
19(6): 1278, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>12.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yihong wu, haoren wang, fulin
tang, zhiheng wang. efficient conic fitting with a polar-n-direction
geometric distance. pattern recognition, vol. 90, pp. 415-423, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>13.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiaomei zhao, yihong wu.
automatically extract semi-transparent motion-blurred hand from a single
image. ieee trans. on signal processing letters, vol. 26, no. 11, pp. 1598-
1602, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>14.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zichang tan, yang yang, jun
wan*, hanyuan huang, guodong guo and stan z. li, "attention based
pedestrian attribute analysis", ieee transactions on image processing
(tip), vol 28, no. 12, 6126-6140, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>15.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiangyu zhu, hao liu, zhen
lei, hailin shi, fan yang, dong yi, guojun qi, stan z. li. large-scale bisample
learning on id versus spot face recognition. international journal of
computer vision (ijcv), 2019, 127(6-7): 684-700.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>16.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhiwei liu, xiangyu zhu, ming
tang, zhen lei, jinqiao wang.efficient face alignment with fast normalization
and contour fitting loss,acm transactions on multimedia computing,
communications, and applications (tomm), 2019, 15: 89.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>17.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xinchu shi, haibin ling, yu
pang, weiming hu, peng chu, and junliang xing, rank-1 tensor approximation
for high-order association in multi-target tracking, international journal
of computer vision (ijcv), vol. 127, no. 8, pp. 1063-1083, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>18.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yunze gao, yingying
chen, jinqiao wang, ming tang, hanqing lu, reading scene text
with fully convolutional sequence
modeling. neurocomputing 339: 161-170, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>19.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>guibo zhu, jinqiao
wang, peisong wang, yi wu, hanqing lu, feature distilled
tracking. ieee trans. cybernetics 49(2): 440-452, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>20.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiyan liu, gaofeng meng and
chunhong pan. scene text detection and recognition with advances in deep
learning: a survey[j]. international journal on document analysis and
recognition (ijdar), 2019, 22(2): 143-162.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>21.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yabei li, zhang zhang, yanhua
cheng, liang wang and tieniu tan, mapnet: multi-modal attentive pooling
network for rgb-d indoor scene classification, pattern recognition, vol. 90,
pp. 436-449, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>22.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>huaibo huang, ran he, zhenan
sun and tieniu tan, wavelet domain generative adversarial network for
multi-scale face hallucination, international journal of computer vision,
vol. 127, no. 6-7, pp. 763-784, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>23.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>feng yu, qiang liu, shu wu,
liang wang and tieniu tan, attention-based convolutional approach for
misinformation identification from massive and noisy microblog posts,
computers & security, vol. 83, pp. 106-121, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>24.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yupei wang, xin zhao, xuecai
hu, yin li, kaiqi huang, focal boundary guided salient object detection,
ieee trans. on image processing, vol. 28, no. 6, pp. 2813-2824, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>25.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>dangwei li, zhang zhang,
xiaotang chen, kaiqi huang, a richly annotated pedestrian dataset for person
retrieval in real surveillance scenarios, ieee trans. on image processing,
vol. 28, no. 4, pp. 1575-1590, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>26.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>da li, zhang zhang, kai yu,
kaiqi huang and tieniu tan, isee: an intelligent scene exploration and
evaluation platform for large-scale visual surveillance, ieee trans. on
parallel and distributed systems, vol. 30, no.12, pp. 2743-2758.</span></p>
<p class=af0><b style='mso-bidi-font-weight:normal'><span lang=en-us><o:p> </o:p></span></b></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189710"></a><a
name="_toc24098144"></a><a name="_toc532562229"></a><a name="_toc532391198"><span
style='mso-bookmark:_toc532562229'><span style='mso-bookmark:_toc24098144'><span
style='mso-bookmark:_toc40189710'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>图像</span><span
lang=en-us>/</span></b></span></span></span></a><span style='mso-bookmark:
_toc532391198'><span style='mso-bookmark:_toc532562229'><span
style='mso-bookmark:_toc24098144'><span style='mso-bookmark:_toc40189710'><b><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>视频处理与分析</span></b></span></span></span></span><a
name="_toc24465893"></a><a name="_toc24361001"></a><a name="_toc532562399"><span
style='mso-bookmark:_toc24361001'><span style='mso-bookmark:_toc24465893'><span
style='mso-bookmark:_toc40189710'><b><span lang=en-us>image/video processing </span></b></span></span></span></a><span
style='mso-bookmark:_toc24465893'><span style='mso-bookmark:_toc40189710'><b><span
lang=en-us>and analysis</span></b></span></span><b><span lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>27.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>changde du, changying du,
lijie huang, huiguang he, reconstructing perceived images from human brain
activities with bayesian deep multiview learning , ieee trans. neural netw.
learning syst, 30(8): page(s): 2310-2323, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>28.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhao zhong, zichen yang,
wentao feng, wei wu, yangyang hu, cheng-lin liu, decision controller for
object tracking with deep reinforcement learning , ieee access, 7:
28069-28079, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>29.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yingying chen, jinqiao
wang, bingke zhu, ming tang, hanqing lu, pixelwise deep
sequence learning for moving object detection. ieee trans. circuits
syst. video techn. 29(9): 2567-2579, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>30.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>songyan liu, chaoyang zhao, yunze
gao, jinqiao wang, ming tang, adversarial image generation by combining
content and style, ieee transactions on image processing, 13(14): 2716-2723,
2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>31.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>congqi cao, cuiling
lan, yifan zhang, wenjun zeng, hanqing lu, yanning zhang,
skeleton-based action recognition with gated convolutional neural
networks. ieee trans. circuits syst. video
techn. 29(11): 3247-3257, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>32.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yousong zhu, chaoyang
zhao, haiyun guo, jinqiao wang, xu zhao, hanqing lu,
attention couplenet: fully convolutional attention coupling network for
object detection. ieee trans. image processing 28(1): 113-126,
2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>33.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>haiyun guo, kuan zhu, ming
tang, jinqiao wang, two-level attention network with multi-grain ranking loss
for vehicle re-identification, ieee trans. image processing, 28(9): 4328-4338,
2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>34.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lingfeng wang, chunhong pan:
visual object tracking via a manifold regularized discriminative dual
dictionary model. pattern recognition 91: 272-280 (2019).</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>35.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jie gu, gaofeng meng, shiming
xiang, chunhong pan: blind image quality assessment via learnable
attention-based pooling. pattern recognition 91: 332-344 (2019), if:5.898</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>36.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>tingzhao yu, lingfeng wang,
chaoxu guo, huxiang gu, shiming xiang, chunhong pan: pseudo low rank video
representation. pattern recognition 85: 50-59 (2019)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>37.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>tingzhao yu, lingfeng wang,
cheng da, huxiang gu, shiming xiang, chunhong pan: weakly semantic guided
action recognition. ieee trans. multimedia 21(10): 2504-2517 (2019)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>38.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yunbo wang, jian liang, dong
cao, zhenan sun, local semantic-aware deep hashing with hamming-isometric
quantization, ieee trans. image processing, vol. 28, no. 6, pp. 2665-2679,
2019. </span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>39.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hongwen zhang, qi li, zhenan
sun, adversarial learning semantic volume for 2d/3d face shape regression in
the wild, ieee trans. on image processing, vol. 28, no. 9, pp. 4526-4540,
2019.</span></p>
<p class=af0><b><i><span lang=en-us><o:p> </o:p></span></i></b></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189711"></a><a
name="_toc24098145"></a><a name="_toc532562230"></a><a name="_toc532391199"><span
style='mso-bookmark:_toc532562230'><span style='mso-bookmark:_toc24098145'><span
style='mso-bookmark:_toc40189711'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>模式识别</span></b></span></span></span></a><a
name="_toc24465894"></a><a name="_toc24361002"></a><a name="_toc532562400"><span
style='mso-bookmark:_toc24361002'><span style='mso-bookmark:_toc24465894'><span
style='mso-bookmark:_toc40189711'><b><span lang=en-us>pattern recognition</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>40.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lei wang, weiliang meng,
runping xi, chengcheng ma, yanning zhang, ling lu and xiaopeng zhang. 3d
point cloud analysis and classification in large-scale scene based on deep
learning. ieee access. 2019, 7(1): 55649-55658.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>41.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hongjun li, weiliang meng,
xinying liu, shiming xiang and xiaopeng zhang. parameter optimization
criteria guided 3d point cloud classification. multimedia tools and
applications. 2019, 2019(78): 5081-5104.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>42.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hao yang, chunfeng yuan,
bing li, yang du, junliang xing, weiming hu, and stephen j. maybank,
asymmetric 3d convolutional neural networks for action recognition, pattern
recognition, vol. 85, pp. 1-12, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>43.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>haiqing ren, weiqiang wang,
chenglin liu, recognizing online handwritten chinese characters using rnns
with new computing architectures , pattern recognition, 93: 179-192, 2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>44.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xinyu zuo, yubo chen, kang
liu and jun zhao, event coreference resolution via a multi-loss neural
network without using argument information, science china information
sciences, vol. 62, no. 11, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>45.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hao yang, chunfeng yuan,
bing li, yang du, junliang xing, weiming hu, and stephen j. maybank,
asymmetric 3d convolutional neural networks for action recognition, pattern
recognition, vol. 85, pp. 1-12, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>46.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhiwei fang, jing
liu, yong li, yanyuan qiao, hanqing lu. improving visual
question answering using dropout and enhanced question encoder. pattern
recognition 90: 404-414, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>47.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lingxiao he, haiqing li, qi
zhang, and zhenan sun, dynamic feature matching for partial face
recognition , ieee trans. on image processing, vol. 28, no. 2, pp. 791-802,
2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>48.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>man zhang, zhaofeng he, hui
zhang, tieniu tan and zhenan sun, toward practical remote iris recognition:
a boosting based framework, neurocomputing, vol.330, pp. 238-252, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>49.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yi li, lingxiao song, xiang
wu, ran he and tieniu tan, learning a bi-level adversarial network with
global and local perception for makeup-invariant face verification, pattern
recognition, vol. 90, pp. 99-108, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>50.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jie cao, yibo hu, bing yu,
ran he, zhenan sun, 3d aided duet gans for multi-view face image synthesis,
ieee trans. information forensics and security, vol. 14, no. 8, pp.
2028-2042, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>51.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jian liang, ran he, zhenan
sun and tieniu tan, aggregating randomized clustering-promoting invariant
projections for domain adaptation, ieee trans. on pattern analysis and
machine intelligence (pami), vol. 41, no. 5, pp. 1027-1042, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>52.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ran he, xiang wu, zhenan sun
and tieniu tan, wasserstein cnn: learning invariant features for nir-vis
face recognition, ieee trans. on pattern analysis and machine intelligence
(pami), vol. 41, no. 7, pp. 1761-1773, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>53.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>chunshui cao, yongzhen huang,
yi yang, liang wang, zilei wang and tieniu tan, feedback convolutional
neural network for visual localization and segmentation, ieee trans. on
pattern analysis and machine intelligence (pami), vol. 41, no. 7, pp.
1627-1640, 2019. </span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>54.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>chunfeng song, yongzhen
huang, yan huang, ning jia, and liang wang, gaitnet: an end-to-end network for
video-based human identification, pattern recognition, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>55.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yuqi zhang, yongzhen huang,
liang wang, shiqi yu, a comprehensive study on gait biometrics via a joint
cnn-based method, pattern recognition, vol. 93, pp. 228-236, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>56.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yanyun wang, chunfeng song,
yan huang, zhenyu wang, liang wang, learning view invariant gait features
with two-stream gan, neurocomputing, vol. 339, pp. 245-254, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>57.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>linjiang huang, yan huang,
wanli ouyang, liang wang, part-aligned pose-guided recurrent network for
action recognition, pattern recognition, vol. 92, pp. 162-176.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>58.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yuwei wang, yi zeng, jianbo
tang and bo xu. biological neuron coding inspired binary word embeddings.
cognitive computation, springer, 11:676–684, 2019.</span></p>
<p class=af0><b style='mso-bidi-font-weight:normal'><i><span lang=en-us><o:p> </o:p></span></i></b></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189712"></a><a
name="_toc24098146"></a><a name="_toc532562231"></a><a name="_toc532391200"><span
style='mso-bookmark:_toc532562231'><span style='mso-bookmark:_toc24098146'><span
style='mso-bookmark:_toc40189712'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>机器学习</span></b></span></span></span></a><a
name="_toc24465895"></a><a name="_toc24361003"></a><a name="_toc532562401"><span
style='mso-bookmark:_toc24361003'><span style='mso-bookmark:_toc24465895'><span
style='mso-bookmark:_toc40189712'><b><span lang=en-us>machine learning</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>59.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hanyang peng, cheng-lin liu,
discriminative feature selection via employing smooth and robust hinge
loss , ieee trans. neural networks and learning systems, 30(3): 788-802,
2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>60.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ting-bing xu, peipei yang,
xu-yao zhang, cheng-lin liu, lightweightnet: toward fast and lightweight
convolutional neural networks via architecture distillation , pattern
recognition, 88: 272-284, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>61.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiao-bo jin, xu-yao zhang,
kaizhu huang, guang-gang geng, stochastic conjugate gradient algorithm with
variance reduction , ieee trans. neural networks and learning systems, 30(5):
1360-1369, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>62.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>guibo zhu, zhaoxiang
zhang, jinqiao wang, yi wu, hanqing lu, dynamic collaborative
tracking. ieee trans. neural netw. learning
syst. 30(10): 3035-3046, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>63.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qi zhang, jianlong chang,
gaofeng meng, shibiao xu, shiming xiang, chunhong pan. learning graph
structure via graph convolutional networks. pattern recognition, 2019:
308-318.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>64.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>cheng da, gaofeng meng,
shiming xiang, kun ding, shibiao xu, qing yang, chunhong pan: nonlinear
asymmetric multi-valued hashing. ieee transactions on pattern analysis and
machine intelligence, 41(11): 2660-2676 (2019), if: 17.730</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>65.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>peipei li, yibo hu, ran he,
zhenan sun, global and local consistent wavelet-domain age synthesis, ieee
trans. on information forensics and security, vol. 14, no. 11, pp. 2943-2957,
2019.</span></p>
<p class=af0><b style='mso-bidi-font-weight:normal'><span lang=en-us><o:p> </o:p></span></b></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189713"></a><a
name="_toc24098147"></a><a name="_toc532562232"></a><a name="_toc532391201"><span
style='mso-bookmark:_toc532562232'><span style='mso-bookmark:_toc24098147'><span
style='mso-bookmark:_toc40189713'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>数据挖掘</span></b></span></span></span></a><a
name="_toc24465896"></a><a name="_toc24361004"></a><a name="_toc532562402"><span
style='mso-bookmark:_toc24361004'><span style='mso-bookmark:_toc24465896'><span
style='mso-bookmark:_toc40189713'><b><span lang=en-us>data mining</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>66.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qiang cui, shu wu, yan huang,
liang wang, a hierarchical contextual attention-based network for sequential
recommendation, neurocomputing, vol. 358, pp. 141-149, 2019.</span></p>
<p class=af0><b style='mso-bidi-font-weight:normal'><i><span lang=en-us><o:p> </o:p></span></i></b></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189714"></a><a
name="_toc24098148"></a><a name="_toc532562233"></a><a name="_toc532391202"><span
style='mso-bookmark:_toc532562233'><span style='mso-bookmark:_toc24098148'><span
style='mso-bookmark:_toc40189714'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>计算机图形学</span></b></span></span></span></a><a
name="_toc24465897"></a><a name="_toc24361005"></a><a name="_toc532562403"><span
style='mso-bookmark:_toc24361005'><span style='mso-bookmark:_toc24465897'><span
style='mso-bookmark:_toc40189714'><b><span lang=en-us>computer graphics</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>67.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>meng yang, juntao ye, frank
ding, yubo zhang, dong-ming yan. a semi-explicit surface tracking mechanism
for multi-phase immiscible liquids. ieee trans. visualization and computer
graphics, 25(10), pages 2873-2885, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>68.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jianwei guo, shibiao xu,
dong-ming yan, zhanglin cheng, marc jaeger, xiaopeng zhang. realistic
procedural plant modeling from multiple view images. ieee transactions on
visualization and computer graphics, 2019, online</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>69.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jianwei guo, fan ding,
xiaohong jia, dong-ming yan. automatic and high-quality surface mesh
generation for cad models. computer-aided design, volume 109, pages 49-59,
2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>70.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yiqun wang, dong-ming yan,
xiaohan liu, chengcheng tang, jianwei guo, xiaopeng zhang, peter wonka:
isotropic surface remeshing without large and small angles. ieee trans. vis.
comput. graph. 25(7): 2430-2442 (2019)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>71.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qunce xu, dong-ming yan,
wenbin li, yong-liang yang. anisotropic surface remeshing without obtuse
angles. computer graphics forum (pacific graphics), 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>72.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>m. yuan, l. dai, d.-m. yan,
l. zhang, x. zhang. fast and error-bounded space-variant bilateral filtering.
journal of computer science and technology. 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>73.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>c. rao, l. tian, d.-m. yan,
s. liao, d. oliver, l. lu. consistently fitting orthopaedic casts.
computer-aided geometric design, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>74.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>y. gao, l. wu, d.-m. yan, l.
nan. near support-free multi-directional 3d printing via global-optimal
decomposition. graphical models, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>75.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>bin fan, qingqun kong,
xinchao wang, zhiheng wang, shiming xiang, chunhong pan, and pascal fua.
"a performance evaluation of local features for image based 3d
reconstruction". ieee transactions on image processing, 28(10): 4774 -
4789, 2019.</span></p>
<p class=af0><span lang=en-us style='mso-bidi-font-weight:bold'><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189715"></a><a
name="_toc24098149"></a><a name="_toc532562234"></a><a name="_toc532391203"><span
style='mso-bookmark:_toc532562234'><span style='mso-bookmark:_toc24098149'><span
style='mso-bookmark:_toc40189715'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>多媒体计算</span></b></span></span></span></a><a
name="_toc24465898"></a><a name="_toc24361006"></a><a name="_toc532562404"><span
style='mso-bookmark:_toc24361006'><span style='mso-bookmark:_toc24465898'><span
style='mso-bookmark:_toc40189715'><b><span lang=en-us>multimedia computing</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>76.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yingying deng, fan tang,
weiming dong, fuzhang wu, oliver deussen, changsheng xu, selective
clustering for representative paintings selection, multimedia tools and
applications, vol. 78, no. 14, pp. 19305-19323, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>77.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiaoshan yang, changsheng xu:
image captioning by asking questions. acm transactions on multimedia
computing, communications, and applications (tomm) 15(2s): 55:1-55:19, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>78.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>fan qi, xiaoshan yang,
tianzhu zhang, and changsheng xu: discriminative multimodal embedding for
event classification, neurocomputing, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>79.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>cuirong long, xiaoshan yang,
changsheng xu: cross-domain personalized image captioning, multimedia tools
and applications, 2019: 1-16.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>80.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hantao yao, feng dai,
shiliang zhang, yongdong zhang, qi tian, changsheng xu, dr2-net: deep
residual reconstruction network for image compressive sensing .
neurocomputing 359: 483-493 (2019)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>81.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hantao yao, shiliang zhang,
richang hong, yongdong zhang, changsheng xu, qi tian, deep representation
learning with part loss for person re-identification . ieee trans. image
processing 28(6): 2860-2871 (2019)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>82.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>feng xue, jianwei wang,
shengsheng qian, tianzhu zhang, xueliang liu, and changsheng xu: multi-modal
max-margin supervised topic model for social event analysis. multimedia tools
appl. 78(1): 141-160 (2019)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>83.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>junyu gao, tianzhu zhang,
changsheng xu: smart: joint sampling and regression for visual tracking. ieee
trans. image processing 28(8): 3923-3935 (2019)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>84.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xinhong ma , tianzhu zhang ,
changsheng xu : deep multi-modality adversarial networks for unsupervised
domain adaptation. ieee trans. multimedia 21(9): 2419-2431 (2019)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>85.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jun hu, shengsheng qian, quan
fang, xueliang liu, changsheng xu: a2cmhne: attention-aware collaborative
multimodal heterogeneous network embedding. tomccap 15(2): 45:1-45:17 (2019)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>86.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhiwei fang, jing
liu, xueliang liu, qu tang, yong li, hanqing lu, btdp:
toward sparse fusion with block term decomposition pooling for visual
question answering. tomm 15(2s): 50:1-50:21, 2019</span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189716"></a><a
name="_toc24098150"></a><a name="_toc532562236"></a><a name="_toc532391205"><span
style='mso-bookmark:_toc532562236'><span style='mso-bookmark:_toc24098150'><span
style='mso-bookmark:_toc40189716'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>医学图像分析</span></b></span></span></span></a><a
name="_toc24465899"></a><a name="_toc24361007"></a><a name="_toc532562406"><span
style='mso-bookmark:_toc24361007'><span style='mso-bookmark:_toc24465899'><span
style='mso-bookmark:_toc40189716'><b><span lang=en-us>medical image analysis</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>87.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>longwei fang, zuowei wang,
zhiqiang chen, fengzeng jian, shuo li, huiguang he, 3d shape reconstruction
of lumbar vertebra from two x-ray images and a ct model , ieee/caa journal of
automatica sinica, vol 10</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>，</span><span
lang=en-us>pages 1-10, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>88.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>longwei fang, lichi zhang,
dong nie, xiaohuan cao, islem rekik, seongwhan lee, huiguang he, dinggang
shen, automatic brain labeling via multi-atlas guided fully convolutional
networks , medical image analysis, volume 51, pages 157-168, 2019</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>89.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yuan lin, linlin li, nie wei,
xiaolei liu, avital adler, chi xiao, fujian lu, liping wang, hua han, xianhua
wang, wen-biao gan, heping cheng, brain activity regulates loose coupling
between mitochondrial and cytosolic ca^(2+) transients, nature
communications, vol. 10, no. 1, pp. 1-13, 2019</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>90.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>fu z, tu y, di x, du y, sui
j, biswal bb, zhang z, de lacy n, calhoun vd. transient increased
thalamic-sensory connectivity and decreased whole-brain dynamism in autism.
neuroimage. 2019; 190:191-204.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>91.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hu g, huang x, jiang t, yu s.
multi-scale expressions of one optimal state regulated by dopamine in the
prefrontal cortex. front physiol. 2019; 10:113.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>92.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiang r, zuo n, ford jm, qi
s, zhi d, zhuo c, xu y, fu z, bustillo j, turner ja, calhoun vd, sui j.
task-induced brain connectivity promotes the detection of individual
differences in brain-behavior relationships. neuroimage. 2019:116370.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>93.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>li j, wang l, zhang x, liu l,
li j, chan mf, sui j, yang r. machine learning for patient-specific quality
assurance of vmat: prediction and classification accuracy. int j radiat oncol
biol phys. 2019; 105(4):893-902.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>94.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>li j, zhang x, li j, jiang r,
sui j, chan mf, yang r. impact of delivery characteristics on dose delivery
accuracy of volumetric modulated arc therapy for different treatment sites. j
radiat res. 2019; 60(5):603-11.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>95.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>li x, wu d, cui y, liu b,
walter h, schumann g, li c, jiang t. reliable heritability estimation using
sparse regularization in ultrahigh dimensional genome-wide association
studies. bmc bioinformatics. 2019; 20(1):219.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>96.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>liu h, jian m, liu s, li a,
li s, fang j, liang f, liu b, han r. preserved individual differences in
functional connectivity patterns under dexmedetomidine-induced sedation.
neurosci lett. 2019; 707:134289.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>97.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>liu s, li a, zhu m, li j, liu
b. genetic influences on cortical myelination in the human brain. genes brain
behav. 2019; 18(4):e12537.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>98.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>liu s, wang h, song m, lv l, cui
y, liu y, fan l, zuo n, xu k, du y, yu q, luo n, qi s, yang j, xie s, li j,
chen j, chen y, wang h, guo h, wan p, yang y, li p, lu l, yan h, yan j, wang
h, zhang h, zhang d, calhoun vd, jiang t, sui j. linked 4-way multimodal
brain differences in schizophrenia in a large chinese han population.
schizophrenia bulletin. 2019; 45(2):436-49.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>99.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>liu x, li y, li s, fan x, sun
z, yang z, wang k, zhang z, jiang t, liu y, wang l, wang y. idh
mutation-specific radiomic signature in lower-grade gliomas. aging (albany
ny). 2019; 11(2):673-96.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>100.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>luo n, tian l, calhoun vd,
chen j, lin d, vergara vm, rao s, yang j, zhuo c, xu y, turner ja, zhang f,
sui j. brain function, structure and genomic data are linked but show
different sensitivity to duration of illness and disease stage in
schizophrenia. neuroimage clin. 2019; 23:101887.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>101.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>miller sr, yu s, plenz d. the
scale-invariant, temporal profile of neuronal avalanches in relation to
cortical gamma-oscillations. sci rep. 2019; 9(1):16403.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>102.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>niu w, huang x, xu k, jiang
t, yu s. pairwise interactions among brain regions organize large-scale
functional connectivity during execution of various tasks. neuroscience.
2019; 412:190-206.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>103.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qi s, sui j, chen j, liu j,
jiang r, silva r, iraji a, damaraju e, salman m, lin d, fu z, zhi d, turner ja,
bustillo j, ford jm, mathalon dh, voyvodic j, mcewen s, preda a, belger a,
potkin sg, mueller ba, adali t, calhoun vd. parallel group ica+ica: joint
estimation of linked functional network variability and structural
covariation with application to schizophrenia. hum brain mapp. 2019;
40(13):3795-809.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>104.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qin j, sui j, ni h, wang s,
zhang f, zhou z, tian l. the shared and distinct white matter networks
between drug-naive patients with obsessive-compulsive disorder and
schizophrenia. front neurosci. 2019; 13:96.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>105.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>salman ms, du y, lin d, fu z,
fedorov a, damaraju e, sui j, chen j, mayer ar, posse s, mathalon dh, ford
jm, van erp t, calhoun vd. group ica for identifying biomarkers in
schizophrenia: 'adaptive' networks via spatially constrained ica show more
sensitivity to group differences than spatio-temporal regression. neuroimage
clin. 2019; 22:101747.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>106.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>su c, jiang j, zhang s, shi
j, xu k, shen n, zhang j, li l, zhao l, zhang j, qin y, liu y, zhu w.
radiomics based on multicontrast mri can precisely differentiate among glioma
subtypes and predict tumour-proliferative behaviour. eur radiol. 2019;
29(4):1986-96.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>107.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>tang j, li y, xu j, qin w, su
q, xu q, liu b, jiang t, yu c. impact of comt haplotypes on functional
connectivity density and its association with the gene expression of dopamine
receptors. brain struct funct. 2019; 224(8):2619-30.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>108.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wang d, hu l, xu x, ma x, li
y, liu y, wang q, zhuo c. kibra and apoe gene variants affect brain
functional network connectivity in healthy older people. j gerontol a biol
sci med sci. 2019; 74(11):1725-33.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>109.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wang j, becker b, wang l, li
h, zhao x, jiang t. corresponding anatomical and coactivation architecture of
the human precuneus showing similar connectivity patterns with macaques.
neuroimage. 2019; 200:562-74.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>110.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xia x, fan l, cheng c, yao r,
deng h, zhao d, li h, jiang t. interspecies differences in the connectivity
of ventral striatal components between humans and macaques. front neurosci.
2019; 13:623.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>111.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xia x, fan l, hou b, zhang b,
zhang d, cheng c, deng h, dong y, zhao x, li h, jiang t. fine-grained
parcellation of the macaque nucleus accumbens by high-resolution diffusion
tensor tractography. front neurosci. 2019; 13:709.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>112.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xie s, liu b, wang j, zhou y,
cui y, song m, chen y, li p, lu l, lv l, wang h, yan h, yan j, zhang h, zhang
d, jiang t. hyperconnectivity in perisylvian language pathways in
schizophrenia with auditory verbal hallucinations: a multi-site diffusion mri
study. schizophr res. 2019; 210:262-9.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>113.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yan j, cui y, li q, tian l,
liu b, jiang t, zhang d, yan h. cortical thinning and flattening in
schizophrenia and their unaffected parents. neuropsychiatr dis treat. 2019;
15:935-46.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>114.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yan w, calhoun v, song m, cui
y, yan h, liu s, fan l, zuo n, yang z, xu k, yan j, lv l, chen j, chen y, guo
h, li p, lu l, wan p, wang h, wang h, yang y, zhang h, zhang d, jiang t, sui
j. discriminating schizophrenia using recurrent neural network applied on
time courses of multi-site fmri data. ebiomedicine. 2019; 47:543-52.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>115.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yan x, wang y, xu l, liu y,
song s, ding k, zhou y, jiang t, lin x. altered functional connectivity of
the primary visual cortex in adult comitant strabismus: a resting-state
functional mri study. curr eye res. 2019; 44(3):316-23.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>116.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yang y, liu s, jiang x, yu h,
ding s, lu y, li w, zhang h, liu b, cui y, fan l, jiang t, lv l. common and
specific functional activity features in schizophrenia, major depressive
disorder, and bipolar disorder. front psychiatry. 2019; 10:52.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>117.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yu q, chen j, du y, sui j,
damaraju e, turner ja, van erp tgm, macciardi f, belger a, ford jm, mcewen s,
mathalon dh, mueller ba, preda a, vaidya j, pearlson gd, calhoun vd. a method
for building a genome-connectome bipartite graph model. j neurosci methods.
2019; 320:64-71.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>118.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zeng g, chen y, cui b, yu s.
continual learning of context-dependent processing in neural networks. nature
machine intelligence. 2019; 1(8):364-72.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>119.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zeng g, huang x, jiang t, yu
s. short-term synaptic plasticity expands the operational range of long-term
synaptic changes in neural networks. neural netw. 2019; 118:140-7.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>120.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhang j, zhang j, hu g, chen
y, yu s. scalenet: a convolutional network to extract multi-scale and
fine-grained visual features. ieee access. 2019; 7:147560-70.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>121.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhang z, chen y, mi y, hu g.
reconstruction of dynamic networks with time-delayed interactions in the
presence of fast-varying noises. phys rev e. 2019; 99(4-1):042311.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>122.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zheng f, yan l, zhong b, yang
z, xie w. progression of cognitive decline before and after incident stroke.
neurology. 2019; 93(1):e20-e8.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>123.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhou w, jin y, meng q, zhu x,
bai t, tian y, mao y, wang l, xie w, zhong h, zhang n, luo mh, tao w, wang h,
li j, li j, qiu bs, zhou jn, li x, xu h, wang k, zhang x, liu y,
richter-levin g, xu l, zhang z. a neural circuit for comorbid depressive
symptoms in chronic pain. nat neurosci. 2019; 22(10):1649-58.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>124.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhu w, huang h, yang s, luo
x, zhu w, xu s, meng q, zuo c, zhao k, liu h, liu y, wang w. dysfunctional
architecture underlies white matter hyperintensities with and without
cognitive impairment. journal of alzheimer's disease: jad. 2019;
71(2):461-76.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>125.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zuo n, salami a, yang y, yang
z, sui j, jiang t. activation-based association profiles differentiate
network roles across cognitive loads. hum brain mapp. 2019; 40(9):2800-12.</span></p>
<p class=af0><b style='mso-bidi-font-weight:normal'><span lang=en-us><o:p> </o:p></span></b></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189717"><b><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>pg�e�yf[</span><span lang=en-us>materials science</span></b></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;line-height:normal;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-bidi-font-size:10.5pt;mso-fareast-font-family:"times new roman";
mso-bidi-font-family:"times new roman"'><span style='mso-list:ignore'>126.<span
style='font:7.0pt "times new roman"'> </span></span></span><![endif]><span
lang=en-us style='mso-bidi-font-size:10.5pt'>zi wang, lina zhang, weifu li, zijun
qin, zexin wang, zihang li, liming tan, lilong zhu, feng liu, hua han, liang
jiang, high throughput experiment and machine learning assisted discovery of
new ni-base superalloys, scripta materialia, vol. 178, pp. 134-138, 2019<o:p></o:p></span></p>
<p class=af0><b style='mso-bidi-font-weight:normal'><i style='mso-bidi-font-style:
normal'><span lang=en-us><o:p> </o:p></span></i></b></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189718"></a><a
name="_toc24098151"></a><a name="_toc532562237"></a><a name="_toc532391206"><span
style='mso-bookmark:_toc532562237'><span style='mso-bookmark:_toc24098151'><span
style='mso-bookmark:_toc40189718'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�����b/g</span></b></span></span></span></a><a
name="_toc24465900"></a><a name="_toc24361008"></a><a name="_toc532562407"><span
style='mso-bookmark:_toc24361008'><span style='mso-bookmark:_toc24465900'><span
style='mso-bookmark:_toc40189718'><b><span lang=en-us>speech and language
technology</span></b></span></span></span></a><b><span lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>127.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>long zhou, jiajun zhang,
chengqing zong. synchronous bidirectional neural machine translation,
transactions of association for computational linguistics (tacl), vol. 7, pp.
91-105, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>128.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiajun zhang, yang zhao,
haoran li and chengqing zong. attention with sparsity regularization for
neural machine translation and summarization. ieee/acm transactions on audio,
speech and language processing, vol. 27, no.3, pp. 507-518, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>129.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>haoran li, junnan zhu, cong
ma, jiajun zhang and chengqing zong. read, watch, listen and summarize:
multi-modal summarization for asynchronous text, image, audio and video.
transactions on knowledge and data engineering (tkde), vol. 31, no. 5, may
2019, pages 996-1009.<span style='mso-spacerun:yes'>� </span>print issn: 1041-4347,
online issn: 1041-4347, digital object identifier: 10.1109/tkde.2018.2848260</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>130.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>guoping huang, jiajun zhang,
yu zhou and chengqing zong. input method for human translators: a novel
approach to integrate machine translation effectively and imperceptibly. acm
transactions on asian and low-resource language information processing
(tallip), vol. 18, no. 1, article 4, 22 pages, january 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>131.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiangyan yi, jianhua tao,
zhengqi wen, ye bai, language-adversarial transfer learning for low-resource
speech recognition, ieee/acm trans. audio, speech & language processing,
2019;27(3):621-630
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>132.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zipeng zhao, zhongtian bao,
zixing zhang, jun deng, nicholas cummins, haishuai wang, jianhua tao, bjorn
schuller, automatic assessment of depression from speech via a hierarchical
attention transfer network and attention autoencoders, journal of selected
topics in signal processing, vol. 14, no. 8, august 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>133.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yibin zheng, jianhua tao,
zhengqi wen, jiangyan yi, forward backward decoding sequence for
regularizing end-to-end tts, ieee/acm trans. audio, speech & language
processing, 2019;27(12): 2067-2079</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>134.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xinyu xiao, lingfeng wang,
kun ding, shiming xiang, chunhong pan, "deep hierarchical
encoder-decoder network for image captioning," in ieee transactions on
multimedia 21(11): 2942-2956 (2019).</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l4 level1 lfo4'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>135.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xinyu xiao, lingfeng wang,
kun ding, shiming xiang, chunhong pan, dense semantic embedding network for
image captioning, pattern recognition 90: 285-296 (2019).</span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:2'><a name="_toc40189719"></a><a
name="_toc24098152"></a><a name="_toc532562238"></a><a name="_toc532391207"><span
style='mso-bookmark:_toc532562238'><span style='mso-bookmark:_toc24098152'><span
style='mso-bookmark:_toc40189719'><b style='mso-bidi-font-weight:normal'><span
style='font-size:12.0pt;mso-bidi-font-size:11.0pt;line-height:110%;
font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�v�q
rir</span></b></span></span></span></a><a
name="_toc24465901"></a><a name="_toc24361009"></a><a name="_toc532562408"></a><span
style='mso-bookmark:_toc532562408'><span style='mso-bookmark:_toc24361009'><span
style='mso-bookmark:_toc24465901'><span style='mso-bookmark:_toc40189719'><b
style='mso-bidi-font-weight:normal'><span lang=en-us style='font-size:12.0pt;
mso-bidi-font-size:11.0pt;line-height:110%'>national journals</span></b></span></span></span></span><b
style='mso-bidi-font-weight:normal'><span lang=en-us style='font-size:12.0pt;
mso-bidi-font-size:11.0pt;line-height:110%'><o:p></o:p></span></b></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>changde du, jinpeng li, lijie
huang, huiguang he, brain encoding and decoding in fmri with bidirectional
deep generative models, engineering, volume 5, issue 5, pages 948-953, 2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>2.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiali han, shuhan shen.
scalable point cloud meshing for image based large scale 3d modeling. visual
computing for industry, biomedicine, and art, 2(10): 1-10, 2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>3.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiang gao, hainan cui,
lingjie zhu, tianxin shi, shuhan shen. multi-source data based 3d digital
preservation of large-scale ancient chinese architecture: a case report.
virtual reality & intelligent hardware, 1(5): 525-541, 2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>4.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiaomei zhao, fulin tang and
yihong wu. real time human segmentation by bowtienet and a slam based human
ar system, virtual reality & intelligent hardware, vol 1. issue 5, pp.
511-524, 2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>5.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>m. yuan, l. dai, d. yan, l.
zhang, j. xiao, x. zhang: fast and error-bounded space-variant bilateral
filtering. j. comput. sci. technol. 34(3): 550-568, 2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>6.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hao wang, qingyuan zhu,
lufeng ding, yan shen, chao-yu yang, fang xu, chang shu, yujie guo, zhiwei
xiong, qinghong shan, fan jia, peng su, qian-ru yang, bing li, yuxiao cheng,
xiaobin he, xi chen, feng wu, jiang-ning zhou, fuqiang xu, hua han, pak-ming
lau, guo-qiang bi</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>scalable volumetric imaging for ultrahigh-speed brain mapping at
synaptic resolution. national science review, vol. 6, no. 5, pp. 982-992,
2019</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>7.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>li j, jin d, li a, liu b,
song c, wang p, wang d, xu k, yang h, yao h, zhou b, bejanin a, chetelat g,
han t, lu j, wang q, yu c, zhang x, zhou y, zhang x, jiang t, liu y, han y.
asaf: altered spontaneous activity fingerprinting in alzheimer's disease
based on multisite fmri. science bulletin. 2019; 64(14):998-1010.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>8.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yang w, liu y, tu z, xiao c,
yan s, ma x, guo x, chen x, yin p, yang z, yang s, jiang t, li s, qin c, li
xj. crispr/cas9-mediated pink1 deletion leads to neurodegeneration in rhesus
monkeys. cell res. 2019; 29(4):334-6.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>9.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhang df, fan y, xu m, wang
g, wang d, li j, kong ll, zhou h, luo r, bi r, wu y, li gd, li m, luo xj,
jiang hy, tan l, zhong c, fang y, zhang c, sheng n, jiang t, yao yg.
complement c7 is a novel risk gene for alzheimer's disease in han chinese.
natl sci rev. 2019; 6(2):257-74.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l3 level1 lfo6;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>10.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wei wang, wei gao, zhanyi hu,
effectively modeling piecewise planar urban scenes based on structure priors
and cnn, science china-information sciences, vol. 62, no. 2, pp. 29102,
2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l3 level1 lfo6;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>11.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jianhua tao, jian huang, ya li,
zheng lian, mingyue niu, semi-supervised ladder networks for speech emotion
recognition, international journal of automation and computing, vol.16 no.4,
august 2019, 437-448</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>12.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�x[*��r�e�v��^ns�)nck�h�f_l�q� n�y�w�nws�y^y�~q�~�v�z0r�z��r�y�e�l �
0�o�syt0�</span><span
lang=en-us>2019</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>t^</span><span lang=en-us>4</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>g�</span><span lang=en-us>vol.35</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>no.4</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>13.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>v�</span><span
style='font-family:�[so'>�^ns<span lang=en-us>,</span>hgfim<span lang=en-us>*,</span>�s�_o�<span
lang=en-us>,</span>�ssfz<span lang=en-us>,</span>��n<span lang=en-us>,</span>jl�nwm<span
lang=en-us>,</span>�f,</span>�sޘ<span lang=en-us>,</span>�s�~��<span
lang=en-us>,</span>r�e<span lang=en-us>,</span>��_^<span lang=en-us>,</span>xo*�<span
lang=en-us>,</span>h��e�b</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us> </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�ẽag�nn�v^��c�_y�s��6q�n�nkb/g�s�x</span><span
lang=en-us> </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�
0o��nf[�b0�</span><span lang=en-us>2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>t^,{</span><span lang=en-us>10</span><span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>g
0�6q�n:g�n�n�eۏu\n
r0</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l3 level1 lfo6;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>14.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>hgfim</span><span
lang=en-us>*</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�v��^ns�</span><span style='font-family:
�[so'> y!j`�n:g�[��n�n_f[`n���ra�s͑�� </span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�
0mr�l�yf[0�
0mr�l�yf[</span><span
lang=en-us>-</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�n�]zf��n��0</span><span lang=en-us>2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>t^,{</span><span lang=en-us>2</span><span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>g</span></p>
<p class=af0><b><span lang=en-us><o:p> </o:p></span></b></p>
<p class=af0 style='mso-outline-level:2'><a name="_toc40189720"></a><a
name="_toc24098153"></a><a name="_toc532562239"></a><a name="_toc532391208"><span
style='mso-bookmark:_toc532562239'><span style='mso-bookmark:_toc24098153'><span
style='mso-bookmark:_toc40189720'><b><span style='font-size:12.0pt;
mso-bidi-font-size:11.0pt;line-height:110%;font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�ve�o��</span></b></span></span></span></a><a
name="_toc24465902"></a><a name="_toc24361010"></a><a name="_toc532562409"></a><span
style='mso-bookmark:_toc532562409'><span style='mso-bookmark:_toc24361010'><span
style='mso-bookmark:_toc24465902'><span style='mso-bookmark:_toc40189720'><b><span
lang=en-us style='font-size:12.0pt;mso-bidi-font-size:11.0pt;line-height:
110%'>international conferences</span></b></span></span></span></span><b><span
lang=en-us style='font-size:12.0pt;mso-bidi-font-size:11.0pt;line-height:
110%'><o:p></o:p></span></b></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189721"></a><a
name="_toc24098154"></a><a name="_toc532562240"></a><a name="_toc532391209"><span
style='mso-bookmark:_toc532562240'><span style='mso-bookmark:_toc24098154'><span
style='mso-bookmark:_toc40189721'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>���{:gɖɉ</span></b></span></span></span></a><a
name="_toc24465903"></a><a name="_toc24361011"></a><a name="_toc532562410"><span
style='mso-bookmark:_toc24361011'><span style='mso-bookmark:_toc24465903'><span
style='mso-bookmark:_toc40189721'><b><span lang=en-us>computer vision</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yong zhang, baoyuan wu,
weiming dong, zhifeng li, wei liu, bao-gang hu, qiang ji, joint
representation and estimator learning for facial action unit intensity
estimation. ieee conference on computer vision and pattern recognition
(cvpr), pp. 3457-3466, 2019, long beach, ca, usa.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>2.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yi huang, xiaoshan yang,
changsheng xu: time-guided high-order attention model of longitudinal
heterogeneous healthcare data. pacific rim international conference on
artificial intelligence (pricai): pp. 57-70, cuvu, yanuca island, fiji,
august 26-30, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>3.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wenkai dong, zhaoxiang zhang,
tieniu tan, attention-aware sampling via deep reinforcement learning for
action recognition, the thirty-third aaai conference on artificial
intelligence, aaai 2019, january 27-february 1, 2019, hawaii, usa.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>4.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qing en, lijuan duan,
zhaoxiang zhang, xiang bai, yundong zhang, human-like delicate region
erasing strategy for weakly supervised detection, the thirty-third aaai
conference on artificial intelligence, aaai 2019, january 27-february 1,
2019, hawaii, usa.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>5.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yong zhao, shibiao xu, shuhui
bu, hongkai jiang, pengcheng han, gslam: a general slam framework and
benchmark, proc. ieee international conference on computer vision, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>6.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yiqun wang, jianwei guo,
dong-ming yan, kai wang, and xiaopeng zhang. a robust local spectral
descriptor for matching non-rigid shapes with incompatible shape structures.
ieee conference on computer vision and pattern recognition (cvpr), pp.
6231-6240, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>7.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhao yang, qiang wang, luca
bertinetto, weiming hu, song bai, and philip h. s. torr, anchor diffusion
for unsupervised video object segmentation, ieee international conference on
computer vision (iccv), pp. 931-940</span><span style='font-family:�[so;
mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>8.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qiang wang, li zhang, luca
bertinetto, weiming hu, and philip h.s. torr, fast online object tracking
and segmentation: a unifying approach, ieee conference on computer vision
and pattern recognition (cvpr), pp. 1328-1338, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>9.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yufan liu, jiajiong cao, bing
li, chunfeng yuan, weiming hu, yangxi li and yunqiang duan, knowledge
distillation via instance relationship graph, ieee conference on computer
vision and pattern recognition (cvpr), pp. 7096-7104, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>10.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>liang sun, bing li, chunfeng
yuan, zhengjun zha and weiming hu, multimodal semantic attention network for
video captioning, ieee international conference on multimedia and
expo (icme), pp. 1300-1305, 2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>11.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jinxu liu, wei gao, zhanyi
hu, visual-inertial odometry tightly coupled with wheel encoder adopting
robust initialization and online extrinsic calibration, proc. ieee/rsj
international conference on intelligent robots and systems, pp. 5391-5397,
november 2019, macau, china.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>12.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiali han, shuhan shen.
distributed surface reconstruction from point cloud for city-scale scenes,
international conference on 3d vision, 3dv 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>13.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yang zhou, shuhan shen,
zhanyi hu. active semantic labeling of street view point clouds.
international conference on multimedia and expo, icme 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>14.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>tianxin shi, shuhan shen,
xiang gao, lingjie zhu. visual localization using sparse semantic 3d map.
international conference on image processing, icip 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>15.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>fulin tang, heping li, yihong
wu. fmd stereo slam: fusing mvg and direct formulation towards accurate and
fast stereo slam. icra, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>16.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lang wu, yihong wu.
similarity hierarchy based place recognition by deep supervised hashing for
slam. iros 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>17.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiaomei zhao and yihong wu.
automatic motion-blurred hand matting for human soft segmentation in videos.
icip 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>18.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zikun liu, chunyang li,
yinglu liu, zifeng lian, yihong wu. self-supervised classification assisted
segmentation network for human parsing. icip 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>19.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zichang tan, yang yang, jun
wan, guodong guo, stan z. li, "deeply-learned hybrid representation for
facial age estimation", ijcai, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>20.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ajian liu, jun wan, sergio
escalera, hugo jair escalante, zichang tan, qi yuan, kai wang, chi lin,
guodong guo, isabelle guyon, stan z. li, "multi-modal face anti-spoofing
attack detection challenge at cvpr2019", cvpr workshop, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>21.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shifeng zhang, xiaobo wang,
ajian liu, chenxu zhao, jun wan, sergio escalera, hailin shi, zezheng wang,
stan z. li, "a dataset and benchmark for large-scale multi-modal face
anti-spoofing", cvpr, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>22.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lu zhang, xiangyu zhu,
xiangyu chen, xu yang, zhen lei, zhiyong liu. weakly aligned cross-modal
learning for multispectral pedestrian detection, proceedings of the ieee
international conference on computer vision (iccv). 2019: 5127-5137.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>23.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhiwei liu*, xiangyu zhu*,
guosheng hu, haiyun guo, ming tang, zhen lei, neil m. robertson, jinqiao wang.
semantic alignment: finding semantically consistent ground-truth for facial
landmark detection, in proceedings of the ieee conference on computer vision
and pattern recognition (cvpr). 2019: 3467-3476. (*co-first author)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>24.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hao liu, xiangyu zhu, zhen
lei, stan z. li. adaptiveface: adaptive margin and sampling for face
recognition, in proceedings of the ieee conference on computer vision and
pattern recognition (cvpr). 2019: 3467-3476.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>25.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jianzhu guo, xiangyu zhu,
jinchuan xiao, zhen lei, genxun wan, stan z li. improving face anti-spoofing
by 3d virtual synthesis, iapr international conference on biometrics (icb),
2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>26.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>chubin zhuang, shifeng zhang,
zhen lei, xiangyu zhu, jinqiao wang. fldet: a cpu real-time joint face and
landmark detector, iapr international conference on biometrics (icb), 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>27.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>sufang zhang, qinghai miao,
xiangyu zhu, yingying chen, zhen lei, jinqiao wang. pose-weighted gan for
photorealistic face frontalization, ieee international conference on image
processing (icip). ieee, 2019: 2384-2388.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>28.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lei ju, xiangyu zhu, zhen
lei, xinfang cui, wankou yang, changyin sun. dense facial landmark
localization: database and annotation tool, 34th youth academic annual
conference of chinese association of automation (yac). ieee, 2019: 625-630.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>29.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jinchuan xiao, yinhang tang,
jianzhu guo, yang yang, xiangyu zhu, zhen lei, stan z li. 3dma: a
multi-modality 3d mask face anti-spoofing database, 16th ieee international
conference on advanced video and signal based surveillance (avss). 2019: 1-8.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>30.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yinglu liu, hao shen, yue si,
xiaobo wang, xiangyu zhu, hailin shi, et al. grand challenge of 106-point
facial landmark localization, in 2019 ieee international conference on
multimedia and expo (icme) workshop. 2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>31.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>guanan wang, tianzhu zhang,
jian cheng, si liu, yang yang, zengguang hou. rgb-infrared
cross-modality person re-identification via joint pixel and feature
alignment. iccv, pp. 4321-4330, october 2019, seoul, korea</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>32.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>fanrong li, zitao mo, peisong
wang, zejian liu, jiayun zhang, gang li, qinghao hu, xiangyu he, cong leng,
yang zhang, jian cheng. a system-level solution for low-power object
detection. iccv 2019 workshop on low-power computer vision, october 2019,
seoul, korea</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>33.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiangyu he, zitao mo, peisong
wang, yang liu, mingyuan yang, jian cheng. ode-inspired network design
for single image super-resolution. cvpr, pp. 1732-1741, june 2019, long
beach, ca, usa.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>34.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lei shi, yifan zhang, jian
cheng, hanqing lu. skeleton-based action recognition with directed graph
neural networks. cvpr, pp. 7912-7921, june 2019, long beach, ca, usa
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>35.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lei shi, yifan zhang, jian
cheng, hanqing lu. two-stream adaptive graph convolutional networks for
skeleton-based action recognition. cvpr, pp. 12026-12035, june 2019, long
beach, ca, usa</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>36.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jun fu, jing
liu, haijie tian, yong li, yongjun bao, zhiwei
fang, hanqing lu, dual attention network for scene
segmentation. cvpr, pp. 3146-3154, june 2019, long beach, ca, usa </span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>37.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiangyu he, peisong wang,
jian cheng. k-nearest neighbors hashing. cvpr, pp. 2839-2848, june 2019,
long beach, ca, usa</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>38.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jun fu, jing
liu, yuhang wang, yong li, yongjun bao, jinhui
tang, hanqing lu, adaptive context network for scene parsing. iccv, pp.
6748-6757, october 2019, seoul, korea</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>39.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>linyu zheng, ming
tang, jinqiao wang, hanqing lu, learning features with
differentiable closed-form solver for tracking, iccv, pp. 4020-4029, october
2019, seoul, korea</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>40.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>longteng guo, jing
liu, peng yao, jiangwei li, hanqing lu, mscap: multi-style
image captioning with unpaired stylized text. cvpr, pp. 4204-4213 , june
2019, long beach, ca, usa </span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>41.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhiwei liu, xiangyu
zhu, guosheng hu, haiyun guo, ming tang, zhen
lei, neil martin robertson, jinqiao wang,
semantic alignment: finding semantically consistent ground-truth for facial landmark detection. cvpr,
pp. 3467-3476, june 2019, long beach, ca, usa </span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>42.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xingan ma, kuan
zhu, haiyun guo, jinqiao wang, min huang, qinghai miao,
vehicle re-identification with refined part model. icme
workshops, pp.603-606, july 2019, shanghai, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>43.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yongcheng liu, bin fan,
shiming xiang, chunhong pan: relation-shape convolutional neural network for
point cloud analysis. cvpr 2019: 8895-8904, long beach, california, usa, june
16-20, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>44.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yongcheng liu, bin fan, gaofeng
meng, jiwen lu, shiming xiang, chunhong pan. densepoint: learning densely
contextual representation for efficient point cloud processing, ieee
international conference on computer vision 2019, pp. 5239-5248, october
27-november 2, 2019, seoul, korea.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>45.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>nuo xu, chunlei huo, chunhong
pan: adaptive brightness learning for active object recognition. ieee
international conference on acoustics, speech and signal processing (icassp),
2019: 2162-2166, 12 - 17 may, 2019, brighton, uk.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>46.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>he huang, chunlei huo, feilong
wei, chunhong pan: rotation and scale-invariant object detector for high
resolution optical remote sensing images. ieee international geoscience and
remote sensing symposium (igarss), 2019: 1386-1389, july 28 - august 2, 2019,
yokohama, japan</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>47.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qiaozhe li, xin zhao, ran he
and kaiqi huang, visual-semantic graph reasoning for pedestrian attribute
recognition, proc. aaai conference on artificial intelligence, pp.
8634-8641, january 2019, hawaii, usa.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>48.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>weining wang, yan huang and
liang wang, language-driven temporal activity localization: a semantic
matching reinforcement learning model, proc. ieee conference on computer
vision and pattern recognition, pp. 334-343, june 2019, long beach, ca, usa.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>49.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>chunfeng song, yan huang,
wanli ouyang and liang wang, box-driven class-wise region masking and
filling rate guided loss for weakly supervised semantic segmentation, proc.
ieee conference on computer vision and pattern recognition, pp. 3136-3145,
june 2019, long beach, ca, usa.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>50.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>chenyang si, wentao chen, wei
wang, liang wang and tieniu tan, an attention enhanced graph convolutional
lstm network for skeleton-based action recognition, proc. ieee conference on
computer vision and pattern recognition, pp. 1227-1236, june 2019, long
beach, ca, usa.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>51.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xuecai hu, haoyuan mu,
xiangyu zhang, zilei wang, tieniu tan and jian sun, meta-sr: a
magnification-arbitrary network for super-resolution, proc. ieee conference
on computer vision and pattern recognition, pp. 1575-1584, june 2019, long
beach, ca, usa.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>52.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jian liang, ran he, zhenan
sun and tieniu tan, distant supervised centroid shift: a simple and
efficient approach to visual domain adaptation, proc. ieee conference on
computer vision and pattern recognition, pp. 2975-2984, june 2019, long
beach, ca, usa.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>53.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yan huang and liang
wang, acmm: aligned cross-modal memory for few-shot image and sentence
matching, proc. ieee international conference on computer vision, october
2019, seoul, korea.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>54.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>peipei li, xiang wu, yibo hu,
ran he and zhenan sun, m2fpa: a multi-yaw multi-pitch high-quality database
and benchmark for facial pose analysis, proc. ieee international conference
on computer vision, october 2019, seoul, korea.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>55.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>kai niu, yan huang and liang
wang, fusing two directions in cross-domain adaption for real life
person search by language, proc. ieee international conference on computer
vision workshop, october 2019, seoul, korea.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>56.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hongyuan yu, chengquan zhang,
xuan li, junyu han, errui ding, liang wang, an end-to-end video text
detector with online tracking, proc. international conference on document
analysis and recognition (icdar), september 2019, sydney, australia.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>57.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lingxiao he, yinggang wang,
wu liu, he zhao, zhenan sun, jiashi feng, foreground-aware pyramid
reconstruction for alignment-free occluded person re-identification, proc.
ieee international conference on computer vision, pp. 8450-8459, october
2019, seoul, korea.</span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189722"></a><a
name="_toc24098155"></a><a name="_toc532562241"></a><a name="_toc532391210"><span
style='mso-bookmark:_toc532562241'><span style='mso-bookmark:_toc24098155'><span
style='mso-bookmark:_toc40189722'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>!j_ƌ r</span></b></span></span></span></a><a
name="_toc24465904"></a><a name="_toc24361012"></a><a name="_toc532562411"><span
style='mso-bookmark:_toc24361012'><span style='mso-bookmark:_toc24465904'><span
style='mso-bookmark:_toc40189722'><b><span lang=en-us>pattern recognition</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>58.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ting-bing xu, cheng-lin liu,
data-distortion guided self-distillation for deep neural networks, proc. 33th
aaai, honolulu, hawaii, usa, jan. 27-feb. 1, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>59.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiaobing wang, yingying
jiang, zhenbo luo, cheng-lin liu, hyunsoo choi, sungjin kim, arbitrary shape
scene text detection with adaptive text region representation, cvpr 2019,
long beach, ca, june 16-20, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>60.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hongwei pang, peipei yang,
xiaolin chen, yong wang, cheng-lin liu, insect recognition under natural
scenes using r-fcn with anchor boxes estimation, icig 2019, beijing, china,
august 23-25, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>61.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiaolong yun, yan-ming zhang,
jun-yu ye, cheng-lin liu, online handwritten diagram recognition with graph
attention networks, icig 2019, beijing, china, august 23-25, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>62.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>nibal nayef, cheng-lin liu,
jean-marc ogier, icdar2019 robust reading challenge on multi-lingual scene
text detection and recognition rrc-mlt-2019, proc. 15th icdar, sydney,
australia, september 20-25, 2019, pp.1582-1587.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>63.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>cu vinh loc, jean-christophe
burie, jean-marc ogier, cheng-lin liu, hiding security feature into text
context for securing documents using generated font, proc. 15th icdar,
sydney, australia, september 20-25, 2019, pp.1214-1219.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>64.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>cu vinh loc, jean-christophe
burie, jean-marc ogier, cheng-lin liu, a robust data hiding scheme using
generated content for securing genuine documents, proc. 15th icdar, sydney,
australia, september 20-25, 2019, pp.787-792.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>65.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiang ao, xu-yao zhang,
hong-ming yang, fei yin, cheng-lin liu, cross-modal prototype learning for
zero-shot handwriting recognition, proc. 15th icdar, sydney, australia,
september 20-25, 2019, pp.589-594.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>66.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiao-hui li, fei yin, tao
xue, long liu, jean-marc ogier, cheng-lin liu, instance aware document image
segmentation using label pyramid networks and deep watershed transformation,
proc. 15th icdar, sydney, australia, september 20-25, 2019, pp.514-519.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>67.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yi-kang zhang, heng zhang,
yong-ge liu, qing yang, cheng-lin liu, oracle character recognition by
nearest neighbor classification with deep metric learning, proc. 15th icdar,
sydney, australia, september 20-25, 2019, pp.309-314.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>68.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yue xu, fei yin, da-han wang,
xu-yao zhang, zhaoxiang zhang, cheng-lin liu, casia-ahcdb: a large-scale
chinese ancient handwritten characters database, proc. 15th icdar, sydney,
australia, september 20-25, 2019, pp.793-798.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>69.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>miao zhao, rui-qi wang, fei
yin, xu-yao zhang, lin-lin huang, jean-marc ogier, fast text/non-text image
classification with knowledge distillation, proc. 15th icdar, sydney,
australia, september 20-25, 2019, pp.1458-1463.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>70.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wei feng, wenhao he, fei yin,
xu-yao zhang, cheng-lin liu, textdragon: an end-to-end framework for
arbitrary shaped text spotting, iccv 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>71.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shi yan, wei feng, peng zhao,
cheng-lin liu, progressive scale expansion network with octave convolution
for arbitrary shape scene text detection, acpr 2019, auckland, new zealand.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>72.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jian-hui chen, zuo-ren wang,
cheng-lin liu, accelerating bag-of-words with som, iconip 2019, australia.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>73.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>bingning wang,zhixing
tian,kang liu,jun zhao,ting yao,qi zhang and jingfang xu. document gated
reader for open domain question answering, international acm sigir conference
on research and development in information retrieval</span><span lang=en-us>
(sigir 2019), pp. 85-94, 2019,
paris,france</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>74.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shizhu he,kang liu and
weiting an. learning to align question and answer utterances in customer
service conversation with recurrent pointer networks, the thirty-third aaai
conference on artificial intelligence (aaai 2019),pp 134-141,2019,
hawaii,america</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>75.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jian liu, yubo chen and kang
liu. exploiting the ground-truth: an adversarial imitation based knowledge
distillation approach for event detection, the thirty-third aaai conference
on artificial intelligence (aaai 2019),pp 6754-6761,2019, hawaii,america</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>76.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>cao liu, shizhu he, kang liu
and jun zhao. vocabulary pyramid network: multi-pass encoding and decoding
with multi-level vocabularies for response generation, proceedings of the
58th annual meeting of the association for computational linguistics(acl
2019),pp 3774-3783,2019, florence, italy</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>77.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiang zhang, shizhu he, kang
liu and jun zhao. adansp: uncertainty-driven adaptive decoding in neural
semantic parsing, proceedings of the 58th annual meeting of the association
for computational linguistics(acl 2019),pp 4265-4270,2019, florence, italy</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>78.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>dianbo sui,yubo chen,kang
liu,jun zhao and shengping liu. leverage lexical knowledge for chinese named
entity recognition via collaborative graph network, proceedings of the 2019
conference on empirical methods in natural language processing(emnlp 2019),pp
3828-3838,2019, hongkong,china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>79.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jian liu, yubo chen, kang liu
and jun zhao. neural cross-lingual event detection with minimal parallel
resources, proceedings of the 2019 conference on empirical methods in natural
language processing(emnlp 2019),pp 738-748,2019, hongkong,china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>80.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>cao liu, shizhu he, kang liu,
zaiqing nie and jun zhao. generating questions for knowledge bases via
incorporating diversified contexts and answer-aware loss, proceedings of the
2019 conference on empirical methods in natural language processing(emnlp
2019),pp 2431-2441,2019, hongkong,china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>81.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiangrong zeng, shizhu he,
daojian zeng, kang liu and jun zhao. learning the extraction order of
multiple relational facts in a sentence with reinforcement learning,
proceedings of the 2019 conference on empirical methods in natural language
processing(emnlp 2019),pp 367-377,2019, hongkong,china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>82.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>delai qiu, yuanzhe zhang,
xinwei feng, xiangwen liao, wenbin jiang, yajuan lyu, kang liu and jun zhao.
machine reading comprehension using structural knowledge graph-aware network,
proceedings of the 2019 conference on empirical methods in natural language
processing(emnlp 2019),pp 5895-5900,2019, hongkong,china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>83.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>cao liu, shizhu he, kang liu,
zaiqing nie and jun zhao. incorporating interlocutor-aware context into
response generation on multi-party chatbots, the signll conference on
computational natural language learning(conll 2019 ),pp 718-727,2019,
hongkong,china</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>84.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yang yang, zhen lei; jinqiao
wang, stan z. li. in defense of color names for small-scale person
re-identification, the 12th iapr international conference on biometrics,
2019, crete, greece.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>85.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yaping zhang, shuai nie,
wenju liu, xing xu, dongxiang zhang, heng tao shen, sequence-to-sequence
domain adaptation network for robust text image recognition, in the
proceedings of cvpr 2019, june 16-20,
2019, long beach, ca, usa, pp. 2740-2749.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>86.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yunze gao, yingying
chen, jinqiao wang, hanqing lu, gate-based bidirectional
interactive decoding network for scene text recognition. cikm, pp.
2273-2276, november 2019, beijing, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>87.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhiwei liu, ming
tang, guosheng hu, jinqiao wang,
learning discriminative and complementary patches for face
recognition. fg, pp. 1-7, may, 2019, lille, france</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>88.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yaping zhang, shuai nie,
wenju liu, xing xu, dongxiang zhang, heng tao shen, sequence-to-sequence
domain adaptation network for robust text image recognition, in the
proceedings of cvpr 2019, june 16-20,
2019, long beach, ca, usa, pp. 2740-2749.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>89.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiang wu, huaibo huang,
vishal patel, ran he and zhenan sun, disentangled variational representation
for heterogeneous face recognition, proc. aaai conference on artificial
intelligence, pp. 9005-9012, january 2019, hawaii, usa.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>90.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yan huang, yang long, and
liang wang, few-shot image and sentence matching via gated visual-semantic
embedding, proc. aaai conference on artificial intelligence, pp. 8489-8496,
january 2019, hawaii, usa.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>91.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>linsen song, jie cao,
lingxiao song, yibo hu and ran he, geometry-aware face completion and
editing, proc. aaai conference on artificial intelligence, january 2019,
hawaii, usa.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>92.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>caiyong wang, yong he, yunfan
liu, zhaofeng he, ran he and zhenan sun, sclerasegnet: an improved u-net
model with attention for accurate sclera segmentation, proc. international
conference on biometrics, june 2019, crete, greece.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>93.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>min ren, caiyong wang,
yunlong wang, zhenan sun and tieniu tan, alignment free and distortion
robust iris recognition, proc. international conference on biometrics, june
2019, crete, greece. </span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>94.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yunfan liu, qi li and zhenan
sun, attribute-aware face aging with wavelet-based generative adversarial
networks, proc. ieee conference on computer vision and pattern recognition,
pp. 11877-11886, june 2019, long beach, ca, usa.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>95.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jianze wei, yunlong wang,
xiang wu, zhaofeng he, ran he, zhenan sun, cross-sensor iris recognition
using adversarial strategy and sensor-specific information, proc. ieee
international conference on biometrics: theory, applications, and systems,
september 2019, tampa, florida.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>96.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>tianxiang ma, bo peng, wei
wang, jing dong, "any-to-one face reenactment based on conditional
generative adversarial network," proc. asia-pacific signal and
information processing association annual summit and conference, pp.
1657-1664, november 2019, lanzhou, china.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>97.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zerui chen, yan huang, liang
wang, learning depth-aware heatmaps for 3d human pose estimation in the
wild, proc. british machine vision conference (bmvc), september 2019,
cardiff, wales, uk.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>98.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wu zheng, lin li, zhaoxiang
zhang, yan huang, and liang wang, relational network for skeleton-based
action recognition, proc. ieee international conference on multimedia and
expo (icme), july 2019, shanghai, china.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>99.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jun bai, yi zeng, yuxuan zhao
and feifei zhao. training a v1 like layer using gabor filters in
convolutional neural networks. proceedings of the 2019 international joint
conference on neural networks (ijcnn 2019). budapest, hungary, july 14-19,
2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>100.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>dongcheng zhao, yi zeng.
dynamic fusion of convolutional features based on spatial and temporal
attention for visual tracking. proceedings of the 2019 international joint
conference on neural networks (ijcnn 2019). budapest, hungary, july 14-19,
2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:0cm;
mso-char-indent-count:0'><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189723"></a><a
name="_toc24098156"></a><a name="_toc532562242"></a><a name="_toc532391211"><span
style='mso-bookmark:_toc532562242'><span style='mso-bookmark:_toc24098156'><span
style='mso-bookmark:_toc40189723'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�n�]zf��</span></b></span></span></span></a><a
name="_toc24465905"></a><a name="_toc24361013"></a><a name="_toc532562412"><span
style='mso-bookmark:_toc24361013'><span style='mso-bookmark:_toc24465905'><span
style='mso-bookmark:_toc40189723'><b><span lang=en-us>artificial intelligence</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>101.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>huaiyu li, weiming dong, xing
mei, chongyang ma, feiyue huang, bao-gang hu, lgm-net: learning to generate
matching networks for few shot learning, international conference on machine
learning (icml), pp. 3825-3834, 2019, long beach, ca, usa.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>102.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhe li, jian
cheng. training binary-valued gates lstm. icdar 2019</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>103.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhe li, peisong
wang, hanqing lu, jian cheng, reading selectively via binary input
gated recurrent unit. ijcai, pp. 5074-5080, august 2019, macao,
china</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>104.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>guanan wang, yang yang, jian
cheng, jinqiao wang, zengguang hou. color-sensitive person
re-identification. ijcai, pp. 933-939, august 2019, macao, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>105.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>fei liu, jing
liu, zhiwei fang, richang hong, hanqing lu, densely connected
attention flow for visual question answering. ijcai, pp. 869-875,
august 2019, macao, china</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>106.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shen fang, qi zhang, gaofeng
meng, shiming xiang, chunhong pan: gstnet: global spatial-temporal network
for traffic flow prediction. ijcai 2019: 2286-2293, august 10-16 2019, macao,
china</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>107.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yukang chen, gaofeng meng,
qian zhang, shiming xiang, chang huang, lisen mu, and xinggang wang: renas:
reinforced evolutionary neural architecture search. cvpr 2019: 4787-4796,
long beach, california, usa, june 16-20, 2019</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>108.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jianlong chang, xinbang
zhang, yiwen guo, gaofeng meng, shiming xiang, chunhong pan. data:
differentiable architecture approximation, thirty-third conference on neural
information processing systems, vancouver canada, dec 8-dec-12, 2019. pp.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>109.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yukang chen, tong yang,
xiangyu zhang, gaofeng meng, xinyu xiao, jian sun. detnas: backbone search
for object detection, vancouver canada, dec 8-dec-12, 2019. pp.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>110.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>fenyu hu, yanqiao zhu, shu
wu, liang wang, tieniu tan, hierarchical graph convolutional networks for
semi-supervised node classification, proc. international joint conference on
artificial intelligence (ijcai), pp. 4532-4539, august 2019, macao, china.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>111.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qiaozhe li, xin zhao, ran he,
kaiqi huang, pedestrian attribute recognition by joint visual-semantic
reasoning and knowledge distillation, proc. international joint conference
on artificial intelligence (ijcai), pp. 833-839, august 2019, macao, china.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>112.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>weikuo guo, huaibo huang,
xiangwei kong, ran he, learning disentangled representation for cross-modal
retrieval with deep mutual information estimation, proc. acm international
conference on multimedia, october 2019, nice, france.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>113.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>chaoyou fu, liangchen song,
xiang wu, guoli wang, ran he, neurons merging layer: towards progressive
redundancy reduction for deep supervised hashing, proc. international joint
conference on artificial intelligence, august 2019, macao, china.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>114.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>junchi yu, jie cao, yi li,
xiaofei jia, ran he, pose-preserving cross spectral face hallucination,
proc. international joint conference on artificial intelligence, august
2019, macao, china.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:0cm;
mso-char-indent-count:0'><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189724"><b><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman";color:black;mso-themecolor:text1'>:ghvf[`n</span><span
lang=en-us style='color:black;mso-themecolor:text1'>machine learning</span></b></a><b><span
lang=en-us style='color:black;mso-themecolor:text1'><o:p></o:p></span></b></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>115.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zeyu cui, zekun li, shu wu,
xiaoyu zhang, liang wang, dressing as a whole: outfit compatibility learning
based on node-wise graph neural networks, proc. the web conference, pp.
307-317, may 2019, san francisco, usa.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>116.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yi li, huaibo huang, junchi
yu, ran he and tieniu tan, cosmetic-aware makeup cleanser, proc. ieee
international conference on biometrics: theory, applications, and systems,
september 2019, tampa, florida, usa.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>117.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>chaoyou fu, xiang wu, yibo
hu, huaibo huang and ran he, dual variational generation for low shot
heterogeneous face recognition, proc. conference on neural information
processing systems (nips), december 2019, vancouver, canada.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>118.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qiyue yin, qingming li, junge
zhang, shu wu, multi-view clustering via adversarial view embedding and
adaptive view fusion, proc. acm international conference on information and
knowledge management (cikm), november 2019, beijing china.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:0cm;
mso-char-indent-count:0'><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:3'><a name="ole_link2"></a><a
name="_toc40189725"><span style='mso-bookmark:ole_link2'><b><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman";color:black;mso-themecolor:text1'>pencc�c</span><span
lang=en-us style='color:black;mso-themecolor:text1'>data mining</span></b></span></a><span
style='mso-bookmark:ole_link2'><b><span lang=en-us style='color:black;
mso-themecolor:text1'><o:p></o:p></span></b></span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><span
style='mso-bookmark:ole_link2'><a name="_hlk11231759"><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>119.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shu wu, yuyuan tang, yanqiao
zhu, liang wang, xing xie, tieniu tan, session-based recommendation with
graph neural network, proc. aaai conference on artificial intelligence, pp.
346-353, january 2019, hawaii, usa.</span></a></span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><span
style='mso-bookmark:ole_link2'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>120.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qiang cui, yuyuan tang, shu
wu and liang wang, distance2pre: personalized spatial
preference for next point-of-interest prediction, proc. pacific-asia
conference on knowledge discovery and data mining (pakdd), pp. 289-301, april
2019, macau, china.</span></span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><span
style='mso-bookmark:ole_link2'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>121.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zekun li, zeyu cui, shu wu,
xiaoyu zhang, liang wang, fi-gnn: modeling feature interactions via graph neural
networks for ctr prediction, proc. acm international conference on
information and knowledge management (cikm), november 2019, beijing,
china.</span></span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><span
style='mso-bookmark:ole_link2'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>122.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jingyi wang, qiang liu,
zhaocheng liu, shu wu, towards accurate and interpretable sequential
prediction: a cnn & attention-based feature extractor, proc. acm
international conference on information and knowledge management (cikm),
november 2019, beijing china.</span></span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><span
style='mso-bookmark:ole_link2'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>123.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zekun li, zeyu cui, shu
wu, xiaoyu zhang, liang wang, semi-supervised compatibility learning across
categories for clothing matching, proc. ieee international conference on
multimedia and expo (icme), july 2019, shanghai, china.</span></span></p>
<p class=af0><s><span lang=en-us><o:p><span style='text-decoration:none'> </span></o:p></span></s></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189726"></a><a
name="_toc24098157"></a><a name="_toc532562245"></a><a name="_toc532391214"><span
style='mso-bookmark:_toc532562245'><span style='mso-bookmark:_toc24098157'><span
style='mso-bookmark:_toc40189726'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>���{:g�vb_f[</span></b></span></span></span></a><a
name="_toc24465906"></a><a name="_toc24361014"></a><a name="_toc532562415"><span
style='mso-bookmark:_toc24361014'><span style='mso-bookmark:_toc24465906'><span
style='mso-bookmark:_toc40189726'><b><span lang=en-us>computer graphics</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>124.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yu song, fan tang, weiming
dong, feiyue huang, changsheng xu, balance-based photo posting, siggraph
asia (posters), november 17-20, 2019, brisbane, qld, australia.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>125.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yiqun wang, jianwei guo, jun
xiao, and dong-ming yan. 2019. a wavelet energy decomposition signature for
robust non-rigid shape matching. acm siggraph asia 2019 posters.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>126.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhengda lu, jianwei guo, jun
xiao, ying wang, xiaopeng zhang and dong-ming yan. feature
curve network extraction via quadric surface fitting, pacific graphics short
papers, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>127.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qiang yu, wei sui, ying wang,
shiming xiang, chunhong pan. incremental poisson surface reconstruction for
large scale three-dimensional modeling, in proceedings of the 2nd chinese
conference on pattern recognition and computer vision, lncs 11859, pp.
442-453, 2019.</span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189727"></a><a
name="_toc24098158"><span style='mso-bookmark:_toc40189727'><b><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>y�zso���{</span></b></span></a><a name="_toc24465907"></a><a
name="_toc24361015"><span style='mso-bookmark:_toc24465907'><span
style='mso-bookmark:_toc40189727'><b><span lang=en-us>multimedia computing</span></b></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>128.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wenhua meng, shan zhang,
xudong yao, xiaoshan yang, changsheng xu, and xiaowen huang: biomedia acm mm
grand challenge 2019: using data enhancement to solve sample unbalance. acm
international conference on multimedia (mm): pp. 2588-2592, nice, france,
october 21-25, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>129.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yaoyu li, hantao yao, lingyu
duan, hanxing yao, changsheng xu, adaptive feature fusion via graph neural
network for person re-identification. acm multimedia 2019: 2115-2123</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>130.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shaobo min, hantao yao,
hongtao xie, zheng-jun zha, yongdong zhang, domain-specific embedding
network for zero-shot recognition. acm multimedia 2019: 2070-2078</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>131.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>junyu gao, tianzhu zhang,
changsheng xu: i know the relationships: zero-shot action recognition via
two-stream graph convolutional networks and knowledge graphs. aaai 2019:
8303-8311</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>132.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>junyu gao, tianzhu zhang,
changsheng xu: graph convolutional tracking. cvpr 2019: 4649-4659</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>133.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xinhong ma, tianzhu zhang,
changsheng xu: gcan: graph convolutional adversarial network for unsupervised
domain adaptation. cvpr 2019: 8266-8276</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>134.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xuan ma, bing-kun bao,
lingling yao, changsheng xu: multimodal latent factor model with language
constraint for predicate detection. icip 2019: 4454-4458</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>135.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiaowen huang, quan fang,
shengsheng qian, jitao sang, yan li, changsheng xu: explainable
interaction-driven user modeling over knowledge graph for sequential
recommendation. acm multimedia 2019: 548-556</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>136.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yingying zhang, shengsheng
qian, quan fang, changsheng xu: multi-modal knowledge-aware hierarchical
attention network for explainable medical question answering. acm multimedia
2019: 1089-1097</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>137.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jun hu, shengsheng qian, quan
fang, changsheng xu: hierarchical graph semantic pooling network for
multi-modal community question answer matching. acm multimedia 2019:
1157-1165</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>138.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>huaiwen zhang, quan fang,
shengsheng qian, changsheng xu: multi-modal knowledge-aware event memory
network for social media rumor detection. acm multimedia 2019: 1942-1951</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>139.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>longteng guo, jing
liu, jinhui tang, jiangwei li, wei luo, hanqing lu,
aligning linguistic words and visual semantic units for image
captioning. acm multimedia, pp. 765-773, october 2019, nice, france</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>140.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yousong zhu, chaoyang
zhao, chenxia han, jinqiao wang, hanqing lu, mask guided
knowledge distillation for single shot detector. icme, pp. 1732-1737,
july 2019, shanghai, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>141.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lu zhou, yingying
chen, jinqiao wang, ming tang, hanqing lu, bi-directional
message passing based scanet for human pose estimation. icme, pp.
1048-1053, july 2019, shanghai, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>142.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>junbo wang, wei wang, zhiyong
wang, liang wang, dagan feng and tieniu tan, stacked memory network for
video summarization, proc. acm multimedia conference, october 2019, nice,
france.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>143.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hongwen zhang, jie cao, guo
lu, wanli ouyang and zhenan sun, danet: decompose-and-aggregate network for
3d human shape and pose estimation, proc. acm multimedia conference, october
2019, nice, france.</span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189728"></a><a
name="_toc24098159"></a><a name="_toc532562246"></a><a name="_toc532391215"><span
style='mso-bookmark:_toc532562246'><span style='mso-bookmark:_toc24098159'><span
style='mso-bookmark:_toc40189728'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�va�</span><span
lang=en-us>/</span></b></span></span></span></a><span style='mso-bookmark:
_toc532391215'><span style='mso-bookmark:_toc532562246'><span
style='mso-bookmark:_toc24098159'><span style='mso-bookmark:_toc40189728'><b><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>ɖ��ytnr�g</span></b></span></span></span></span><a
name="_toc24465908"></a><a name="_toc24361016"></a><a name="_toc532562416"><span
style='mso-bookmark:_toc24361016'><span style='mso-bookmark:_toc24465908'><span
style='mso-bookmark:_toc40189728'><b><span lang=en-us>image/video processing
and analysis</span></b></span></span></span></a><b><span lang=en-us><o:p></o:p></span></b></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>144.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>tingting xie, xiaoshan yang,
tianzhu zhang, changsheng xu, ioannis patras: exploring feature
representation and training strategies in temporal action localization. ieee international
conference on image processing (icip), pp. 1605-1609, taipei, taiwan,
september 22-25, 2019.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>145.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>changde du; changying du;
huiguang he, doubly semi-supervised multimodal adversarial learning for
classification, generation and retrieval , ieee international conference on
multimedia and expo (icme), pages. 13-18, 2019</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>146.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>haibao wang, lijie huang,
changde du, huiguang he, learning "what" and "where": an
interpretable neural encoding model, international joint conference on
neural networks, ijcnn 2019 budapest, hungary, july 14-19, 2019, pp. 1-8,</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>147.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yile feng, xiaoqi chai, qinle
ba, ge yang, quality assessment of synthetic fluorescence microscopy images
for image segmentation, 2019 ieee international conference on image
processing (icip), sep. 22-25, taipei, taiwan, china</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>148.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lei shi, yifan
zhang, jing hu, jian cheng, hanqing lu, gesture recognition
using spatiotemporal deformable convolutional representation. icip, pp.
1900-1904, september 2019, taipei, taiwan, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>149.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>haiyun guo, huiyao
wu, chaoyang zhao, huichen zhang, jinqiao wang, hanqing
lu, cascade attention network for person re-identification. icip, pp.
2264-2268, september 2019, taipei, taiwan, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>150.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>fei liu, jing
liu, zhiwei fang, hanqing lu, language and visual relations
encoding for visual question answering. icip, pp. 3307-3311, september
2019, taipei, taiwan, china</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>151.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jie gu, gaofeng meng, cheng
da, shiming xiang, chunhong pan: no-reference image quality assessment with
reinforcement recursive list-wise ranking. in proceedings of the 33th aaai
conference on artificial intelligence (aaai) 2019: 8336-8343, hawaii, usa,
january 27 - february 1, 2019</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>152.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhengkai jiang, peng gao,
chaoxu guo, qian zhang, shiming xiang, chunhong pan: video object detection
with locally-weighted deformable neighbors. in proceedings of the 33th aaai
conference on artificial intelligence (aaai) 2019: 8529-8536, hawaii, usa,
january 27 - february 1, 2019</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>153.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>chaoxu guo, bin fan, jie gu,
qian zhang, shiming xiang, veronique prinet, chunhong pan. progressive sparse
local attention for video object detection, ieee international conference on
computer vision, pp. 3909-3918, october 27-november 2, 2019, seoul, korea.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>154.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>da li, dangwei li, zhang
zhang, liang wang and tieniu tan, unsupervised cross-domain person
re-identification: a new framework. proc. ieee international conference on
image processing, september 2019, taiwan.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>155.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yi fan song, zhang zhang and
liang wang, richly activated graph convolutional network for action
recognition with incomplete skeletons, proc. ieee international conference on
image processing, september 2019, taiwan.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>156.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zerui chen, yan huang and
liang wang, augmented visual-semantic embeddings for image and sentence
matching, proc. ieee international conference on image processing, september
2019, taiwan.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>157.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>linjiang huang, yan huang,
wanli ouyang and liang wang, hierarchical graph convolutional network
for skeleton-based action recognition, international conference on image and
graphics (icig), august 2019, beijing, china.</span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189729"></a><a
name="_toc24098160"></a><a name="_toc532562247"></a><a name="_toc532391216"><span
style='mso-bookmark:_toc532562247'><span style='mso-bookmark:_toc24098160'><span
style='mso-bookmark:_toc40189729'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>;sf[�va�r�g</span></b></span></span></span></a><a
name="_toc24465909"></a><a name="_toc24361017"></a><a name="_toc532562417"><span
style='mso-bookmark:_toc24361017'><span style='mso-bookmark:_toc24465909'><span
style='mso-bookmark:_toc40189729'><b><span lang=en-us>medical image analysis</span></b></span></span></span></a><b><span
lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>158.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>bo wang, shuang qiu, huiguang
he. dual encoding u-net for retinal vessel segmentation, medical image
computing and computer assisted intervention - miccai 2019 - 22nd
international conference, shenzhen, china, pp. 84-92, october 13-17, 2019,
proceedings, part i.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>159.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shuang qiu, shengpei wang,
weibo yi, chuncheng zhang, huiguang he: the lasting effects of 1hz
repetitive transcranial magnetic stimulation on resting state eeg in healthy
subjects, 41st annual international conference of the ieee engineering in
medicine and biology society, embc 2019, berlin, germany, pp. 5918-5922, july
23-27, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>160.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yixin wang, shuang qiu, chen
zhao, weijie yang, jinpeng li, xuelin ma, huiguang he, eeg-based emotion
recognition with prototype-based data representation, 41st annual
international conference of the ieee engineering in medicine and biology
society, embc 2019, berlin, germany, pp. 684-689, july 23-27, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>161.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yixin wang, shuang qiu,
jinpeng li, xuelin ma, zhiyue liang, hui li, huiguang he, eeg-based emotion
recognition with similarity learning network, 41st annual international
conference of the ieee engineering in medicine and biology society, embc
2019, berlin, germany, pp. 1209-1212, july 23-27, 2019</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l8 level1 lfo8;tab-stops:list 21.0pt'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>162.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yuanxing zhao, yanming zhang,
ming song, cheng-lin liu, multi-view semi-supervised 3d whole brain segmentation
with a self-ensemble network, miccai, shenzhen, china, 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>163.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>chen x, shen l, xie q.w, han
h. skeleton-based image registration of serial electron microscopy sections,
medical imaging 2019: digital pathology. international society for optics and
photonics, 2019, vol.10956, pp.1095605.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>164.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiang y, xiao c, li l, chen
x, shen l.j and han h. an effective encoder-decoder network for neural cell
bodies and cell nucleus segmentation of em images. 2019 41st annual
international conference of the ieee engineering in medicine and biology
society (embc). ieee, pp. 6302-6305.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>165.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>luo j, hong b, jiang y, li
l.l, xie q.w and han h. automatic classification for the type of multiple
synapse based on deep learning. 2019 41st annual international conference of
the ieee engineering in medicine and biology society (embc). ieee, pp. 40-43.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>166.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>dou x, yao h, feng f, wang p,
zhou b, jin d, yang z, li j, zhao c, wang l, an n, liu b, zhang x, liu y.
characterizing white matter connectivity in alzheimer's disease and mild
cognitive impairment: automated fiber quantification.<span
style='mso-spacerun:yes'>� </span>2019 ieee 16th international symposium on
biomedical imaging (isbi 2019); 2019 april 8-11, 2019; 2019. p. 117-21.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>167.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hu g, cui b, yu s.
skeleton-based action recognition with synchronous local and non-local spatio-temporal
learning and frequency attention; 2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>168.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jin d, xu j, zhao k, hu f,
yang z, liu b, jiang t, liu y. attention-based 3d convolutional network for
alzheimer's disease diagnosis and biomarkers exploration.<span
style='mso-spacerun:yes'>� </span>2019 ieee 16th international symposium on
biomedical imaging (isbi 2019); 2019 april 8-11, 2019; 2019. p. 1047-51.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>169.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yao d, liu m, wang m, lian c,
wei j, sun l, sui j, shen d. triplet graph convolutional network for
multi-scale analysis of functional connectivity using functional mri.<span
style='mso-spacerun:yes'>� </span>graph learning in medical imaging; 2019. p.
70-8.</span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:3'><a name="_toc40189730"></a><a
name="_toc24098161"></a><a name="_toc532562248"></a><a name="_toc532391217"><span
style='mso-bookmark:_toc532562248'><span style='mso-bookmark:_toc24098161'><span
style='mso-bookmark:_toc40189730'><b><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�����b/g</span></b></span></span></span></a><a
name="_toc24465910"></a><a name="_toc24361018"></a><a name="_toc532562418"><span
style='mso-bookmark:_toc24361018'><span style='mso-bookmark:_toc24465910'><span
style='mso-bookmark:_toc40189730'><b><span lang=en-us>speech and language
technology</span></b></span></span></span></a><b><span lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>170.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yang zhao, jiajun zhang,
chengqing zong, zhongjun he, and hua wu. addressing the under-translation
problem from the entropy perspective. in proceedings of the thirty-third aaai
conference on artificial intelligence (aaai), honolulu, hawaii, usa, january
27-february 1, 2019, pp.451-458</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>171.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>junjie li, haoran li and
chengqing zong. 2019. towards personalized review summarization via
user-aware sequence network. in proceedings of the 33rd aaai conference on
artificial intelligence (aaai), honolulu, hawaii, usa, january 27th -
february 1st, 2019, vol. 33,<span style='mso-spacerun:yes'>� </span>pp.
6690-6697</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>172.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jingyuan sun, shaonan wang,
jiajun zhang and chengqing zong. towards sentence-level brain decoding with
distributed representations. in proceedings of the 33rd aaai conference on
artificial intelligence (aaai), honolulu, hawaii, usa, january 27th -
february 1st, 2019, vol. 33,<span style='mso-spacerun:yes'>� </span>pp.
7047-7054</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>173.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>long zhou, jiajun zhang,
chengqing zong, and heng yu. sequence generation: from both sides to the
middle. in proceedings of the 28th international joint conference on
artificial intelligence (ijcai), macao, china, august 10-16, 2019,
pp.5471-5477</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>174.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>weikang wang, jiajun zhang,
qian li, mei-yuh hwang, chengqing zong and zhifei li. incremental learning
from scratch for task-oriented dialogue systems. in proceedings of the 57th
annual meeting of the association for computational linguistics (acl),
florence, italy, july 28-august 2, 2019, pp.3710-3720</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>175.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yining wang, long zhou,
jiajun zhang, feifei zhai, jingfang xu and chengqing zong. a compact and
language-sensitive multilingual translation method. in proceedings of the
57th annual meeting of the association for computational linguistics (acl),
florence, italy, july 28th-august 2nd, 2019, pp.1213-1223</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>176.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>he bai, yu zhou, jiajun zhang
and chengqing zong. memory consolidation for contextual spoken language
understanding with dialogue logistic inference. in proceedings of the 57th
annual meeting of the association for computational linguistics (acl), florence,
italy, july 28 - august 2, 2019, pp.5448-5453</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>177.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>junjie li, xuepeng wang,
dawei yin and chengqing zong. attribute-aware sequence network for review
summarization. in proceedings of 2019 conference on empirical methods in
natural language processing and 9th international joint conference on natural
language processing (emnlp-ijcnlp), november 3-7, hong kong, china, 2019,
pp.2991-3001</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>178.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>weikang wang, jiajun zhang,
qian li, chengqing zong and zhifei li. are you for real? detecting identity
fraud via dialogue interactions. in proceedings of 2019 conference on
empirical methods in natural language. processing and 9th international joint
conference on natural language processing (emnlp-ijcnlp), november 3-7, hong
kong, china, 2019, pp.1762-1771</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>179.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yining wang, jiajun zhang,
long zhou, yuchen liu and chengqing zong. synchronously generating two
languages with interactive decoding. in proceedings of 2019 conference on
empirical methods in natural language processing and 9th international joint
conference on natural language processing (emnlp-ijcnlp), november 3-7, hong
kong, china, 2019, pp.3341-3346</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>180.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>junnan zhu, qian wang, yining
wang, yu zhou, jiajun zhang, shaonan wang, and chengqing zong. ncls: neural
cross-lingual summarization. in proceedings of 2019 conference on empirical
methods in natural language processing and 9th international joint conference
on natural language processing (emnlp-ijcnlp), november 3-7, hong kong,
china, 2019, pp.3045-3055</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>181.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yuchen liu, hao xiong, jiajun
zhang, zhongjun he, hua wu, haifeng wang and chengqing zong. end-to-end
speech translation with knowledge distillation. in proceedings of the 20th
annual conference of the international speech communication association
(interspeech), graz, austria, september 15-19, 2019, pp.1128-1132</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>182.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>kexin wang, yu zhou, shaonan
wang, jiajun zhang and chengqing zong. understanding memory modules on
learning simple algorithms. in t. miller, r. weber, d. magazzeni (eds.) ijcai
2019 explainable ai workshop. https://sites.google.com/view/xai2019/home</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>183.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zheng lian, jianhua tao, bin
liu, jian huang, conversational emotion analysis via attention mechanisms,
20th annual conference of the speech communication association (interspeech
2019), pp. 1936-1940, sept.15-19,2019, graz, austria</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>184.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zheng lian, jianhua tao, bin
liu, jian huang, unsupervised representation learning with future observation
prediction for speech emotion recognition 20th annual conference of the
speech communication association (interspeech 2019)</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>pp. 3840-3844, sept.15-19,2019,
graz, austria</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>185.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhengkun tian, jiangyan yi,
jianhua tao, ye bai, zhengqi wen self-attention transducers for end-to-end
speech recognition 20th annual conference of the speech communication
association (interspeech 2019)</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>pp. 4395-4399, sept.15-19,2019, graz, austria</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>186.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ye bai, jiangyan yi, jianhua
tao, zhengkun tian, zhengqi wen learn spelling from teachers: transferring
knowledge from language models to sequence-to-sequence speech
recognition 20th annual conference of the speech communication
association (interspeech 2019)</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>pp. 3795-3799, sept.15-19,2019, graz, austria</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>187.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ye bai, jiangyan yi, jianhua
tao, zhengqi wen, zhengkun tian, chenghao zhao, cunhang fan a time delay
neural network with shared weight self-attention for small-footprint keyword
spotting 20th annual conference of the speech communication
association (interspeech 2019)</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>pp. 2190-2194, sept.15-19,2019, graz, austria</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>188.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>cunhang fan,bin liu, jianhua
tao, jiangyan yi, zhengqi wen</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us> discriminative learning for monaural speech separation using deep
embedding features 20th annual conference of the speech communication
association (interspeech 2019), pp.4599-4603, sept.15-19,2019, graz, austria</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>189.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>mingyue niu,jianhua tao,bin
liu,cunhang fan</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�</span><span lang=en-us> automatic
depression level detection<span style='mso-spacerun:yes'>� </span>via lp-norm
pooling 20th annual conference of the speech communication association
(interspeech 2019), pp.4559-4563, sept.15-19,2019, graz, austria</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>190.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ruibo fu , jianhua tao,
zhengqi wen, yibin zheng, phoneme dependent speaker embedding and model
factorization for multi-speaker speech synthesis and adaptation international
conference on acoustics, speech and signal processing </span><span
lang=en-us style='mso-bidi-font-family:"times new roman"'>(</span><span
lang=en-us>icassp)</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>pp:6930-6934,may.12-17,2019,brighton, uk</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>191.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiangyan yi, jianhua tao, ye
bai</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�</span><span lang=en-us> language-invariant
bottleneck features from adversarial end-to-end acoustic models for low
resource speech recognition international conference on acoustics, speech and
signal processing (icassp), pp:6071-6075,may.12-17,2019,brighton, uk</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>192.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiangyan yi, jianhua
tao self-attention based model for punctuation prediction using word and
speech embeddings international conference on acoustics, speech and signal
processing (icassp), pp:7270-7274, may.12-17, 2019, brighton, uk</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>193.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>mingyue niu,ya li,jianhua
tao,jian huang,zheng lian discriminative video recognition with temporal order
for microexpression recognition international conference on acoustics, speech
and signal processing (icassp), pp:2112-2116,may.12-17,2019,brighton, uk</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>194.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>bocheng zhao,minghao
yang,jianhua tao drawing order recovery for handwriting chinese
characters international conference on acoustics, speech and signal
processing (icassp), pp:3227-3231,may.12-17,2019,brighton, uk</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>195.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jian huang, jianhua tao, bin
liu, zhen lian, mingyue niu</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us> efficient modeling of long temporal contexts for continuous
emotion recognition 8th international conference on affective computing &
intelligent interaction (acii 2019), sept.3-6, 2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>cambridge, united kingdom</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>196.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>cunhang fan,bin liu, jianhua
tao, jiangyan yi, zhengqi wen, ye bai</span><span style='font-family:�[so;
mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us> noise prior knowledge learning for speech enhancement via gated convolutional
generative adversarial network asia-pacific signal and information processing
association annual summit and conference 2019</span><span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>november 18-21, 2019</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>lanzhou, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>197.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiangyan yi, jianhua tao</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us> distilling knowledge for distant
speech recognition via parallel data asia-pacific signal and information
processing association annual summit and conference 2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>november 18-21, 2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>lanzhou, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>198.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiangyan yi, jianhua tao,
batch normalization based unsupervised speaker adaptation for acoustic
models asia-pacific signal and information processing association annual
summit and conference 2019</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>november 18-21, 2019</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>lanzhou, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>199.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>haoxin ma, ye bai, jiangyan
yi, jianhua tao, hypersphere embedding and additive margin for
query-by-example keyword spotting asia-pacific signal and information
processing association annual summit and conference 2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>november 18-21, 2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>lanzhou, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>200.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ye bai, jiangyan yi, jianhua
tao, zhengqi wen, bin liu 'voice activity detection based on time-delay
neural networks asia-pacific signal and information processing association
annual summit and conference 2019</span><span style='font-family:�[so;
mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>november 18-21, 2019</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>lanzhou, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>201.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qiuxian zhang,jiangyan
yi,jianhua tao,mingliang gu,yong ma, focal loss for end-to-end short
utterances chinese dialect identification , asia-pacific signal and
information processing association annual summit and conference 2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>november 18-21, 2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>lanzhou, china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>202.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yibin zheng,xi wang,lei
he,shifeng pan,frank k. soong,zhengqi wen,jianhua tao, forward-backward
decoding for regularizing end-to-end tts , 20th annual conference of the
international speech communication association, pp:1283-1287,sept.15-19,2019,
graz, austria</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>203.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>bin liu, shuai nie, yaping
zhang, shan liang, zhanlei yang, wenju liu, focal loss and
double-edge-triggered detector for robust small-footprint keyword spotting,
in the proceedings of icassp 2019 (2019 ieee international conference on
acoustics, speech, and signal processing), may 12-17, 2019, brighton, uk,
6361-6365.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>204.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>guanjun li, shan liang, shuai
nie,wenju liu, adaptive dereverberation using multi-channel linear
prediction with deficient length filter, in the proceedings of icassp 2019
(2019 ieee international conference on acoustics, speech, and signal
processing), may 12-17, 2019, brighton, uk, 556-560.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>205.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>bin liu, shuai nie,yaping
zhang, shan liang, wenju liu,meng yu, lianwu chen, shouye peng, changliang
li, jointly adversarial enhancement training for robust end-to-end speech
recognition, in the proceedings of interspeech2019 </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>the 20th annual conference of the
international speech communication association</span><span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'> �</span><span
lang=en-us>, september 15-19, graz, austria 2019, 491-495.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>206.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>guanjun li, shan liang, shuai
nie,wenju liu, meng yu, lianwu chen, shouye peng, changliang li, jointly
adversarial enhancement training for robust end-to-end speech recognition, in
the proceedings of interspeech2019 </span><span style='font-family:�[so;
mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>the 20th annual conference of the international speech
communication association</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'> �</span><span
lang=en-us>, september 15-19, graz, austria 2019, 2713-2717.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>207.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>bin liu, shuai nie, wenju
liu, hui zhang, xiangang li, and changliang li, deep segment attentive
embedding for duration robust speaker verification, in the proceedings of
apsipa annual summit and conference 2019 </span><span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>2019 asia-pacific signal and information processing association
annual summit and conference</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'> �</span><span
lang=en-us>, 18-21 november 2019, lanzhou, china, 822-826</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>208.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xinyu xiao, lingfeng wang,
shiming xiang, and chunhong pan: what and where the themes dominate in image.
in proceedings of the 33rd aaai conference on artificial intelligence (aaai)
2019: 9021-9029, hawaii, usa, january 27 – february 1, 2019</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l8 level1 lfo8;
tab-stops:list 21.0pt'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>209.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xinyu xiao, lingfeng wang,
bin fan, shiming xiang, and chunhong pan. guiding the flowing of semantics:
interpretable video captioning via pos tag, proceedings of the 2019
conference on empirical methods in natural language processing and the 9th
international joint conference on natural language processing, pages
2068–2077, hong kong, china, november 3–7, 2019.</span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:2'><a name="_toc40189731"></a><a
name="_toc24098162"></a><a name="_toc532562249"></a><a name="_toc532391218"><span
style='mso-bookmark:_toc532562249'><span style='mso-bookmark:_toc24098162'><span
style='mso-bookmark:_toc40189731'><b style='mso-bidi-font-weight:normal'><span
style='font-size:12.0pt;mso-bidi-font-size:11.0pt;line-height:110%;
font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�v�qo��</span></b></span></span></span></a><a
name="_toc24465911"></a><a name="_toc24361019"></a><a name="_toc532562419"></a><span
style='mso-bookmark:_toc532562419'><span style='mso-bookmark:_toc24361019'><span
style='mso-bookmark:_toc24465911'><span style='mso-bookmark:_toc40189731'><b
style='mso-bidi-font-weight:normal'><span lang=en-us style='font-size:12.0pt;
mso-bidi-font-size:11.0pt;line-height:110%'>national conference</span></b></span></span></span></span><span
style='mso-bookmark:_toc24465911'><span style='mso-bookmark:_toc40189731'><b
style='mso-bidi-font-weight:normal'><span lang=en-us style='font-size:12.0pt;
mso-bidi-font-size:11.0pt;line-height:110%'>s</span></b></span></span><b
style='mso-bidi-font-weight:normal'><span lang=en-us style='font-size:12.0pt;
mso-bidi-font-size:11.0pt;line-height:110%'><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>guirong bai, shizhu he, kang
liu and jun zhao. variational attention for commonsense knowledge aware
conversation generation, the 8th ccf international conference on natural
language processing and chinese computing (nlpcc 2019) , pp3-15, 10.12-10.14,
dunhuang, china<span style='mso-tab-count:1'>�� </span></span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>2.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>bo zhou, yubo chen, kang liu
and jun zhao. relation and fact type supervised knowledge graph embedding via
weighted scores, the eighteenth china national conference on computational
linguistics (ccl 2019), pp258-267, 10.18-10.20, kunming china<span
style='mso-tab-count:1'>���� </span></span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>3.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>delai qiu, liang bao, zhixing
tian, yuanzhe zhang, kang liu, jun zhao and xiangwen liao. delai qiu, liang
bao, zhixing tian, yuanzhe zhang, kang liu, jun zhao and xiangwen liao, the
eighteenth china national conference on computational linguistics (ccl 2019),
pp93-104, 10.18-10.20, kunming china<span style='mso-tab-count:1'>� </span></span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>4.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>guirong bai, shizhu he, kang
liu and jun zhao. utterance alignment in custom service by integer
programming, the eighteenth china national conference on computational
linguistics (ccl 2019), pp703-714, 10.18-10.20, kunming china</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>5.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�n�s� _'yo�v��^ns�c�vt� b�tq�~l�:nr�g�v�so(u7b;u�p�g�^n�^(u ,{as�nj\hq�v�n:g�����f[/go���</span><span
lang=en-us>ncmmsc2019</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'> ��</span><span
lang=en-us>pp</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�</span><span lang=en-us>644-650,2019.08.15-17</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�r�wm���[</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>6.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�pwzs�v��^ns�)nck�h</span>
<span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�f_l�q</span> <span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>��sf</span>
<span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�:_%f��</span><span lang=en-us> </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>y���penc�^ꁨros�e�lxvz</span><span lang=en-us> </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>,{as�nj\hq�v�n:g�����f[/go���</span><span lang=en-us>ncmmsc2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'> �</span><span lang=en-us>pp:256-268,2019.08.14-17</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�r�wm���[</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>7.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>:_%f���sf�f_l�q��pwzs�)nck�h�ѐz��e� _^�v��^ns� ��t���_�oo`�v-n�e�z0r�z��tb�|�~ </span>
<span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>,{as�nj\hq�v�n:g�����f[/go���</span><span
lang=en-us>ncmmsc2019</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'> �</span><span
lang=en-us>pp:521-526,2019.08.14-16</span><span style='font-family:�[so;
mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�r�wm���[</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>8.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yaping zhang, shuai nie, shan
liang, and wenju liu, bidirectional adversarial domain adaptation with
semantic consistency, in the proceedings of prcv2019 (2019 chinese
conference on pattern recognition and computer vision), november 8-11, 2019,
xian, china, 184-198.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>9.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yaping zhang, shuai nie, shan
liang, and wenju liu, bidirectional adversarial domain adaptation with
semantic consistency, in the proceedings of prcv2019 (2019 chinese
conference on pattern recognition and computer vision), november 8-11, 2019,
xian, china, 184-198.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>10.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>m. bian, b. peng, w. wang and
j. dong, an accurate lstm based video heart rate estimation method, proc.
chinese conference on pattern recognition and computer vision (prcv), pp.
409–417, november 2019, xi'an, china.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>11.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hongyuan yu, yan huang, lihong
pi, liang wang, "recurrent deconvolutional generative adversarial
networks with application to video generation, proc. chinese conference on
pattern recognition and computer vision (prcv), november 2019, xi'an, china.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l7 level1 lfo10'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>12.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>x. xuan, b. peng, w. wang and
j. dong, on the generalization of gan image forensics, proc. chinese
conference on biometric recognition, pp. 134–141, october 2019, hunan, china.</span></span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:2'><a name="_toc40189732"></a><a
name="_toc24098163"></a><a name="_toc532562250"></a><a name="_toc532391219"><span
style='mso-bookmark:_toc532562250'><span style='mso-bookmark:_toc24098163'><span
style='mso-bookmark:_toc40189732'><b style='mso-bidi-font-weight:normal'><span
style='font-size:12.0pt;mso-bidi-font-size:11.0pt;line-height:110%;
font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>_>e�����[�^�sh����e</span></b></span></span></span></a><b
style='mso-bidi-font-weight:normal'><span lang=en-us style='font-size:12.0pt;
mso-bidi-font-size:11.0pt;line-height:110%'><o:p></o:p></span></b></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:2'><a name="_toc40189733"></a><a
name="_toc24098164"></a><a name="_toc532562251"></a><a name="_toc532391220"><span
style='mso-bookmark:_toc532562251'><span style='mso-bookmark:_toc24098164'><span
style='mso-bookmark:_toc40189733'><b style='mso-bidi-font-weight:normal'><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�ve�
rir</span></b></span></span></span></a><b
style='mso-bidi-font-weight:normal'><span lang=en-us><o:p></o:p></span></b></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><a name="ole_link51"><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wenqi ren, jinshan pan, hua
zhang, xiaochun cao, and ming-hsuan yang, single image dehazing via
multi-scale convolutional neural networks with holistic edges, international
journal of computer vision, 2019</span></a></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>2.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wenqi ren, jingang zhang,
xiangyu xu, lin ma, xiaochun cao, gaofeng meng, and wei liu, deep video
dehazing with semantic segmentation, ieee transactions on image processing,
vol. 28, no. 4, pp. 1895–1908, 2019</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>3.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>boyi li, wenqi ren, dengpan
fu, dacheng tao, dan feng, wenjun zeng, and zhangyang wang, benchmarking
single image dehazing and beyond, ieee transactions on image processing, vol.
28, no. 1, pp. 492-505, 2019</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>4.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qingbo wu, wenqi ren, and
xiaochun cao, learning interleaved cascade of shrinkage fields for joint
image dehazing and denoising, ieee transactions on image processing, vol. 29,
pp. 1788-1801, 2019</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>5.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiao-diao chen, junyi ma,
yixin li. approximating trigonometric functions by using exponential
inequalities. journal of inequalities and applications, 2019:53. </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>6.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>pin wang, yao cao, meifang
yin, yongming li, shanshan lv, lixian huang, dayong zhang, yongquan luo, and
jun wu. full-field burn depth detection based on near-infrared hyperspectral
imaging and ensemble regression. review of scientific instrument, 2019,90,
064103 (2019); doi: 10.1063/1.5034503 </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>7.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yuchuan liu, xiaoheng tan*,
yongming li*, pin wang. weighted local discriminant preservation projection
ensemble algorithm with embedded micro-noise</span></span><span
style='mso-bookmark:ole_link51'><span lang=en-us style='mso-bidi-font-family:
"times new roman"'>, ieee access, </span><span lang=en-us>2019,7: 143814 -
143828,30 september 2019 (sci/ei</span></span><span style='mso-bookmark:ole_link51'><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>20194307573127) </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>8.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>pin wang, lirui wang,
yongming li, qi song, shanshan lv, xianling hu. automatic cell nuclei
segmentation and classification of cervical pap smear images. biomedical
signal processing &amp; control, 48 (2019) 93–103 (sci/ei)</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>9.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>j. yang, y. sun, j. liang, b.
ren, s.-h. lai, image captioning by incorporating affective concepts learned
from both visual and textual components, neurocomputing, 328: 56-68, 2019</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>10.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>j. yang, j. liang, k. wang,
p. l. rosin, m.-h. yang, subspace clustering via good neighbors, tpami, 2019</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>11.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>j. liang, j. yang, m.-m.
cheng, p. l. rosin, l. wang, simultaneous subspace clustering and cluster
number estimating based on triplet relationship, tip, 28(8): 3973-3985, 2019</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>12.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>j. yang, x. wu, j. liang, x.
sun, m.-m. cheng, p. l. rosin, l. wang, self-paced balance learning for
clinical skin disease recognition, tnnls, 2019</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>13.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>pengbo bo, yujian zheng,
xiaohong jia, caiming zhang: multi-strip smooth developable surfaces from
sparse design curves. computer-aided design 114: 1-12 (2019)</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>14.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>pengbo bo, michael barton: on
initialization of milling paths for 5-axis flank cnc machining of free-form
surfaces with general milling tools. computer aided geometric design 71:
30-42 (2019)</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>15.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lu bai, luca rossi, </span></span><span
style='mso-bookmark:ole_link51'><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>]=n#k����\o� ��</span><span
lang=en-us> jian cheng, edwin r. hancock, a quantum-inspired similarity measure
for the analysis of complete weighted graphs</span></span><span
style='mso-bookmark:ole_link51'><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>ieee transactions on cybernetics, doi: 10.1109/tcyb.2019.2913038 </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>16.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>]=n#k</span><span
lang=en-us>(</span></span><span style='mso-bookmark:ole_link51'><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>,{n\o�</span><span lang=en-us>), lu bai, zhihong zhang,
edwin r. hancock. identifying the most informative features using a
structurally interacting elastic net, neurocomputing, 2019</span></span><span
style='mso-bookmark:ole_link51'><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>336,13-26.<span style='mso-spacerun:yes'>� </span></span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>17.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lu bai, </span></span><span
style='mso-bookmark:ole_link51'><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>]=n#k</span><span
lang=en-us>(</span></span><span style='mso-bookmark:ole_link51'><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>qqt,{n\o�</span><span lang=en-us>), xiao bai, edwin
r.hancock. deep depth-based representations of graphs through deep learning
networks, neurocomputing,2019</span></span><span style='mso-bookmark:ole_link51'><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>336,3-12.</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>18.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiao-yu zhang, haichao shi,
xiaobin zhu, peng li: active semi-supervised learning based on
self-expressive correlation with generative adversarial networks.
neurocomputing (neucom), 345, 103-113, 2019.</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>19.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiaobin zhu, xinming zhang,
xiao-yu zhang*, ziyu xue, lei wang: a novel framework for semantic
segmentation with generative adversarial network. journal of visual
communication and image representation (jvci), 58, 532-543, 2019.</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>20.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>z. zhao, z. bao, z. zhang, j.
deng, n. cummins, h. wang, j. tao, and b. schuller, automatic assessment of
depression from speech via a hierarchical attention transfer network and
attention autoencoders, ieee journal of selected topics in signal processing,
special issue on automatic assessment of health disorders based on voice,
speech and language processing, vol. 13, 2019. 11 pages, to appear (if: 6.688
(2018))</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>21.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>z. zhao, z. bao, y. zhao,
z. zhang, n. cummins, z. ren, and b. schuller, exploring deep spectrum
representations via attention-based recurrent and convolutional neural
networks for speech emotion recognition, ieee access, vol. 7, pp.
97515-97525, july 2019. (if: 4.098 (2018))</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>22.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wenjie ying, jitao sang, jian
yu. locality-constrained discrete graph hashing. neurocomputing 2019.</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>23.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yuhao wang, binxiu liang,
meng ding and jiangyun li, dual-branch dense residual network for
hyperspectral imagery classification, international journal of remote
sensing, pp.1-22, 2019.</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>24.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiangyun li, yikai zhao, jun
fu, jiajia wu, jing liu, attention-guided network for semantic video
segmentation, ieee access, vol.7, pp.140680-140689, 2019.</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>25.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiangyun li, peng yao, weicun
zhang, boosted transformer for image captioning, applied sciences, vol.9,
no.16, pp.3260, 2019.</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>26.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiaopeng liu, yan liu, meng
zhang, xianzhong chen, jiangyun li, improving stockline detection of radar
sensor array systems in blast furnaces using a novel encoder-decoder
architecture, sensors, vol.19, no.16, pp.3470, 2019.</span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>27.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhibin pan, erdun gao, ruoxin
zhu, and lingfei wang, a low bit-rate soc-based reversible data hiding
algorithm by using new encoding strategies, multimedia tools and
applications, 2019. (online) </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>28.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhibin pan, xinyi gao,
lingfei wang, and erdun gao, effective reversible data hiding using dynamic
neighboring pixels prediction based on prediction-error histogram,
multimedia tools and applications, 2019. (accepted) </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>29.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhibin pan, xiuquan wu, and
zhengyi li, central pixel selection strategy based on local grey-value
distribution by using gradient information to enhance lbp for texture
classification, expert systems with applications, vol. 120, pp. 319-334,
apr. 2019. (online) </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>30.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>rui li, zhibin pan, yang
wang. the linear prediction vector quantization for hyperspectral image
compression, multimedia tools and applications, 2019. (online) </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>31.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhibin pan, rui zhang,
weiping ku, yidi wang, adaptive pattern selection strategy for diamond
search algorithm in fast motion estimation, multimedia tools and
applications, 2019. (online) </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>32.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>erdun gao, zhibin pan, and xinyi
gao, reversible data hiding based on novel pairwise pvo and annular merging
strategy, information sciences, 2019. (online) </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>33.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yang wang, zhibin pan, rui
li, a novel low bit rate side match vector quantization algorithm based on
structed state codebook, multimedia tools and applications, 2019. (online) </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>34.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yang wang, zhibin pan, rui
li, a new cell-level search based non-exhaustive approximate nearest
neighbor (ann) search algorithm in the framework of product quantization,
ieee access, 2019. (online) </span></span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l1 level1 lfo12'><span style='mso-bookmark:
ole_link51'><![if !supportlists]><span lang=en-us style='mso-fareast-font-family:
"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>35.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yiwei pan, zhibin pan, yikun
wang, wei wang, a new fast search algorithm for exact k-nearest neighbors
based on optimal triangle-inequality-based check strategy, knowledge-based
systems, 2019. (online) </span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>36.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shiming ge, zhao luo, chunhui
zhang, yingying hua, dacheng tao. distilling channels for efficient deep
tracking: ieee transactions on image processing, 2019. (early access)</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>37.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jia li, kui fu, shengwei
zhao, shiming ge. spatiotemporal knowledge distillation for efficient
estimation of aerial video saliency: ieee transactions on image processing,
vol.29, no.1, pp. 1902-1914, 2020.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>38.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ziyi chen, chengyang ji, qin
shen, wei liu, f xiao-feng qin, aiping wu, tissue-specific deconvolution of
immune cell composition by integrating bulk and single-cell transcriptomes, bioinformatics,
btz672, https://doi.org/10.1093/bioinformatics/btz672</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>39.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiang-jun shen, si-xing liu,
bing-kun bao, chun-hong pan, zheng-jun zha, jianping fan, a generalized
least-squares approach regularized with graph embedding for dimensionality
reduction, pattern recognition, vol. 98, 2020.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>40.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wen-ze shao, jing-jing xu,
long chen, qi ge, li-qian wang, bing-kun bao, hai-bo li, on potentials of
regularized wasserstein generative adversarial networks for realistic
hallucination of tiny faces, neurocomputing, vol. 364, pp. 1-15, 2019.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>41.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qing pan, timon rabczuk, gang
xu, chong chen*.<span style='mso-spacerun:yes'>� </span>isogeometric analysis
for surface pdes with extended loop subdivision, journal of computational
physics,<span style='mso-spacerun:yes'>� </span>398: 108892, 2019.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>42.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shangde gao, xin liao,
xuchong liu, real-time detecting one specific tampering operation in multiple
operator chains, journal of real-time image processing, vol. 16, pp. 741-750,
2019.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>43.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xin liao, yingbo yu, bin li,
zhongpeng li, zheng qin, a new payload partition strategy in color image
steganography, ieee transactions on circuits and systems for video
technology, 2019, doi: 10.1109/tcsvt.2019.2896270.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>44.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhuoran li, huichuan duan,
kun zhao, yanhui ding. stability of mri radiomics features of hippocampus: an
integrated analysis of test-retest and inter-observer variability. ieee
access, 7: 97106-97116, 2019.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>45.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>changsheng li, chong liu,
lixin duan, peng gao, kai zheng, reconstruction regularized deep metric
learning for multi-label image classification, ieee transactions on neural
networks and learning systems, doi: 10.1109/tnnls.2019.2924023, 2019</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>46.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ni kang, wu yiquan, wang
peng. scene classification from synthetic aperture radar images using
generalized compact channel-boosted high-order orderless pooling network.
remote sensing, 2019, 11(9), 1079.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>47.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ni kang, wang peng, wu yiquan.
high-order generalized orderless pooling networks for synthetic-aperture
radar scene classification. ieee geoscience and remote sensing letters, 2019,
16(11): 1716-1720.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>48.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ni kang, wu yiquan, wang
peng. scene classification from remote sensing images using mid-level deep
feature learning. international journal of remote sensing, 2019, 41(4):
1415-1436.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>49.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ni kang, wu yiquan. river
channel extraction from synthetic aperture radar images based on region-based
active contour model. signal, image and video processing, 2019, 13:
1105-1112.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>50.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>meiling wang, wei shao,
xiaoke hao, li shen, daoqiang zhang. identify consistent cross-modality
imaging genetic patterns via discriminant sparse canonical correlation
analysis. ieee/acm transactions on computational biology and bioinformatics,
in press. doi: 10.1109/tcbb.2019.2944825</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>51.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>aihua zheng, naipeng ye,
chenglong li*, xiaowang, jin tang. multi-modal foreground detection via<span
style='mso-spacerun:yes'>� </span>inter- and intra-modality-consistent
low-rank separation. neurocomputing, vol.371, pp. 27-38, 2020.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>52.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>aihua zheng, jiacheng dong,
xianmin lin, lidan liu, bo jiang, bin luo. visual cognition inspired
multi-view vehicle re-identification via laplacian-regularized correlative
sparse ranking.<span style='mso-spacerun:yes'>� </span>cognitive computation,
pp. 1-14, 2019 (online).</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>53.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>cairong zhao,kang c, di z,
zhaoxiang z, et al. uncertainty-optimized deep learning model for small-scale
person re-identification[j]. science china information sciences, vol. 62,
issue 12, 220102:1-13, 2019</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>54.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zhenbing zhao, zhen zhen, lei
zhang, yincheng qi, yinghui kong, ke zhang. insulator detection method in
inspection image based on improved faster r-cnn. energies, 2019, 12(7), 1204,
https://doi.org/10.3390/en12071204.</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>55.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lin gao, jie yang, tong wu,
yu-jie yuan, hongbo fu, yu-kun lai, hao (richard) zhang, sdm-net: deep generative
network for structured deformable mesh, acm transactions on graphics, 2019,
38(6), 243:1-243:15</span></span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l1 level1 lfo12'><span
style='mso-bookmark:ole_link51'><![if !supportlists]><span lang=en-us
style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:"times new roman"'><span
style='mso-list:ignore'>56.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yu-jie yuan, yu-kun lai, tong
wu, shihong xia, lin gao, data-driven weight optimization for real-time mesh
deformation, graphical models, vol. 104, 2019</span></span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:2'><a name="_toc40189734"></a><a
name="_toc24098165"></a><a name="_toc532562252"></a><a name="_toc532391221"><span
style='mso-bookmark:_toc532562252'><span style='mso-bookmark:_toc24098165'><span
style='mso-bookmark:_toc40189734'><b style='mso-bidi-font-weight:normal'><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�v�q
rir</span></b></span></span></span></a><b
style='mso-bidi-font-weight:normal'><span lang=en-us><o:p></o:p></span></b></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�s</span><span
style='font-family:�[so'>��cg<span lang=en-us>, </span>h�\ֆ<span lang=en-us>, </span>h��z�^<span
lang=en-us>.</span>�p0r<span lang=en-us>nurbs</span>�f�~gяݍ�y�v�_����{�e�l<span
lang=en-us>.</span>���{</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>:g���r����n�vb_f[f[�b�</span><span
lang=en-us>2019,31(1):26-30. (ei</span><span style='font-family:�[so;
mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�n�~</span><span
lang=en-us>)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>2.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'> _\r`�ng�rf</span><span
lang=en-us>*</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>��s�t��f][s^������ _s��s�b'k�h</span><span
lang=en-us>. </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�w�n��ws�yz�u���yf[`n�tv^l�o ��v^ё�h�ur{|�{�lxvz</span><span
lang=en-us>. </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>5up[n�oo`f[�b�</span><span lang=en-us>2019,41:1-9</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>ei</span><span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>20193707417180</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'> �</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>3.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>ng�rf</span><span
lang=en-us>*</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>� _b��s�t�"��^pg��f][s^� _s��s�b'k�h�����</span><span
lang=en-us>. </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>b�t^ё�h�u��pencc�c�vrs��tɩb�{�l</span><span
lang=en-us>. </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>uir;sf[�]zf[bg�_�</span><span lang=en-us>2019,36(4):548-556</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>4.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'> _�tf�/om_�l�w%�7hag�fb��bt</span><span
lang=en-us>[j]</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>����{:g���r����n�vb_f[f[�b�</span><span
lang=en-us>31(7):1221-1228</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>2019. (ei)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>5.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>ѐ_����/om_�l�wq gĉte�f�s�~q�viq�n�fb�����</span><span
lang=en-us>[j], </span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�vf[f[�b�</span><span
lang=en-us>40(1):46-53</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>2019.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>6.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>��ews��q��`��sgޘ� _f[fk�l�ёey</span><span
lang=en-us>. </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>9eۏ'ylce\!j�w�[�s�v�v�p�s���{�l</span><span
lang=en-us>[j]</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>����{:g���r����n�vb_f[f[�b�</span><span
lang=en-us>31(7):1148-1155</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>2019. (ei)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>7.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>l�o</span><span
lang=en-us>, </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>ng�f</span><span lang=en-us>, </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�����q</span><span lang=en-us>, </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>n�lf</span><span lang=en-us>. </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�~t</span><span lang=en-us>cnn</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>nrrr�~_g�v�zso9sm��{�l</span><span lang=en-us>[j], </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>s�n�]n'yf[f[�b�6q�yf[hr�</span><span lang=en-us>45(5):413-420</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>2019. (</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>8h�_</span><span lang=en-us>)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>8.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>ng_lf�u�in�q���ss\�!���ng�d��m�^^y�~q�~!j�w�s)�~��</span><span
lang=en-us>[j]</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>��]z�yf[f[�b�</span><span lang=en-us>41(10):1229-1239,
2019. (ei)</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>9.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�l�q��^k���y�b�r�~]��w�nws�y^y�~q�~�vnol]eq�szz�w���qr�g�e�l</span><span
lang=en-us>[j]</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�o��nf[�b��]u_(u�_�qhr�</span><span
lang=en-us>2019.</span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>ei</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'> �</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>10.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>u�/cuq�p�?���b�<span
lang=en-us>. </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�w�n�m�^f[`n�v��5u�~�ɖɉ�hkmxvz�~��</span><span
lang=en-us>[j]. </span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>^n5u�r�</span><span
lang=en-us>2019, 32(9): 11-23.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>11.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>b���w�ΐy��[�u�/cuq�_l1r��b�<span
lang=en-us>. </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�~t�la�r:g6r�v�v�[</span><span lang=en-us>gan</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>��h�v�pub</span><span lang=en-us>[j]. </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>5ukmn�nh��</span><span lang=en-us>2019, 56(19): 64-69.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>12.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>b���w�_l1r��u�/cuq�ΐy��[�b�<span
lang=en-us>. </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�w�n9eۏ</span><span lang=en-us>ssd</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>!j�w�v��5u�~��]�h�v�pёwq�hkm�e�l</span><span lang=en-us>[j]. </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>5ukmn�nh��q�~���s�eg�</span><span lang=en-us>2019-09-29.</span></p>
<p class=af0 style='margin-left:21.0pt;text-indent:-21.0pt;mso-list:l2 level1 lfo14'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>13.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>4to���g�v:_�퐉sc��u�/cuq</span><span
lang=en-us>. </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�f[`nz�u�[ɩޏ�cws�y^y�~q�~�v�pr{|�e�l</span><span
lang=en-us>[j]. </span><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>�o�syt</span><span
lang=en-us>, 2019, 35(10): 1747-1752.</span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:2'><a name="_toc40189735"></a><a
name="_toc24098166"></a><a name="_toc532562253"></a><a name="_toc532391222"><span
style='mso-bookmark:_toc532562253'><span style='mso-bookmark:_toc24098166'><span
style='mso-bookmark:_toc40189735'><b style='mso-bidi-font-weight:normal'><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�ve�o��</span></b></span></span></span></a><b
style='mso-bidi-font-weight:normal'><span lang=en-us><o:p></o:p></span></b></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yuanzhi liang, yalong bai,
wei zhang, xueming qian, li zhu, tao mei, vrr-vg: refocusing
visually-relevant relationships, ieee iccv, 2019.</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>2.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xinyu li, wei zhang, tong
shen, tao mei. everyone is a cartoonist: selfie cartoonization with attentive
adversarial networks. ieee icme, 2019.</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>3.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wenqi ren, jiaolong yang, senyou
deng, david wipf, xiaochun cao, and xin tong, face video deblurring using 3d
facial priors, ieee international conference on computer vision (iccv)(2019),
pp: 9388-9397, 2019, seoul, korea</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>4.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>liu, yuchuan; tan, xiaoheng;
wang, pin; li, yongming*; zhang, yanling. recognition algorithm of
parkinson's disease based on weighted local discriminant preservation
projection embedded ensemble algorithm. conference: bibe 2019 - the third
international conference on biological information and biomedical engineering,
06/20/2019 - 06/22/2019 at hangzhou, china</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>5.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>li, yongming. feature and
instance learning of speech data of parkinson's disease,<span
style='mso-spacerun:yes'>� </span>2019 international conference on soft
computing & machine learning (scml2019)</span><span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>april 26th-29th, 2019, wuhan, china</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>6.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>x. yao, d. she, s. zhao, j.
liang, y.-k. lai, j. yang, attention-aware polarity sensitive embedding for
affective image retrieval, iccv, 2019</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>7.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>c. zhan, d. she, s. zhao,
m.-m. cheng, j. yang, zero-shot emotion recognition via affective structural
embedding, iccv, 2019</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>8.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>x. wu, n. wen, j. liang,
y.-k. lai, d. she, m.-m. cheng, j. yang, joint acne image grading and
counting via label distribution learning, iccv, 2019</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>9.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>x. wu, c. zhan, y.-k. lai,
m.-m. cheng, j. yang, ip102: a large-scale benchmark dataset for insect pest
recognition, cvpr, 2019</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>10.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>x. sun, l. chen, j. yang,
learning from web data using adversarial discriminative neural networks for
fine-grained classification, aaai, 2019</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>11.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>l. chen, j. yang, recognizing
the style of visual arts via adaptive cross-layer correlation, acm mm, 2019</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>12.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lu bai, yuhang jiao, </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>]=n#k</span><span lang=en-us>(</span><span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>���\o�</span><span
lang=en-us>), edwin r.hancock. learning deep representations for graph
classification. ecml-pkdd 2019 (</span><span style='font-family:�[so;
mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�_</span><span
lang=en-us>ei</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�h"}�</span><span lang=en-us>ccf b)</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>13.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yibo chai, yahu cong, lu bai,
</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>]=n#k</span><span lang=en-us>. loan
recommendation in p2p lending investment networks: a hybrid graph convolution
approach. ieee international conference on industrial engineering and
engineering management, ieem 2019. </span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>14.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yue wang, yao wan, chenwei
zhang, lu bai, </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>]=n#k</span><span lang=en-us>, philip
s. yu. competitive multi-agent deep reinforcement learning with
counterfactual thinking. icdm 2019.</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>15.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yuhang jiao,yueting yang, </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>]=n#k</span><span lang=en-us>, lu bai. an attributed graph
embedding method using the tree-index algorithm. in proceedings of 12th
iapr-tc-15 international workshop on graph-based representations in pattern
recognition, gbrpr 2019, lecture notes in computer science 11510, springer
2019, isbn 978-3-030-20080-0,pp.172-182.</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>16.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>xiao-yu zhang, haichao shi,
changsheng li, kai zheng, xiaobin zhu, lixin duan: learning transferable
self-attentive representations for action recognition in untrimmed videos with
weak supervision. in proc. aaai conference on artificial intelligence (aaai),
1-8, 2019.</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>17.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>haichao shi, xiao-yu zhang*,
shupeng wang, ge fu and jianqi tang: synchronized detection and recovery of
steganographic messages with adversarial learning. in proc. international
conference on computational science (iccs), 31-43, 2019.</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>*</span><span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>qqtn\o �</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>18.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yupeng cao, jing li, qiufeng
wang, kaizhu huang, and rui zhang. improving script identification by
integrating text recognition information, the 26th international conference on
neural information processing (iconip2019), sydney, australia. </span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>19.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>z. zhao, z. bao, z. zhang, n.
cummins, h. wang, and b. w. schuller, attention-enhanced connectionist
temporal classification for discrete speech emotion recognition, in
proceedings interspeech 2019, 20th annual conference of the international
speech communication association, pp. 206-210, september 2019, graz, austria</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>20.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>wenguang ma, wei ma,
rethinking faster r-cnn for window detection in street scenes: the 14th asia
pacific international conference on information science and technology
(apic-ist 2019), issn: 2093-0542, pp: 242-244, 2019, beijing, china.</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>21.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shen chen, wei ma, yue qin,
cnn-based stereoscopic image inpainting, the 10th international conference on
image and graphics (icig 2019), lncs: 11903, pp: 95-106, 2019, beijing,
china.</span></p>
<p class=msolistparagraph style='margin-left:18.0pt;text-indent:-18.0pt;
mso-char-indent-count:0;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>22.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jiangtao kong, rongchao xu,
junliang xing, kai li, wei ma, spatial temporal attentional glimpse for human
activity classification in video, ieee international conference on image
processing (icip 2019), isbn: 978-1-5386-6249-6, pp: 4040-4044, 2019, taipei,
taiwan, china.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>23.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>runzhong wang, junchi yan </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>���\o�</span><span lang=en-us>, and xiaokang yang. learning
combinatorial embedding networks for deep graph matching: 2019 ieee
international conference on computer vision (iccv) (2019), seoul, korea.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>24.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>tianzhe wang, zetian jiang,
and junchi yan </span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>���\o�</span><span lang=en-us>.
clustering-aware multiple graph matching via decayed pairwise matching
composition: 2020 thirty-fourth aaai conference on artificial intelligence
(aaai) (2020), new york, ny, usa.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>25.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shiming ge, shengwei zhao,
xindi gao, jia li. fewer-shots and lower-resolutions: towards ultrafast face
recognition in the wild: the 27th acm international conference on multimedia,
isbn-13: 9781450368896, pp:229-237, 2019, nice, france.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>26.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yingying hua, shiming ge,
xindi gao, xin jin, dan zeng. defending against adversarial examples via soft
decision trees: the 27th acm international conference on multimedia, isbn-13:
9781450368896, pp:2106-2114, 2019, nice, france.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>27.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>chunhui zhang, shiming ge,
yingying hua, dan zeng. robust deep tracking with two-step augmentation
discriminative correlation filters: ieee international conference on
multimedia and expo (icme): isbn-13: 9781538695524, pp: 1774-1779, 2019,
shanghai, china.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>28.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shengwei zhao, xindi gao,
shikun li, shiming ge. low-resolution face recognition in the wild with
mixed-domain distillation: the fifth ieee international conference on
multimedia big data, 2019, singapore.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>29.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>shanxiong chen, xu han,
xiaolong wang, hui ma<span style='mso-spacerun:yes'>� </span>a recognition
method of ancient yi script based on deep learning<span
style='mso-spacerun:yes'>� </span>icaps 2019 </span><span style='font-family:
�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:"times new roman"'>�</span><span
lang=en-us>international conference on automated planning and scheduling</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>30.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>kexin chen, xue zhou*, qidong
zhou and hongbing xu. adversarial learning-based data augmentation for
rotation-robust human tracking: 2019 ieee conference on acoustics, speech and
signal processing (icassp) (2019), isbn: 978-1-5386-4658-8, pp: 1942-1946,
2019, brighton, uk</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>31.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>jie yang, xue zhou*, zheng
zhou and hao wen. adaptive fusion of rgbd data for two-stream fcn-based level
set tracking: 2019 ieee conference on visual communications and image
processing(vcip)(2019), accepted, sydney, australia</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>32.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>binbin song, xue zhou*, wei
xiang, hao wen. improving person search by adaptive feature pyramid-based
multi-scale matching: 2019 ieee conference on visual communications and image
processing(vcip)(2019), accepted, sydney, australia</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>33.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>mengying zhang, changsheng
li, xiangfeng wang, multi-view metric learning for multi-label image
classification, proceedings of the 26th ieee international conference on
image processing (icip), 2134-2138, 2019.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>34.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>ni kang, wu yiquan, zhou fei,
hao xiaohui. multi-order feature fusion joint training network for remote
sensing scene classification[c]. 2019 international conference on electronic
engineering and informatics.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>35.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qiu d, zhang y, feng x, et
al. machine reading comprehension using structural knowledge graph-aware
network[c]//proceedings of the 2019 conference on empirical methods in
natural language processing and the 9th international joint conference on
natural language processing (emnlp-ijcnlp). 2019: 5898-5903. </span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>36.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>rui wang, huaibo huang,
xufeng zhang, jixin ma and aihua zheng*. a novel distance learning for
elastic cross-modal audio-visual matching: 2019 ieee international conference
on multimedia & expo workshops (icmew) (2019), pp: 300-305, 2019,
shanghai, china. best student paper award.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>37.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>yuanpeng tan, chunyu deng,
aixue jiang, and zhenbing zhao. insulator segmentation based on community
detection and hybrid feature[c]. the 10th international conference on image
and graphics (icig 2019), aug. 23-25 2019, beijing, china, lncs 11901, pp.
267-283.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>38.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>zihui yin, rong meng, junhu
dong, jingyi lang, zhenbing zhao. a co-random walks segmentation method for
aerial insulator video images[c]. 12th international congress on image and
signal processing, biomedical engineering and informatics (cisp-bmei 2019),
huaqiao, china, 2019.10.</span></p>
<p class=af0 style='margin-left:18.0pt;text-indent:-18.0pt;mso-list:l5 level1 lfo16'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>39.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>hsien-yu meng, lin gao,
yu-kun lai, dinesh manocha, vv-net: voxel vae net with group convolutions for
point cloud segmentation, international conference in computer vision, 2019</span></p>
<p class=af0><span lang=en-us><o:p> </o:p></span></p>
<p class=af0 style='mso-outline-level:2'><a name="_toc40189736"></a><a
name="_toc24098167"></a><a name="_toc532562254"><span style='mso-bookmark:
_toc24098167'><span style='mso-bookmark:_toc40189736'><b style='mso-bidi-font-weight:
normal'><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�v�qo��</span></b></span></span></a><b
style='mso-bidi-font-weight:normal'><span lang=en-us><o:p></o:p></span></b></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l6 level1 lfo18'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>1.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span style='font-family:�[so;mso-ascii-font-family:
"times new roman";mso-hansi-font-family:"times new roman"'>ng�rf</span><span
lang=en-us>*</span><span style='font-family:�[so;mso-ascii-font-family:"times new roman";
mso-hansi-font-family:"times new roman"'>�r�s�]��s�t</span><span lang=en-us>. </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�w�n�rcg@\�$r r�oc�bq_l]eqɩb�{�l�v^ё�h�uƌ r�</span><span lang=en-us>2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>-n�vuir;sf[�]z'yo�</span><span lang=en-us>2019</span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�</span><span lang=en-us>poster </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'>�x�b�</span><span lang=en-us>15b-188 </span><span
style='font-family:�[so;mso-ascii-font-family:"times new roman";mso-hansi-font-family:
"times new roman"'> ��</span><span lang=en-us>2019.11.14-16</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l6 level1 lfo18'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>2.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>haichao shi, xiao-yu zhang*,
changsheng li: weakly-supervised action recognition and localization via
knowledge transfer. in proc. chinese conference on pattern recognition and computer
vision (prcv), 205-216, 2019. </span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l6 level1 lfo18'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>3.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>qiu d, bao l, tian z, et al.
reconstructed option rereading network for opinion questions reading
comprehension[c]//china national conference on chinese computational
linguistics. springer, cham, 2019: 93-104.</span></p>
<p class=msolistparagraph style='margin-left:21.0pt;text-indent:-21.0pt;
mso-char-indent-count:0;mso-list:l6 level1 lfo18'><![if !supportlists]><span
lang=en-us style='mso-fareast-font-family:"times new roman";mso-bidi-font-family:
"times new roman"'><span style='mso-list:ignore'>4.<span style='font:7.0pt "times new roman"'>
</span></span></span><![endif]><span lang=en-us>lin x, liao x, xu t, et al.
rumor detection with hierarchical recurrent convolutional neural
network[c]//ccf international conference on natural language processing and
chinese computing. springer, cham, 2019: 338-348.</span></p>
</td>
</tr>
</table>
</div>
<p class=msonormal align=left style='text-align:left;mso-pagination:widow-orphan'><span
lang=en-us style='font-size:12.0pt;font-family:�[so;mso-bidi-font-family:�[so;
mso-font-kerning:0pt'><o:p> </o:p></span></p>
</div>
</body>
</html>