{"id":1213,"date":"2018-03-29T14:29:02","date_gmt":"2018-03-29T14:29:02","guid":{"rendered":"https:\/\/cv.snu.ac.kr\/?page_id=1213"},"modified":"2022-01-27T19:24:52","modified_gmt":"2022-01-27T10:24:52","slug":"kmlee","status":"publish","type":"page","link":"https:\/\/cv.snu.ac.kr\/index.php\/kmlee\/","title":{"rendered":"kmlee"},"content":{"rendered":"<p>[et_pb_section bb_built=&#8221;1&#8243; background_color=&#8221;rgba(109,58,0,0.08)&#8221; _builder_version=&#8221;3.0.89&#8243; max_width_last_edited=&#8221;on|desktop&#8221; custom_margin_phone=&#8221;|||&#8221; custom_margin_last_edited=&#8221;on|phone&#8221; custom_padding=&#8221;10px|0px|52.358px|0px&#8221;][et_pb_row _builder_version=&#8221;3.0.89&#8243;][et_pb_column type=&#8221;4_4&#8243;][et_pb_text _builder_version=&#8221;3.0.89&#8243; text_font=&#8221;PT Serif||||||||&#8221; text_font_size=&#8221;35px&#8221;]<\/p>\n<p>\nFaculty<\/p>\n<p>[\/et_pb_text][\/et_pb_column][\/et_pb_row][et_pb_row use_custom_gutter=&#8221;on&#8221; gutter_width=&#8221;4&#8243; custom_padding=&#8221;20px|180px||20px&#8221; custom_padding_phone=&#8221;|0px||0px&#8221; custom_margin=&#8221;||10px|&#8221; background_color=&#8221;rgba(255,255,255,0)&#8221; padding_1_phone=&#8221;|100px||100px&#8221; padding_2_phone=&#8221;|||&#8221; padding_1_last_edited=&#8221;on|desktop&#8221; padding_2_last_edited=&#8221;on|desktop&#8221; custom_padding_last_edited=&#8221;on|phone&#8221; _builder_version=&#8221;3.0.89&#8243; background_size=&#8221;initial&#8221; background_position=&#8221;top_left&#8221; background_repeat=&#8221;repeat&#8221; module_alignment=&#8221;center&#8221;][et_pb_column type=&#8221;1_4&#8243;][et_pb_image src=&#8221;https:\/\/cv.snu.ac.kr\/wp-content\/uploads\/2017\/11\/portrait_round_professor.png&#8221; align=&#8221;center&#8221; force_fullwidth=&#8221;on&#8221; _builder_version=&#8221;3.0.89&#8243; border_radii=&#8221;on|50px|50px|50px|50px&#8221; custom_padding=&#8221;0px|||&#8221; \/][\/et_pb_column][et_pb_column 
type=&#8221;3_4&#8243;][et_pb_text _builder_version=&#8221;3.0.89&#8243; text_font=&#8221;||||||||&#8221; custom_padding=&#8221;0px|||50px&#8221;]<\/p>\n<h1 style=\"color: #000000; font-family: Arial; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial; font-size: 22px;\">Kyoung Mu Lee<\/h1>\n<p><span style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial; display: inline !important; float: none;\">Professor, Ph. 
D.<\/span><br style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial;\" \/><span style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial; display: inline !important; float: none;\">Department of Electrical and Computer Engineering<\/span><br style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial;\" \/><span style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial; display: inline !important; float: none;\">College of Engineering, Seoul National University<\/span><br style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 
400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial;\" \/><span style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial; display: inline !important; float: none;\">1 Gwanak-ro, Gwanak-gu, Seoul 151-744, Korea<\/span> <br style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial;\" \/><span style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial; display: inline !important; float: none;\">Phone: +82 02-880-8885 or 02-880-1743,\u00a0<\/span><span style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; 
-webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial; display: inline !important; float: none;\">Fax: +822-875-7144<\/span><br style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial;\" \/><span style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial; display: inline !important; float: none;\">E-mail:\u00a0<\/span><a style=\"font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px;\">kyoungmu(at)snu.ac.kr<\/a><br style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial;\" \/><br style=\"color: #000000; font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 
2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-style: initial; text-decoration-color: initial;\" \/><a style=\"font-family: Arial; font-size: 16px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: start; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px;\" href=\"http:\/\/scholar.google.co.kr\/citations?user=Hofj9kAAAAAJ&amp;hl=ko&amp;oi=ao\">Google Scholar Profile<\/a><\/p>\n<p>[\/et_pb_text][\/et_pb_column][\/et_pb_row][et_pb_row custom_padding=&#8221;|20px||20px&#8221; custom_padding_phone=&#8221;|||&#8221; background_color=&#8221;#ffffff&#8221; padding_1_phone=&#8221;|||&#8221; padding_1_last_edited=&#8221;on|desktop&#8221; custom_padding_last_edited=&#8221;on|phone&#8221; _builder_version=&#8221;3.0.89&#8243; custom_margin_phone=&#8221;|||&#8221; custom_margin_last_edited=&#8221;on|desktop&#8221; box_shadow_style=&#8221;preset1&#8243;][et_pb_column type=&#8221;4_4&#8243;][et_pb_text _builder_version=&#8221;3.0.89&#8243; background_layout=&#8221;light&#8221;]<\/p>\n<h3><em><strong>Biography<\/strong><\/em><\/h3>\n<p><strong>Kyoung Mu Lee<\/strong> received the BS and MS degrees in control and instrumentation engineering from Seoul National University (SNU), Seoul, Korea, in 1984 and 1986, respectively, and the Ph.D. degree in electrical engineering from the University of Southern California in 1993. He is currently with the Department of ECE, Seoul National University as a professor. He served as vice dean (2009-2011) of Engineering School, director (2012-2014) of the Automation and Systems Research Institute (ASRI). Since 2020 he has served as the director of the Interdisciplinary Program in Artificial Intelligence in the Graduate School of SNU. 
He has held affiliation with the University of California, San Diego (visiting scientist, 2011-2012),<\/p>\n<p>His research interests are in the broad areas of Computer Vision and Machine Learning, including low-level vision, visual tracking and navigation, 3D reconstruction, human pose and shape estimation, and video analysis. He is co-author of more than 70 journal papers and 180 conference papers. Prof. Lee introduced the novel idea of the global skip (residual)-connection in designing deep CNN models for image restoration, which was first used in the VDSR (Very Deep Super-Resolution) algorithm (CVPR2016). The \u2018skip connection\u2019 concept made a huge impact and became a de facto standard technique for CNN-based methods for low-level vision problems. He further developed a much deeper and high-performing network, EDSR (Enhanced Deep Super-Resolution), and won the first NTIRE2017 Single Image Super-Resolution (SR) Challenge in all categories. EDSR became a standard benchmark algorithm in SR. Recently, Prof Lee has been working on 3D hand and 3D multi-person pose estimation problems. He won the 2017 3D Hand Pose Estimation Challenge, and the First 3D Poses in the Wild Challenge 2020.<\/p>\n<p>He received several awards, in particular, the prestigious 36th Samil Award, the Medal of Merit and the Scientist of Engineers of the month award from the Korean Government in 2020 and 2018, respectively, the Excellence in Research Award from SNU in 2020, MSRA Grant Award in 2016, the Most Influential Paper over the Decade Award by the IAPR Machine Vision Application in 2009, the ACCV Honorable Mention Award in 2007, the Okawa Foundation Research Grant Award in 2006, the Distinguished Professor Award from the College of Engineering of SNU in 2009, and both the Outstanding Research Award and the Shinyang Engineering Academy Award from the College of Engineering of SNU in 2010. 
He was a distinguished lecturer of the Asia-Pacific Signal and Information Processing Association (APSIPA) for 2012-2013.<\/p>\n<p>He is currently serving as the Editor in Chief (EIC) of the IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) which is the top journal in Artificial Intelligence and in all Computer Science. He has served as an Associate Editor in Chief (AEIC) (2014-2019) and an Associate Editor (AE) (2014-2014) of the IEEE TPAMI, an AE of the Machine Vision Application (MVA) Journal (2010-2015), and the IPSJ Transactions on Computer Vision and Applications (CVA) (2008-2012), the EURASIP Journal on Applied Signal Processing (2000-2004), the IEEE Signal Processing Letters (SPL) (2012-2013), and an Area Editor of the Computer Vision and Image Understanding (CVIU) (2013-2018). He also has served as General co-chair of ICCV2019, ACM MM2018, and ACCV2018, a Program co-chair of ACCV2012, a Track chair of ICPR2020 and ICPR2012, and an Area Chair of CVPR, ICCV, and ECCV many times. He is an Advisory Board Member of the Computer Vision Foundation (CVF). He is the founding member and served as a President of the Korean Computer Vision Society (KCVS). Prof. 
Lee is a Fellow of IEEE, a member of the Korean Academy of Science and Technology (KAST) and the National Academy of Engineering of Korea (NAEK).<\/p>\n<p>[\/et_pb_text][et_pb_text _builder_version=&#8221;3.0.89&#8243; background_layout=&#8221;light&#8221; text_text_color=&#8221;#000000&#8243;]<\/p>\n<p>&nbsp;<\/p>\n<h3><strong><i>Professional Activities<\/i><\/strong><\/h3>\n<p><strong>Government Service\/Society Member<\/strong><br \/>\u2022 IEEE Fellow (2021 \u2013 present)<br \/>\u2022 Member of Korean Academy of Science and Technology (KAST) (2020 \u2013 Present)<br \/>\u2022 Member of the National Academy of Engineering of Korea (NAEK) (2020 \u2013 Present)<br \/>\u2022 Member of the AI Education Council, Korean Ministry of Education (2020 \u2013 2021)<br \/>\u2022 Member of the AI&amp;SW Council, Korean Ministry of Science and ICT (2020 \u2013 2021)<br \/>\u2022 Advisory Board Member of CVF (Computer Vision Foundation) (2016. 1. \u2013 2020)<br \/>\u2022 President, Korean Computer Vision Society (KCVS) (2018 \u2013 2021)<br \/>\u2022 Administrative Committee member of AFCV (Asian Federation of Computer Vision) (2010 \u2013 present)<\/p>\n<p><strong>Editorial Board Member<\/strong><br \/>\u2022 Editor in Chief (EIC), IEEE Trans. on PAMI (Pattern Analysis and Machine Intelligence), (2021.01 \u2013 Present)<br \/>\u2022 Editorial advisory board member for Academic Press\/Elsevier in computer vision and pattern recognition, (2015. 8 \u2013 Present)<br \/>\u2022 Associate Editor in Chief, IEEE Trans. on PAMI (Pattern Analysis and Machine Intelligence), (2014.09 \u2013 2019.10)<br \/>\u2022 Area Editor, Computer Vision and Image Understanding (CVIU), (2013.11 &#8211; Present)<br \/>\u2022 Guest Editor, Special Issue on Visual Tracking, Computer Vision and Image Understanding, 2015<br \/>\u2022 Guest Editor, Special Issue on Augmented Video, IEEE Trans. on Circuits and Systems for Video Technology, 2015<br \/>\u2022 Associate Editor, IEEE Trans. 
on PAMI (Pattern Analysis and Machine Intelligence), (2014.01 \u2013 2014. 08)<br \/>\u2022 Associate Editor, IEEE Signal Processing Letters, (2012. 2 \u2013 2013. 12)<br \/>\u2022 Associate Editor, MVA (Machine Vision and Applications) journal, (2010. 1\u2013 2015. 6)<br \/>\u2022 Associate Editor, Journal of Information Hiding and Multimedia Signal Processing (JIH-MSP), (2009.3. &#8211; Present)<br \/>\u2022 Associate Editor, IPSJ Transactions on Computer Vision and Applications (CVA), (2008.1. \u2013 2012.1)<br \/>\u2022 Editor, EURASIP Journal on Applied Signal Processing (2000.1 \u2013 2004.9)<\/p>\n<p><strong>Conference Chair<\/strong><br \/>\u2022 Area Chair, IEEE-CVF CVPR (Computer Vision and Pattern Recognition) 2022, New Orleans, LA<br \/>\u2022 Area Chair, IEEE-CVF ICCV (International Conference on Computer Vision) 2021, Oct. 11-17, Montreal, Canada<br \/>\u2022 Area Chair, IEEE-CVF CVPR (Computer Vision and Pattern Recognition) 2021, Nashville, TN<br \/>\u2022 Area Chair, ECCV (European Conference on Computer Vision) 2020, Aug. 23-28, 2020, Glasgow, UK<br \/>\u2022 Track Chair, ICPR (International Conference on Pattern Recognition) 2020, Sep. 13-18, 2020, Milan, Italy<br \/>\u2022 General Chair, IEEE-CVF ICCV (International Conference on Computer Vision) 2019, Oct. 27-Nov. 3, Seoul, Korea<br \/>\u2022 General Chair, ACCV (Asian Conference on Computer Vision) 2018, Dec. 4-6, Perth, Australia<br \/>\u2022 General Chair, ACM MM (Multimedia) 2018, Oct. 22-26, Seoul, Korea<br \/>\u2022 Area Chair, ECCV (European Conference on Computer Vision) 2018, Oct. 8-16, 2014, Munich, Germany<br \/>\u2022 Area Chair, BMVC (British Machine Vision Conference) 2017, 4-7, Sept., London, England.<br \/>\u2022 Area Chair, ACCV (Asian Conference on Computer Vision) 2016, Nov. 20-24, 2016, Taipei, Taiwan<br \/>\u2022 Area Chair, ECCV (European Conference on Computer Vision) 2016, Oct. 
8-16, 2014, Amsterdam, The Netherlands<br \/>\u2022 Session Chair, IEEE-CVF CVPR (Computer Vision and Pattern Recognition) 2015, June 7-12, 2015, Boston, Massachusetts<br \/>\u2022 Area Chair, IEEE-CVF CVPR (Computer Vision and Pattern Recognition) 2015, June 7-12, 2015, Boston, Massachusetts<br \/>\u2022 Area Chair, ECCV (European Conference on Computer Vision) 2014, Sep. 5-12, 2014, Zurich, Switzerland<br \/>\u2022 Area Chair, ACCV (Asian Conference on Computer Vision) 2014, Nov. 1-5, 2014, Singapore<br \/>\u2022 Founding General Chair, KCCV (Korean Conf. on Computer Vision) 2014, Aug. 28, Seoul, Korea<br \/>\u2022 Workshop Chair, IEEE-CVF ICCV (International Conference on Computer Vision) 2013, Dec. 1-8, 2013, Sydney, Australia<br \/>\u2022 Area Chair, IEEE-CVF ICCV (International Conference on Computer Vision) 2013, Dec. 1-8, 2013, Sydney, Australia<br \/>\u2022 General Chair, IPIU (Image Processing and Image Understanding) 2013, Feb. 18-20, 2013, Jeju, Korea<br \/>\u2022 Session Chair, IEEE-CVF CVPR (Computer Vision and Pattern Recognition) 2013, June 23-28, 2013, Portland, Oregon<br \/>\u2022 Area Chair, IEEE-CVF CVPR (Computer Vision and Pattern Recognition) 2013, June 23-28, 2013, Portland, Oregon<br \/>\u2022 Program Chair, ACCV (Asian Conference on Computer Vision) 2012, Daejeon, Korea, 2012,<br \/>\u2022 Track Chair, ICPR (International Conference on Pattern Recognition) 2012, Nov. 11-15, 2012, Tsukuba, Japan<br \/>\u2022 Session Chair, IEEE CVPR (Computer Vision and Pattern Recognition) 2012, June 2012, Providence, Rhode Island<br \/>\u2022 Area Chair, IEEE CVPR (Computer Vision and Pattern Recognition) 2012, June 2012, Providence, Rhode Island<br \/>\u2022 Area Chair, ACCV (Asian Conference on Computer Vision) 2010, Nov. 
8-12, 2010, Queenstown, New Zealand<br \/>\u2022 Associate Editor, IEEE ICRA (International Conference on Robotics and Automation) 2010, May 3-8, 2010, Anchorage, Alaska<br \/>\u2022 Area Chair, ACCV (Asian Conference on Computer Vision) 2009, Sep. 23-27, 2009, Xian, China<br \/>\u2022 Area Chair, ACCV (Asian Conference on Computer Vision) 2007, Nov. 18-22, 2007, Tokyo Japan<br \/>\u2022 General chair, FCV (Frontiers of Computer Vision) 2007, Jan. 25-27, 2007, Busan, Korea,<br \/>\u2022 Program Chair, FCV (Frontiers of Computer Vision) 2006, Feb. 2-3, 2006, Tokushima, Japan<\/p>\n<p><strong>Award Committee Member<\/strong><br \/>\u2022 Best Paper (Marr Prize) Award Committee, ICCV2021<br \/>\u2022 Best Paper Award Committee, CVPR2021<br \/>\u2022 PAMI TC Thomas S. Huang Memorial Award Committee, CVPR2021<br \/>\u2022 PAMI TC Longuet-Higgins Prize Committee, CVPR2020<br \/>\u2022 AFCV Fellow Committee, ACCV2016<br \/>\u2022 Best Paper Award Committee, ACCV2014<\/p>\n<p><strong>Organizing Committee Member<\/strong><br \/>\u2022 Advances in Image Manipulation (AIM) workshop and challenges on image and video manipulation 2021, Oct 16, 2021, Virtual<br \/>\u2022 New Trends in Image Restoration and Enhancement (NTIRE) workshop and challenge on image super-resolution 2021, June 19, 2021, Virtual<br \/>\u2022 Advances in Image Manipulation (AIM) workshop and challenges on image and video manipulation 2020, Aug. 28, Virtual<br \/>\u2022 New Trends in Image Restoration and Enhancement (NTIRE) workshop and challenge on image super-resolution 2020, June 15, 2020, Seattle<br \/>\u2022 ICPR (International Conf. on Pattern Recognition) 2020, Milan, Italy<br \/>\u2022 Advances in Image Manipulation (AIM) workshop and challenges on image and video manipulation 2019, Nov. 2, Seoul Korea<br \/>\u2022 New Trends in Image Restoration and Enhancement (NTIRE) workshop and challenge on image super-resolution 2019, June 17, 2019, Long Beach<br \/>\u2022 KCCV (Korean Conf. 
on Computer Vision) 2015, Seoul, Korea<br \/>\u2022 FCV (Frontiers of Computer Vision) 2009, Feb. 5-7, 2009, Andong, Korea,<br \/>\u2022 ICCAS (International Conference on Control, Automation and Systems) 2008, Oct. 14-17, 2008 Seoul, Korea<br \/>\u2022 FCV (Frontiers of Computer Vision) 2008, Jan.23-26, 2008, Bepu, Japan<br \/>\u2022 IEEE ISPACS (International Symposium on Intelligent Signal Processing and Communication System) 2004, Seoul Korea<br \/>\u2022 ACCV (Asian Conference on Computer Vision) 2004, Jan. 27-30, Jeju, Korea<\/p>\n<p><strong>Program Committee Member<\/strong><br \/>\u2022 Senior Program Committee Member, JICAI (International Joint Conferences on Artificial Intelligence) 2021, Aug. 21 -26, Montreal, Canada<br \/>\u2022 New Trends in Image Restoration and Enhancement workshop and challenge on image super-resolution 2018, June 18, 2018, Salt Lake City<br \/>\u2022 New Trends in Image Restoration and Enhancement workshop and challenge on image super-resolution 2017, July 21, 2017, Hawaii<br \/>\u2022 New Trends in Image Restoration and Enhancement workshop 2016, Nov 20, 2016, Taipei, Taiwan<br \/>\u2022 VOT (Visual Object Tracking) Workshop 2017, Oct. 28, 2017, Venice, Italy<br \/>\u2022 VOT (Visual Object Tracking) Workshop 2016, Oct. 10, 2016, Amsterdam, Netherlands<br \/>\u2022 VOT (Visual Object Tracking) Workshop 2015, Dec. 12, 2015, Santiago, Chile<br \/>\u2022 The Tenth International Workshop on Visual Surveillance, VS2010, Nov. 8, 2010, Queenstown, New Zealand<br \/>\u2022 The 3rd International Congress on Image and Signal Processing (CISP&#8217;10), Yantai, China, Oct. 16-18, 2010<br \/>\u2022 FCV (Frontiers of Computer Vision) 2010, Feb. 4-6, 2010, Hiroshima, Japan<br \/>\u2022 VISAPP (International Conference on Computer Vision Theory and Applications) 2010, May. 17-21, 2010, Angers, France<br \/>\u2022 VISAPP (International Conference on Computer Vision Theory and Applications) 2009, Feb. 
5-8, 2009, Lisboa, Portugal<br \/>\u2022 IIHMSP (Intelligent Information Hiding and Multimedia Signal Processing) 2008, Aug. 15-17, 2008, Harbin, China<br \/>\u2022 VISAPP (International Conference on Computer Vision Theory and Applications) 2007, 8 \u2013 11, March 2007, Barcelona, Spain<br \/>\u2022 ACCV (Asian Conference on Computer Vision) 2006, Jan. 22-25, 2006, Hyderabad, India<br \/>\u2022 VISAPP (International Conference on Computer Vision Theory and Applications) 2006, Feb. 22-25, 2006, Setubal, Portugal<br \/>\u2022 IEEE ICIP (International Conference on Image Processing) 2004, Oct. 24-27, Singapore<br \/>\u2022 IEEE workshop on Color and Photometric Methods in Computer Vision, July 7, 2003, Nice Acropolis, France<br \/>\u2022 SPIE VCIP (Visual Communication and Image Processing) 2002, Jan. 20-25, 2002, San Jose<\/p>\n<p>&nbsp;<\/p>\n<p>[\/et_pb_text][et_pb_text _builder_version=&#8221;3.0.89&#8243; text_font=&#8221;|600|||||||&#8221; text_text_color=&#8221;#000000&#8243; background_layout=&#8221;light&#8221;]<\/p>\n<h3><em><strong>Selected Awards and Honors<\/strong><\/em><\/h3>\n<p>\u2022 <strong>The 63th Samil Prize<\/strong>, The Samil Foundation 2022<br \/>\u2022 <strong>Excellence in Research Award<\/strong>, Seoul National University 2020<br \/>\u2022 <strong>Medal of Merit<\/strong>, Korean Government 2020<br \/>\u2022 <strong>Winner<\/strong> of the First 3D Poses in the Wild Challenge 2020 (3DPW-Challenge)<br \/>\u2022 <strong>Great Professor Award<\/strong>, College of Engineering, Seoul National University 2019<br \/>\u2022 <strong>Korean Scientist and Engineer of the Month Award<\/strong>, the Ministry of Science and ICT of Korea 2018<br \/>\u2022 <strong>Winner<\/strong> of 2nd NTIRE Single Image Super-Resolution Challenge, CVPRW2017<br \/>\u2022 <strong>Winner<\/strong> of the 2017 Hands in the Million Challenge on 3D Hand Pose Estimation Challenge (HANDS2017)<br \/>\u2022 <strong>MSRA Grant Award<\/strong> from Microsoft Research Asia 
2016<br \/>\u2022 <strong>Distinguished Lecturer<\/strong>, The Asia-Pacific Signal and Information Processing Association (APSIPA), 2012-2013<br \/>\u2022 <strong>Sinyang Engineering Academy Award<\/strong>, College of Engineering, Seoul National University 2010<br \/>\u2022 <strong>Outstanding Research Award<\/strong>, College of Engineering, Seoul National University 2010<br \/>\u2022 <strong>Distinguished Professor Award<\/strong>, AIP (Advanced Industrial Strategy Program), Seoul National University 2009<br \/>\u2022 <strong>Winner<\/strong> of the IAPR MVA (Machine Vision Application)&#8217;s Most Influential Paper over the Decade Award 2009<br \/>\u2022 <strong>Winner<\/strong> of the Honorable Mention Award, Asian Conference on Computer Vision (ACCV) 2007<br \/>\u2022 <strong>Okawa Foundation Research Grant Award<\/strong> for Understanding of Information in Images 2006<br \/>\u2022 <strong>Outstanding Research Performance Award<\/strong>, Hong-Ik University 2001, 2002<br \/>\u2022 <strong>Korean Government Overseas Scholarship<\/strong>. 
1988-1993<\/p>\n<p>\u00a0<\/p>\n<p>[\/et_pb_text][et_pb_code _builder_version=&#8221;3.0.89&#8243;]&lt;h3&gt;&lt;em&gt;&lt;strong&gt;Recent Publications (Selected)&lt;\/strong&gt;&lt;\/em&gt;&lt;\/h3&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;p&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Sanghyun Son and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;SRWarp: Generalized Image Super-Resolution under Arbitrary Transformation,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;CVPR&lt;\/i&gt;&lt;\/b&gt;, 2021.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Hongsuk Choi, Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Beyond Static Features for Temporally Consistent 3D Human Pose and Shape from a Video,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;CVPR&lt;\/i&gt;&lt;\/b&gt;, 2021.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/arxiv.org\/abs\/2011.08627&quot;&gt;[arXiv]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/youtu.be\/WB3nTnSQDII&quot;&gt;[Video]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Suyoung Lee*, Myungsub Choi*, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;DynaVSR: Dynamic Adaptive Blind Video Super-Resolution,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;i&gt;&lt;b&gt;WACV&lt;\/b&gt;&lt;\/i&gt;, 2021.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Sungyong Baik, Myungsub 
Choi, Janghoon Choi, Heewon Kim, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Meta-Learning with Adaptive Hyperparameters,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;i&gt;&lt;b&gt;NeurIPS&lt;\/b&gt;&lt;\/i&gt;, 2020.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/papers.nips.cc\/paper\/2020\/file\/ee89223a2b625b5152132ed77abbcc79-Paper.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/arxiv.org\/abs\/2011.00209&quot;&gt;[arXiv]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/github.com\/baiksung\/ALFA&quot;&gt;[code]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Janghoon Choi, Junseok Kwon, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Visual Tracking by TridentAlign and Context Embedding,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;i&gt;&lt;b&gt;ACCV&lt;\/b&gt;&lt;\/i&gt;, 2020.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/arxiv.org\/abs\/2007.06887&quot;&gt;[arXiv]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Hongsuk Choi*, Gyeongsik Moon*, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Pose2Mesh: Graph Convolutional Network for 3D Human Pose and Mesh Recovery from 2D Human Pose,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;ECCV&lt;\/i&gt;&lt;\/b&gt;, 2020.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/arxiv.org\/abs\/2008.09047&quot;&gt;[arXiv]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a 
href=&quot;https:\/\/www.ecva.net\/papers\/eccv_2020\/papers_ECCV\/papers\/123520749.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/youtu.be\/utaHeByNauc&quot;&gt;[Video]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/github.com\/hongsukchoi\/Pose2Mesh_RELEASE&quot;&gt;[code]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Gyeongsik Moon and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;I2L-MeshNet: Image-to-Lixel Prediction Network for Accurate 3D Human Pose and Mesh Estimation&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;from a Single RGB Image,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;ECCV&lt;\/i&gt;&lt;\/b&gt;, 2020.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/arxiv.org\/abs\/2008.03713&quot;&gt;[arXiv]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/www.ecva.net\/papers\/eccv_2020\/papers_ECCV\/papers\/123520732.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/youtu.be\/vIF9s71QrP0&quot;&gt;[Video]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/github.com\/mks0601\/I2L-MeshNet_RELEASE&quot;&gt;[code]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Gyeongsik Moon, Shoou-I Yu, He Wen, Takaaki Shiratori and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;InterHand2.6M: A Dataset and Baseline for 3D Interacting Hand Pose Estimation from a Single RGB Image,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;ECCV&lt;\/i&gt;&lt;\/b&gt;, 
2020.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/mks0601.github.io\/InterHand2.6M\/&quot;&gt;[dataset]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/arxiv.org\/abs\/2008.09309&quot;&gt;[arXiv]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/www.ecva.net\/papers\/eccv_2020\/papers_ECCV\/papers\/123650545.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/youtu.be\/h66jFalMpDQ&quot;&gt;[Video]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/github.com\/facebookresearch\/InterHand2.6M&quot;&gt;[code]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Gyeongsik Moon, Takaaki Shiratori, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;DeepHandMesh: Weakly-supervised Deep Encoder-Decoder Framework for High-fidelity Hand Mesh&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;Modeling from a Single RGB Image,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;ECCV&lt;\/i&gt;&lt;\/b&gt;, 2020. 
(&lt;b&gt;ORAL&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;    presentation&lt;\/b&gt;)&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/arxiv.org\/abs\/2008.08213&quot;&gt;[arXiv]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/www.ecva.net\/papers\/eccv_2020\/papers_ECCV\/papers\/123470426.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/youtu.be\/KflEovYApsk&quot;&gt;[Video]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Myungsub Choi, Janghoon Choi, Sungyong Baik, Tae Hyun Kim, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Scene-Adaptive Video Frame Interpolation via Meta-Learning,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;CVPR&lt;\/i&gt;&lt;\/b&gt;, 2020.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;http:\/\/openaccess.thecvf.com\/content_CVPR_2020\/papers\/Choi_Scene-Adaptive_Video_Frame_Interpolation_via_Meta-Learning_CVPR_2020_paper.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/myungsub.github.io\/meta-interpolation&quot;&gt;[Project Page]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Sungyong Baik, Seokil Hong, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Learning to Forget for Meta-Learning,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;CVPR&lt;\/i&gt;&lt;\/b&gt;, 2020.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a 
href=&quot;http:\/\/openaccess.thecvf.com\/content_CVPR_2020\/papers\/Baik_Learning_to_Forget_for_Meta-Learning_CVPR_2020_paper.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/github.com\/baiksung\/l2f&quot;&gt;[code]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Myungsub Choi, Heewon Kim, Bohyung Han, Ning Xu, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Channel Attention Is All You Need for Video Frame Interpolation,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;i&gt;&lt;b&gt;AAAI&lt;\/b&gt;&lt;\/i&gt;, 2020.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/aaai.org\/Papers\/AAAI\/2020GB\/AAAI-ChoiM.4773.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Janghoon Choi, Junseok Kwon, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Deep Meta Learning for Real-Time Target-Aware Visual Tracking,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;ICCV&lt;\/i&gt;&lt;\/b&gt;, 2019.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/cv.snu.ac.kr\/publication\/conf\/2019\/iccv19_mlt.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Camera Distance-aware Top-down Approach for 3D Multi-person Pose Estimation from a Single RGB Image,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;ICCV&lt;\/i&gt;&lt;\/b&gt;, 2019.&lt;!&#8211;
[et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/arxiv.org\/abs\/1907.11346&quot;&gt;[arXiv]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/github.com\/mks0601\/3DMPPE_ROOTNET_RELEASE&quot;&gt;[code (RootNet)]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/github.com\/mks0601\/3DMPPE_POSENET_RELEASE&quot;&gt;[code (PoseNet)]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Dongmin Park, Seokil Hong, Bohyung Han, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Continual Learning by Asymmetric Loss Approximation with Single-Side Overestimation,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;ICCV&lt;\/i&gt;&lt;\/b&gt;, 2019.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/cv.snu.ac.kr\/publication\/conf\/2019\/iccv19_alasso.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/github.com\/dmpark04\/alasso&quot;&gt;[code]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;PoseFix: Model-agnostic General Human Pose Refinement Network,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;CVPR&lt;\/i&gt;&lt;\/b&gt;, 2019.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/arxiv.org\/abs\/1812.03595&quot;&gt;[arXiv]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/github.com\/mks0601\/PoseFix_RELEASE&quot;&gt;[code]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; 
[et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Seungjun Nah, Sanghyun Son, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Recurrent Neural Networks with Intra-Frame Iterations for Video Deblurring,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;CVPR&lt;\/i&gt;&lt;\/b&gt;, 2019.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;http:\/\/openaccess.thecvf.com\/content_CVPR_2019\/papers\/Nah_Recurrent_Neural_Networks_With_Intra-Frame_Iterations_for_Video_Deblurring_CVPR_2019_paper.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Yumin Suh, Bohyung Han, Wonsik Kim, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Stochastic Class-based Hard Example Mining for Deep Metric Learning,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;b&gt;&lt;i&gt;CVPR&lt;\/i&gt;&lt;\/b&gt;, 2019.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;http:\/\/openaccess.thecvf.com\/content_CVPR_2019\/papers\/Suh_Stochastic_Class-Based_Hard_Example_Mining_for_Deep_Metric_Learning_CVPR_2019_paper.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br \/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;- Sungyong Baik, Junseok Kwon, and Kyoung Mu Lee,&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&quot;Learning to Remember Past to Predict Future for Visual Tracking,&quot;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;in &lt;i&gt;&lt;b&gt;ICIP&lt;\/b&gt;&lt;\/i&gt;, 2019.&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;a href=&quot;https:\/\/cv.snu.ac.kr\/publication\/conf\/2019\/icip2019_learning_to_remember.pdf&quot;&gt;[PDF]&lt;\/a&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;&lt;br 
\/&gt;&lt;!&#8211; [et_pb_line_break_holder] &#8211;&gt;[\/et_pb_code][\/et_pb_column][\/et_pb_row][\/et_pb_section]<\/p>\n","protected":false},"excerpt":{"rendered":"<p>[et_pb_section bb_built=&#8221;1&#8243; background_color=&#8221;rgba(109,58,0,0.08)&#8221; _builder_version=&#8221;3.0.89&#8243; max_width_last_edited=&#8221;on|desktop&#8221; custom_margin_phone=&#8221;|||&#8221; custom_margin_last_edited=&#8221;on|phone&#8221; custom_padding=&#8221;10px|0px|52.358px|0px&#8221;][et_pb_row _builder_version=&#8221;3.0.89&#8243;][et_pb_column type=&#8221;4_4&#8243;][et_pb_text _builder_version=&#8221;3.0.89&#8243; text_font=&#8221;PT Serif||||||||&#8221; text_font_size=&#8221;35px&#8221;] Faculty [\/et_pb_text][\/et_pb_column][\/et_pb_row][et_pb_row use_custom_gutter=&#8221;on&#8221; gutter_width=&#8221;4&#8243; custom_padding=&#8221;20px|180px||20px&#8221; custom_padding_phone=&#8221;|0px||0px&#8221; custom_margin=&#8221;||10px|&#8221; background_color=&#8221;rgba(255,255,255,0)&#8221; padding_1_phone=&#8221;|100px||100px&#8221; padding_2_phone=&#8221;|||&#8221; padding_1_last_edited=&#8221;on|desktop&#8221; padding_2_last_edited=&#8221;on|desktop&#8221; custom_padding_last_edited=&#8221;on|phone&#8221; _builder_version=&#8221;3.0.89&#8243; background_size=&#8221;initial&#8221; background_position=&#8221;top_left&#8221; background_repeat=&#8221;repeat&#8221; module_alignment=&#8221;center&#8221;][et_pb_column type=&#8221;1_4&#8243;][et_pb_image src=&#8221;https:\/\/cv.snu.ac.kr\/wp-content\/uploads\/2017\/11\/portrait_round_professor.png&#8221; align=&#8221;center&#8221; force_fullwidth=&#8221;on&#8221; _builder_version=&#8221;3.0.89&#8243; border_radii=&#8221;on|50px|50px|50px|50px&#8221; custom_padding=&#8221;0px|||&#8221; \/][\/et_pb_column][et_pb_column type=&#8221;3_4&#8243;][et_pb_text _builder_version=&#8221;3.0.89&#8243; text_font=&#8221;||||||||&#8221; custom_padding=&#8221;0px|||50px&#8221;] Kyoung Mu Lee Professor, Ph. 
D.Department of Electrical and Computer EngineeringCollege [&hellip;]<\/p>\n","protected":false},"author":4,"featured_media":0,"parent":0,"menu_order":0,"comment_status":"closed","ping_status":"closed","template":"","meta":{"footnotes":""},"class_list":["post-1213","page","type-page","status-publish","hentry"],"_links":{"self":[{"href":"https:\/\/cv.snu.ac.kr\/index.php\/wp-json\/wp\/v2\/pages\/1213","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/cv.snu.ac.kr\/index.php\/wp-json\/wp\/v2\/pages"}],"about":[{"href":"https:\/\/cv.snu.ac.kr\/index.php\/wp-json\/wp\/v2\/types\/page"}],"author":[{"embeddable":true,"href":"https:\/\/cv.snu.ac.kr\/index.php\/wp-json\/wp\/v2\/users\/4"}],"replies":[{"embeddable":true,"href":"https:\/\/cv.snu.ac.kr\/index.php\/wp-json\/wp\/v2\/comments?post=1213"}],"version-history":[{"count":18,"href":"https:\/\/cv.snu.ac.kr\/index.php\/wp-json\/wp\/v2\/pages\/1213\/revisions"}],"predecessor-version":[{"id":2911,"href":"https:\/\/cv.snu.ac.kr\/index.php\/wp-json\/wp\/v2\/pages\/1213\/revisions\/2911"}],"wp:attachment":[{"href":"https:\/\/cv.snu.ac.kr\/index.php\/wp-json\/wp\/v2\/media?parent=1213"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}