This paper presents a data-driven framework for generating cartoon-like facial representations from a given portrait image. We cast the problem as an optimization that simultaneously considers a desired artistic style, the image-cartoon relationships of facial components, and an automatic adjustment of the image composition. The stylization operation consists of two steps: a face parsing step that localizes and extracts facial components from the input image, and a cartoon generation step that stylizes the face according to the extracted information. The components of the cartoon are assembled from a database of stylized facial components, and the similarity between the facial components of the input image and the cartoon is quantified by image feature matching. To maintain a natural and attractive look in the results, we incorporate prior knowledge about photo-cartoon relationships and about the optimal composition of cartoon facial components, both extracted from a set of cartoon faces.
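The following is a minimal, illustrative sketch of the component-matching step described above: for each parsed facial component of the input portrait, the most similar stylized component is selected from a database via image feature matching. The function names (`extract_features`, `match_component`, `assemble_cartoon`), the component list, and the histogram feature are assumptions made for illustration, not the authors' implementation; face parsing and composition adjustment are omitted.

```python
import numpy as np

# Hypothetical set of facial components produced by the face parsing step.
COMPONENTS = ["eyes", "eyebrows", "nose", "mouth", "face_contour", "hair"]

def extract_features(patch: np.ndarray, bins: int = 32) -> np.ndarray:
    """Placeholder image feature: a normalized intensity histogram of the patch."""
    hist, _ = np.histogram(patch.ravel(), bins=bins, range=(0.0, 1.0))
    return hist / max(hist.sum(), 1)

def match_component(input_patch: np.ndarray, candidates: list[np.ndarray]) -> int:
    """Return the index of the database component whose features are closest
    (Euclidean distance) to the features of the input component."""
    query = extract_features(input_patch)
    dists = [np.linalg.norm(query - extract_features(c)) for c in candidates]
    return int(np.argmin(dists))

def assemble_cartoon(parsed: dict[str, np.ndarray],
                     database: dict[str, list[np.ndarray]]) -> dict[str, int]:
    """For every parsed component, select the best-matching stylized component.
    The composition prior on component layout is not modeled in this sketch."""
    return {name: match_component(parsed[name], database[name])
            for name in COMPONENTS if name in parsed and name in database}

if __name__ == "__main__":
    # Stand-in patches: random grayscale crops in place of real parsed components.
    rng = np.random.default_rng(0)
    parsed = {name: rng.random((16, 16)) for name in COMPONENTS}
    database = {name: [rng.random((16, 16)) for _ in range(5)] for name in COMPONENTS}
    print(assemble_cartoon(parsed, database))
```

In the full method, the per-component matches would additionally be weighted by the style and composition terms of the optimization rather than chosen independently.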
@inproceedings{Zhang2014DatadrivenFace,
  author    = {Zhang, Y. and Dong, W. and Deussen, O. and Huang, F. and Li, K. and Hu, B.},
  title     = {Data-driven Face Cartoon Stylization},
  booktitle = {SIGGRAPH Asia 2014 Technical Briefs},
  series    = {SA '14},
  year      = {2014},
  location  = {Shenzhen, China},
  pages     = {14:1--14:4},
  articleno = {14},
  numpages  = {4},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-2895-1},
  doi       = {10.1145/2669024.2669028},
  acmid     = {2669028},
  url       = {http://graphics.uni-konstanz.de/publikationen/Zhang2014DatadrivenFace},
  keywords  = {face alignment, face parsing, face stylization}
}