Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions (Gatt, Albert; Tanti, Marc; Muscat, Adrian; Paggio, Patrizia; Farrugia, Reuben A.; Borg, Claudia; Camilleri, Kenneth P.; Rosner, Mike and Van der Plas, Lonneke), In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018) (Calzolari, Nicoletta (Conference chair); Choukri, Khalid; Cieri, Christopher; Declerck, Thierry; Goggi, Sara; Hasida, Koiti; Isahara, Hitoshi; Maegaard, Bente; Mariani, Joseph; Mazo, Hélène; Moreno, Asuncion; Odijk, Jan; Piperidis, Stelios; Tokunaga, Takenobu, eds.), European Language Resources Association (ELRA), 2018. [bib]
@InProceedings{Gatt2018,
  author     = {Gatt, Albert and Tanti, Marc and Muscat, Adrian and Paggio, Patrizia and Farrugia, Reuben A. and Borg, Claudia and Camilleri, Kenneth P. and Rosner, Mike and Van der Plas, Lonneke},
  title      = {{Face2Text}: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions},
  booktitle  = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)},
  editor     = {Calzolari, Nicoletta and Choukri, Khalid and Cieri, Christopher and Declerck, Thierry and Goggi, Sara and Hasida, Koiti and Isahara, Hitoshi and Maegaard, Bente and Mariani, Joseph and Mazo, H{\'e}l{\`e}ne and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios and Tokunaga, Takenobu},
  date       = {2018-05-07},
  publisher  = {European Language Resources Association (ELRA)},
  address    = {Paris, France},
  location   = {Miyazaki, Japan},
  eprint     = {1803.03827},
  eprinttype = {arxiv},
  isbn       = {979-10-95546-00-9},
  keywords   = {vision and language, image captioning, face description, dataset},
  language   = {english},
  url        = {http://www.lrec-conf.org/proceedings/lrec2018/summaries/226.html},
}
@Article{Tanti2018,
  author     = {Tanti, Marc and Gatt, Albert and Camilleri, Kenneth P.},
  title      = {Where to Put the Image in an Image Caption Generator},
  journal    = {Natural Language Engineering},
  year       = {2018},
  volume     = {24},
  number     = {3},
  pages      = {467--489},
  month      = apr,
  doi        = {10.1017/S1351324918000098},
  eprint     = {1703.09137},
  eprinttype = {arxiv},
  keywords   = {vision and language, image captioning, neural architectures, scene description},
  language   = {english},
  publisher  = {Cambridge University Press},
  url        = {https://www.cambridge.org/core/journals/natural-language-engineering/article/where-to-put-the-image-in-an-image-caption-generator/A5B0ACFFE8E4AEAA5840DC61F93153F3},
}
@InProceedings{Tanti2017,
  author     = {Tanti, Marc and Gatt, Albert and Camilleri, Kenneth P.},
  title      = {What is the Role of Recurrent Neural Networks ({RNNs}) in an Image Caption Generator?},
  booktitle  = {Proceedings of the 10th International Conference on Natural Language Generation},
  year       = {2017},
  pages      = {51--60},
  publisher  = {Association for Computational Linguistics},
  location   = {Santiago de Compostela, Spain},
  eprint     = {1708.02043},
  eprinttype = {arxiv},
  keywords   = {vision and language, image captioning, neural architectures, scene description},
  language   = {english},
  url        = {http://aclweb.org/anthology/W17-3506},
}