1341. | Saphra, Naomi; Lopez, Adam : AMRICA: an AMR Inspector for Cross-language Alignments. In: Proc. of NAACL, 2015. (Type: Inproceedings | Abstract | BibTeX)@inproceedings{naomi_saphra_amrica:_2015,
title = {{AMRICA}: an {AMR} Inspector for Cross-language Alignments},
author = {Saphra, Naomi and Lopez, Adam},
booktitle = {Proc. of NAACL},
year = {2015},
abstract = {Abstract Meaning Representation (AMR), an annotation scheme for natural language semantics, has drawn attention for its simplicity and representational power. Because AMR annotations are not designed for human readability, we present AMRICA, a visual aid for exploration of AMR annotations. AMRICA can visualize an AMR or the difference between two AMRs to help users diagnose interannotator disagreement or errors from an AMR parser. AMRICA can also automatically align and visualize the AMRs of a sentence and its translation in a parallel text. We believe AMRICA will simplify and streamline exploratory research on cross-lingual AMR corpora.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Abstract Meaning Representation (AMR), an annotation scheme for natural language semantics, has drawn attention for its simplicity and representational power. Because AMR annotations are not designed for human readability, we present AMRICA, a visual aid for exploration of AMR annotations. AMRICA can visualize an AMR or the difference between two AMRs to help users diagnose interannotator disagreement or errors from an AMR parser. AMRICA can also automatically align and visualize the AMRs of a sentence and its translation in a parallel text. We believe AMRICA will simplify and streamline exploratory research on cross-lingual AMR corpora. |
1342. | Hori, Takaaki; Watanabe, Shinji; Zhang, Yu; Chan, William: Advances in Joint CTC-Attention based End-to-End Speech Recognition with a Deep CNN Encoder and RNN-LM. In: Proc. Interspeech 2017, pp. 949–953, 2017. (Type: Inproceedings | BibTeX)@inproceedings{hori2017advances,
title = {Advances in Joint {CTC}-Attention based End-to-End Speech Recognition with a Deep {CNN} Encoder and {RNN-LM}},
author = {Hori, Takaaki and Watanabe, Shinji and Zhang, Yu and Chan, William},
booktitle = {Proc. Interspeech 2017},
pages = {949--953},
year = {2017},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
1343. | Dredze, Mark; Schilit, Bill: Facet suggestion for search query augmentation. 2013, (US Patent 8,433,705). (Type: Patent | BibTeX)@patent{dredze2013facetd,
title = {Facet suggestion for search query augmentation},
author = {Dredze, Mark and Schilit, Bill},
publisher = {Google Patents},
number = {8,433,705},
year = {2013},
note = {US Patent 8,433,705},
keywords = {},
pubstate = {published},
tppubtype = {patent}
}
|
1344. | Snyder, David; Ghahremani, Pegah; Povey, Daniel; Garcia-Romero, Daniel; Carmiel, Yishay; Khudanpur, Sanjeev: Deep Neural Network-Based Speaker Embeddings for End-to-End Speaker Verification. In: 0000. (Type: Journal Article | BibTeX)@article{snyderdeep,
title = {Deep Neural Network-Based Speaker Embeddings for End-to-End Speaker Verification},
author = {Snyder, David and Ghahremani, Pegah and Povey, Daniel and Garcia-Romero, Daniel and Carmiel, Yishay and Khudanpur, Sanjeev},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
|
1345. | Peddinti, Vijayaditya; Manohar, Vimal; Wang, Yiming; Povey, Daniel; Khudanpur, Sanjeev: Far-field ASR without parallel data. In: 0000. (Type: Journal Article | BibTeX)@article{vijayadityafar,
title = {Far-field {ASR} without parallel data},
author = {Peddinti, Vijayaditya and Manohar, Vimal and Wang, Yiming and Povey, Daniel and Khudanpur, Sanjeev},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
|