@misc{devlin_bert_2019,
title = {{BERT}: {Pre}-training of {Deep} {Bidirectional} {Transformers} for {Language} {Understanding}},
shorttitle = {{BERT}},
url = {http://arxiv.org/abs/1810.04805},
doi = {10.48550/arXiv.1810.04805},
abstract = {We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5\% (7.7\% point absolute improvement), MultiNLI accuracy to 86.7\% (4.6\% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).},
urldate = {2024-12-17},
publisher = {arXiv},
author = {Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
month = may,
year = {2019},
note = {arXiv:1810.04805 [cs]},
keywords = {Computer Science - Computation and Language},
}

@article{lin_evolutionary-scale_2023,
title = {Evolutionary-scale prediction of atomic-level protein structure with a language model},
volume = {379},
url = {https://www.science.org/doi/abs/10.1126/science.ade2574},
doi = {10.1126/science.ade2574},
abstract = {Recent advances in machine learning have leveraged evolutionary information in multiple sequence alignments to predict protein structure. We demonstrate direct inference of full atomic-level protein structure from primary sequence using a large language model. As language models of protein sequences are scaled up to 15 billion parameters, an atomic-resolution picture of protein structure emerges in the learned representations. This results in an order-of-magnitude acceleration of high-resolution structure prediction, which enables large-scale structural characterization of metagenomic proteins. We apply this capability to construct the ESM Metagenomic Atlas by predicting structures for {\textgreater}617 million metagenomic protein sequences, including {\textgreater}225 million that are predicted with high confidence, which gives a view into the vast breadth and diversity of natural proteins.},
number = {6637},
urldate = {2024-06-24},
journal = {Science},
author = {Lin, Zeming and Akin, Halil and Rao, Roshan and Hie, Brian and Zhu, Zhongkai and Lu, Wenting and Smetanin, Nikita and Verkuil, Robert and Kabeli, Ori and Shmueli, Yaniv and dos Santos Costa, Allan and Fazel-Zarandi, Maryam and Sercu, Tom and Candido, Salvatore and Rives, Alexander},
month = mar,
year = {2023},
note = {Publisher: American Association for the Advancement of Science},
pages = {1123--1130},
}

@inproceedings{salazar_masked_2020,
title = {Masked {Language} {Model} {Scoring}},
url = {http://arxiv.org/abs/1910.14659},
doi = {10.18653/v1/2020.acl-main.240},
abstract = {Pretrained masked language models (MLMs) require finetuning for most NLP tasks. Instead, we evaluate MLMs out of the box via their pseudo-log-likelihood scores (PLLs), which are computed by masking tokens one by one. We show that PLLs outperform scores from autoregressive language models like GPT-2 in a variety of tasks. By rescoring ASR and NMT hypotheses, RoBERTa reduces an end-to-end LibriSpeech model's WER by 30\% relative and adds up to +1.7 BLEU on state-of-the-art baselines for low-resource translation pairs, with further gains from domain adaptation. We attribute this success to PLL's unsupervised expression of linguistic acceptability without a left-to-right bias, greatly improving on scores from GPT-2 (+10 points on island effects, NPI licensing in BLiMP). One can finetune MLMs to give scores without masking, enabling computation in a single inference pass. In all, PLLs and their associated pseudo-perplexities (PPPLs) enable plug-and-play use of the growing number of pretrained MLMs; e.g., we use a single cross-lingual model to rescore translations in multiple languages. We release our library for language model scoring at https://github.com/awslabs/mlm-scoring.},
urldate = {2024-12-17},
booktitle = {Proceedings of the 58th {Annual} {Meeting} of the {Association} for {Computational} {Linguistics}},
author = {Salazar, Julian and Liang, Davis and Nguyen, Toan Q. and Kirchhoff, Katrin},
year = {2020},
note = {arXiv:1910.14659 [cs]},
keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning, Electrical Engineering and Systems Science - Audio and Speech Processing, Statistics - Machine Learning},
pages = {2699--2712},
annote = {Comment: ACL 2020 camera-ready (presented July 2020)},
}