#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Modified by Microsoft Corporation.
# Licensed under the MIT license.
"""
Command line tool that produces embeddings for a large documents base based on the pretrained ctx & question encoders
Supposed to be used in a 'sharded' way to speed up the process.
"""
import os
import sys
import pathlib
import argparse
import csv
import logging
import pickle
from typing import List, Tuple

import numpy as np
import torch
from torch import nn

from dpr.models import init_biencoder_components
from dpr.options import add_encoder_params, setup_args_gpu, print_args, set_encoder_params_from_state, \
    add_tokenizer_params, add_cuda_params
from dpr.utils.data_utils import Tensorizer
from dpr.utils.model_utils import setup_for_distributed_mode, get_model_obj, load_states_from_checkpoint, \
    move_to_device, load_states_from_checkpoint_only_model

csv.field_size_limit(sys.maxsize)

logger = logging.getLogger()
logger.setLevel(logging.INFO)
if logger.hasHandlers():
    logger.handlers.clear()
console = logging.StreamHandler()
logger.addHandler(console)


def gen_ctx_vectors(ctx_rows: List[Tuple[object, str, str]], model: nn.Module, tensorizer: Tensorizer,
                    args, insert_title: bool = False) -> List[Tuple[object, np.ndarray]]:
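    """Encode each (id, text, title) row into a dense vector, in batches of args.batch_size.

    Returns a list of (passage id, 1-D numpy embedding) tuples, one per input row,
    preserving the input order.
    """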
    n = len(ctx_rows)
    bsz = args.batch_size
    total = 0
    results = []
    for j, batch_start in enumerate(range(0, n, bsz)):
        # tokenize the passage texts, optionally prefixed by their titles
        batch_token_tensors = [tensorizer.text_to_tensor(ctx[1], title=ctx[2] if insert_title else None)
                               for ctx in ctx_rows[batch_start:batch_start + bsz]]

        ctx_ids_batch = move_to_device(torch.stack(batch_token_tensors, dim=0), args.device)
        ctx_seg_batch = move_to_device(torch.zeros_like(ctx_ids_batch), args.device)
        ctx_attn_mask = move_to_device(tensorizer.get_attn_mask(ctx_ids_batch), args.device)
        with torch.no_grad():
            # the second model output is the pooled representation used as the passage embedding
            _, out, _ = model(ctx_ids_batch, ctx_seg_batch, ctx_attn_mask)
        out = out.cpu()

        ctx_ids = [r[0] for r in ctx_rows[batch_start:batch_start + bsz]]
        assert len(ctx_ids) == out.size(0)

        total += len(ctx_ids)
        results.extend([
            (ctx_ids[i], out[i].view(-1).numpy())
            for i in range(out.size(0))
        ])
        if total % 100000 == 0:
            logger.info('Encoded passages %d', total)
    return results


def main(args):
    # load either a full training checkpoint or a bare model state, depending on args
    if not hasattr(args, 'load_trained_model') or not args.load_trained_model:
        saved_state = load_states_from_checkpoint(args.model_file)
    else:
        saved_state = load_states_from_checkpoint_only_model(args.model_file)
    set_encoder_params_from_state(saved_state.encoder_params, args)
    print_args(args)

    tensorizer, encoder, _ = init_biencoder_components(args.encoder_model_type, args, inference_only=True)

    # only the context-encoder half of the biencoder is needed here
    encoder = encoder.ctx_model

    encoder, _ = setup_for_distributed_mode(encoder, None, args.device, args.n_gpu,
                                            args.local_rank,
                                            args.fp16,
                                            args.fp16_opt_level)
    encoder.eval()

    # load weights from the saved state, stripping the 'ctx_model.' key prefix
    model_to_load = get_model_obj(encoder)
    logger.info('Loading saved model state ...')
    logger.debug('saved model keys =%s', saved_state.model_dict.keys())

    prefix_len = len('ctx_model.')
    ctx_state = {key[prefix_len:]: value for (key, value) in saved_state.model_dict.items()
                 if key.startswith('ctx_model.')}
    model_to_load.load_state_dict(ctx_state)

    logger.info('reading data from file=%s', args.ctx_file)

    rows = []
    with open(args.ctx_file) as tsvfile:
        reader = csv.reader(tsvfile, delimiter='\t')
        # expected file format: id \t passage_text \t title; skip the header row and
        # any row with an empty id/title or a passage text shorter than 10 characters
        rows.extend([(row[0], row[1], row[2]) for row in reader
                     if row[0] != 'id' and all([len(row[0]) != 0, len(row[1]) >= 10, len(row[2]) != 0])])

    if hasattr(args, 'change_id_over_card') and args.change_id_over_card:
        # in distributed mode, derive the shard id from the process's local rank
        args.shard_id = args.local_rank if args.local_rank != -1 else 0

    shard_size = int(len(rows) / args.num_shards)
    start_idx = args.shard_id * shard_size
    end_idx = start_idx + shard_size

    logger.info('Producing encodings for passages range: %d to %d (out of total %d)', start_idx, end_idx, len(rows))
    rows = rows[start_idx:end_idx]

    data = gen_ctx_vectors(rows, encoder, tensorizer, args, False)

    file = args.out_file + '_' + str(args.shard_id)
    pathlib.Path(os.path.dirname(file)).mkdir(parents=True, exist_ok=True)
    logger.info('Writing results to %s', file)
    with open(file, mode='wb') as f:
        pickle.dump(data, f)
    logger.info('Total passages processed %d. Written to %s', len(data), file)
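
# Each output shard written above is a pickled list of (passage id, 1-D np.ndarray)
# tuples. A downstream consumer could read one back with a sketch like this
# (the shard path is hypothetical):
#
#   import pickle
#   with open('/path/to/embeddings/passages_0', 'rb') as f:
#       shard = pickle.load(f)  # List[Tuple[object, np.ndarray]]
#   ids, vectors = zip(*shard)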


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    add_encoder_params(parser)
    add_tokenizer_params(parser)
    add_cuda_params(parser)

    parser.add_argument('--ctx_file', type=str, default=None, help='Path to passages set .tsv file')
    parser.add_argument('--out_file', required=True, type=str, default=None,
                        help='output file path prefix to write results to (the shard id is appended)')
    parser.add_argument('--shard_id', type=int, default=0, help="Number (0-based) of data shard to process")
    parser.add_argument('--num_shards', type=int, default=1, help="Total amount of data shards")
    parser.add_argument('--batch_size', type=int, default=32, help="Batch size for the passage encoder forward pass")

    args = parser.parse_args()

    assert args.model_file, 'Please specify --model_file checkpoint to init model weights'

    setup_args_gpu(args)
    main(args)