mypy ignore specific errors (#6278)
anakin87 authored Nov 10, 2023
Parent 1b63cfc · commit 92a8704
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 6 deletions.
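
For context: a bare "# type: ignore" silences every mypy error on its line, while "# type: ignore [code]" silences only the named error code, so any unrelated problem on the same line still gets reported. Mypy prints the code in brackets at the end of each error message (older releases need --show-error-codes). A minimal sketch of the difference, not taken from this commit:

    from typing import List

    def total(xs: List[int]) -> int:
        return sum(xs)

    # Bare ignore: suppresses every mypy error on this line.
    total((1, 2, 3))  # type: ignore

    # Scoped ignore: suppresses only the arg-type error; any other
    # mistake on the same line would still be reported.
    total((1, 2, 3))  # type: ignore [arg-type]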
examples/seq2seq_replacement.py (1 addition, 1 deletion)

@@ -21,7 +21,7 @@
 # query_and_docs = "question: {} context: {}".format(query, conditioned_doc)

 # Or use the PromptTemplate as shown here
-pt = PromptTemplate("lfqa", "question: {query} context: {join(documents, delimiter='<P>')}")
+pt = PromptTemplate("lfqa", "question: {query} context: {join(documents, delimiter='<P>')}")  # type: ignore [arg-type]

 res = p.prompt(prompt_template=pt, query=query, documents=[Document(d) for d in documents])
haystack/modeling/data_handler/data_silo.py (2 additions, 2 deletions)

@@ -819,12 +819,12 @@ def _get_dataset(self, filename: Optional[Union[str, Path]], dicts: Optional[Lis
             corresponding_chunks.append(i)
             if len(batch) == self.teacher_batch_size:
                 self._pass_batches(
-                    batch, corresponding_chunks, teacher_outputs, tensor_names
+                    batch, corresponding_chunks, teacher_outputs, tensor_names  # type: ignore [arg-type]
                 )  # doing forward pass on teacher model
                 batch = []
                 corresponding_chunks = []
     if batch:
-        self._pass_batches(batch, corresponding_chunks, teacher_outputs, tensor_names)
+        self._pass_batches(batch, corresponding_chunks, teacher_outputs, tensor_names)  # type: ignore [arg-type]

     # appending teacher outputs to original dataset
     for dataset, teacher_output in zip(concat_datasets.datasets, teacher_outputs):
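Scoped ignores stay trustworthy only if stale ones are flagged. Mypy can enforce both halves of the convention from the command line; a possible invocation (illustrative, not the project's actual CI setup):

    # Reject bare "# type: ignore" comments and report ignores that
    # no longer suppress anything:
    mypy --enable-error-code ignore-without-code --warn-unused-ignores haystack/
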
haystack/utils/augment_squad.py (3 additions, 3 deletions)

@@ -157,7 +157,7 @@ def get_replacements(
     for i, word in enumerate(words):
         if i in word_subword_mapping:  # word was not split into subwords so we can use MLM output
             subword_index = word_subword_mapping[i]
-            ranking = predictions[batch_index]
+            ranking = predictions[batch_index]  # type: ignore [assignment]
             possible_words_ = [word]
             for token in ranking:
                 word = tokenizer.convert_ids_to_tokens([token])[0]

@@ -172,8 +172,8 @@
             glove_vector = glove_vectors[word_id]
             with torch.inference_mode():
                 word_similarities = torch.mm(glove_vectors, glove_vector.unsqueeze(1)).squeeze(1)  # type: ignore [arg-type]
-            ranking = torch.argsort(word_similarities, descending=True)[: word_possibilities + 1]
-            possible_words.append([glove_id_word_mapping[int(id_)] for id_ in ranking.cpu()])
+            ranking = torch.argsort(word_similarities, descending=True)[: word_possibilities + 1]  # type: ignore [assignment]
+            possible_words.append([glove_id_word_mapping[int(id_)] for id_ in ranking.cpu()])  # type: ignore [attr-defined]
         else:  # word was not in glove either so we can't find any replacements
             possible_words.append([word])

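The assignment and attr-defined codes in this file stem from one situation: mypy settles on a single type for ranking, and these lines rebind it to an incompatible one (assignment) and then call a method that exists only on the runtime type (attr-defined). A minimal sketch of the pattern, with hypothetical values rather than the Haystack code:

    import torch

    predictions: list[list[int]] = [[3, 1, 4]]

    ranking = predictions[0]  # mypy infers: ranking is list[int]

    # Rebinding to an incompatible type triggers error code "assignment".
    ranking = torch.argsort(torch.tensor([0.2, 0.9, 0.1]))  # type: ignore [assignment]

    # Mypy still treats ranking as list[int], which has no .cpu(),
    # so the attribute access triggers "attr-defined".
    ids = [int(i) for i in ranking.cpu()]  # type: ignore [attr-defined]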
