Loading fastText models using only bin file #1341
Changes from 25 commits
File: gensim/models/wrappers/fasttext.py
```diff
@@ -35,7 +35,7 @@
 import numpy as np
 from numpy import float32 as REAL, sqrt, newaxis
 from gensim import utils
-from gensim.models.keyedvectors import KeyedVectors
+from gensim.models.keyedvectors import KeyedVectors, Vocab
 from gensim.models.word2vec import Word2Vec

 from six import string_types
```
```diff
@@ -233,11 +233,12 @@ def load_fasttext_format(cls, model_file, encoding='utf8'):

         `model_file` is the path to the FastText output files.
         FastText outputs two training files - `/path/to/train.vec` and `/path/to/train.bin`
-        Expected value for this example: `/path/to/train`
+        Expected value for this example: `/path/to/train`. However, you only need the .bin
+        file to load the entire model.

         """
         model = cls()
-        model.wv = cls.load_word2vec_format('%s.vec' % model_file, encoding=encoding)
+        model.file_name = model_file
         model.load_binary_data('%s.bin' % model_file, encoding=encoding)
         return model
```
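With this change, only the `.bin` file needs to exist on disk. A minimal usage sketch (the path and query words are illustrative, not from the PR):

```python
from gensim.models.wrappers import fasttext

# Only /path/to/train.bin has to exist now; the .vec file is no longer read.
model = fasttext.FastText.load_fasttext_format('/path/to/train')

print(model.wv['night'])                       # vector for an in-vocabulary word
print(model.wv.similarity('night', 'nights'))  # similarity using the loaded vectors
```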
```diff
@@ -284,12 +285,12 @@ def load_model_params(self, file_handle):

     def load_dict(self, file_handle, encoding='utf8'):
         vocab_size, nwords, _ = self.struct_unpack(file_handle, '@3i')
         # Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
-        assert len(self.wv.vocab) == nwords, 'mismatch between vocab sizes'
-        assert len(self.wv.vocab) == vocab_size, 'mismatch between vocab sizes'
+        logger.info("loading vocabulary words for fastText model from %s.bin", self.file_name)
         self.struct_unpack(file_handle, '@1q')  # number of tokens
         if self.new_format:
             pruneidx_size, = self.struct_unpack(file_handle, '@q')
-        for i in range(nwords):
+        for i in range(vocab_size):
             word_bytes = b''
             char_byte = file_handle.read(1)
             # Read vocab word
```

Review comment (on the `logger.info` line): Adding the number of words would be helpful in this logging statement.
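For reference, `struct_unpack` reads fixed-size binary records from the open file handle. A helper along these lines would behave as it is used here (an assumed sketch; the exact gensim implementation may differ):

```python
import struct

def struct_unpack(file_handle, fmt):
    # Read exactly as many bytes as the struct format occupies and unpack them:
    # '@3i' is three native-endian 32-bit ints (vocab_size, nwords, and a third
    # field unused here), '@1q' a single 64-bit int, '@qb' an int64 plus a byte.
    num_bytes = struct.calcsize(fmt)
    return struct.unpack(fmt, file_handle.read(num_bytes))
```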
```diff
@@ -298,8 +299,25 @@ def load_dict(self, file_handle, encoding='utf8'):
                 char_byte = file_handle.read(1)
             word = word_bytes.decode(encoding)
             count, _ = self.struct_unpack(file_handle, '@qb')
-            assert self.wv.vocab[word].index == i, 'mismatch between gensim word index and fastText word index'
-            self.wv.vocab[word].count = count
+
+            if i == nwords and i < vocab_size:
+                """
+                To handle the error in pretrained vector wiki.fr (French).
+                For more info : https://github.com/facebookresearch/fastText/issues/218
+                """
+                assert word == "__label__"
+                continue  # don't add word to vocab
+
+            self.wv.vocab[word] = Vocab(index=i, count=count)
+            self.wv.index2word.append(word)
+
+        assert len(self.wv.vocab) == nwords, 'mismatch between vocab sizes'
+        if len(self.wv.vocab) != vocab_size:
+            logger.warning("mismatch between vocab sizes")
+            logger.warning("If you are loading any model other than pretrained vector wiki.fr, ")
+            logger.warning("Please report to Gensim.")

         if self.new_format:
             for j in range(pruneidx_size):
```

Review comment (on the triple-quoted block): This format is generally reserved for docstrings. Regular comments would be preferable.

Review thread (on the multiple `logger.warning` calls):

Reviewer: Any particular reason for two separate warning statements? Why not a single one?

Reviewer: Please change the multiple warning statements to a single concatenated statement.

Reviewer: Also, "Please report to Gensim" is vague, and probably unnecessary. If people encounter bugs or get exceptions, they'll let us know, don't worry about that. I'd prefer if the logging message was more concrete instead: what mismatch, what are the mismatched "vocab sizes"?

Author: @jayantj I remember a few weeks ago, I received a review comment from @piskvorky that a concatenated statement would contain whitespace, therefore it would be better to split into multiple statements. Correct me if I didn't understand that comment.

@jayantj: I believe @piskvorky meant to split the string itself across multiple lines, like this:

```python
logger.warning(
    "mismatch between vocab sizes "
    "If you are loading any model other than pretrained vector wiki.fr, "
    "Please report to Gensim.")
```

He left a comment later in the PR clarifying it too.

Author: ohh, thanks for clarifying 😄
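Following the review suggestions, the three warnings could be folded into a single, more concrete message that names the mismatched sizes (a sketch of possible wording, not the final code):

```python
import logging

logger = logging.getLogger(__name__)

def warn_vocab_mismatch(actual_size, expected_size):
    # One warning instead of three; adjacent string literals are concatenated
    # by Python, so the message is still a single statement split for readability.
    logger.warning(
        "mismatch between actual vocab size %s and expected vocab size %s; "
        "this is known to happen for the pretrained wiki.fr vectors "
        "(https://github.com/facebookresearch/fastText/issues/218)",
        actual_size, expected_size)
```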
```diff
@@ -337,8 +355,12 @@ def init_ngrams(self):
         """
         self.wv.ngrams = {}
         all_ngrams = []
-        for w, v in self.wv.vocab.items():
+        self.wv.syn0 = np.zeros((len(self.wv.vocab), self.vector_size), dtype=REAL)
+
+        for w, vocab in self.wv.vocab.items():
             all_ngrams += self.compute_ngrams(w, self.wv.min_n, self.wv.max_n)
+            self.wv.syn0[vocab.index] += np.array(self.wv.syn0_all[vocab.index])
+
         all_ngrams = set(all_ngrams)
         self.num_ngram_vectors = len(all_ngrams)
         ngram_indices = []
```
```diff
@@ -348,6 +370,18 @@ def init_ngrams(self):
             self.wv.ngrams[ngram] = i
         self.wv.syn0_all = self.wv.syn0_all.take(ngram_indices, axis=0)

+        ngram_weights = self.wv.syn0_all
+
+        logger.info("loading weights for %s vocabulary words for fastText models from %s.bin", len(self.wv.vocab), self.file_name)
+
+        for w, vocab in self.wv.vocab.items():
+            word_ngrams = self.compute_ngrams(w, self.wv.min_n, self.wv.max_n)
+            for word_ngram in word_ngrams:
+                self.wv.syn0[vocab.index] += np.array(ngram_weights[self.wv.ngrams[word_ngram]])
+
+            self.wv.syn0[vocab.index] /= (len(word_ngrams) + 1)
+        logger.info("loaded %s weight matrix for fastText model from %s.bin", self.wv.syn0.shape, self.file_name)
+
     @staticmethod
     def compute_ngrams(word, min_n, max_n):
         ngram_indices = []
```

Review comment (on the first `logger.info` line): Typo: fastText model (not models)
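The loop above rebuilds each word's vector as the average of its own vector plus all of its character ngram vectors, hence the division by `len(word_ngrams) + 1`. For context, here is an illustrative sketch of the ngram extraction that `compute_ngrams` performs; the `<`/`>` padding follows fastText's convention, but this is a reimplementation, not the exact gensim code:

```python
def compute_ngrams(word, min_n, max_n):
    # fastText wraps each word in angle brackets so prefix and suffix ngrams
    # are distinguishable from word-internal ones ('<ni' vs 'nig').
    extended_word = '<%s>' % word
    ngrams = []
    for n in range(min_n, min(len(extended_word), max_n) + 1):
        for i in range(len(extended_word) - n + 1):
            ngrams.append(extended_word[i:i + n])
    return ngrams

print(compute_ngrams('night', 3, 4))
# ['<ni', 'nig', 'igh', 'ght', 'ht>', '<nig', 'nigh', 'ight', 'ght>']
```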
File: gensim/test/test_fasttext_wrapper.py
```diff
@@ -64,7 +64,6 @@ def testTraining(self):
         self.model_sanity(trained_model)

         # Tests temporary training files deleted
         self.assertFalse(os.path.exists('%s.vec' % testfile()))
-        self.assertFalse(os.path.exists('%s.bin' % testfile()))

     def testMinCount(self):
```
```diff
@@ -115,7 +114,7 @@ def testNormalizedVectorsNotSaved(self):
         self.assertTrue(loaded_kv.syn0_all_norm is None)

     def testLoadFastTextFormat(self):
-        """Test model successfully loaded from fastText .vec and .bin files"""
+        """Test model successfully loaded from fastText .bin files"""
         try:
             model = fasttext.FastText.load_fasttext_format(self.test_model_file)
         except Exception as exc:
```

Review comment (on the docstring): Typo here and below: .bin file (not files)
```diff
@@ -166,7 +165,7 @@ def testLoadFastTextFormat(self):
         self.model_sanity(model)

     def testLoadFastTextNewFormat(self):
-        """ Test model successfully loaded from fastText (new format) .vec and .bin files """
+        """ Test model successfully loaded from fastText (new format) .bin files """
         try:
             new_model = fasttext.FastText.load_fasttext_format(self.test_new_model_file)
         except Exception as exc:
```
Review comment (on `load_fasttext_format`): I wonder if it would be a good idea to allow taking the whole model filename (including the .bin) as valid input - with the latest changes, we're loading from the bin file only, so it makes intuitive sense for the entire filename to be valid input. The only reason IMO we're still allowing the filename without extension as valid input is backward compatibility.
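A minimal sketch of how both input forms could be accepted (a hypothetical helper, not code from this PR):

```python
def _model_prefix(model_file):
    # Accept both '/path/to/train' and '/path/to/train.bin' and return the
    # extension-less prefix that the loader appends '.bin' to.
    if model_file.endswith('.bin'):
        return model_file[:-len('.bin')]
    return model_file

assert '%s.bin' % _model_prefix('/path/to/train') == '/path/to/train.bin'
assert '%s.bin' % _model_prefix('/path/to/train.bin') == '/path/to/train.bin'
```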