Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Loading fastText models using only bin file #1341

Merged
merged 34 commits into from
Jun 28, 2017
Merged
Show file tree
Hide file tree
Changes from 25 commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
7759a95
french wiki issue resolved
May 22, 2017
c12b4fa
Merge branch 'develop' into french
prakhar2b May 22, 2017
8025710
bin and vec mismatch handled
prakhar2b May 22, 2017
7ee83d9
updating with latest codes and resolving conflicts
May 23, 2017
041a6e9
Merge branch 'develop' of https://github.com/RaRe-Technologies/gensim…
Jun 2, 2017
22c6710
added test for bin only loading
Jun 2, 2017
61be613
[WIP] loading bin only
Jun 2, 2017
e11ac44
word vec from its ngrams
Jun 6, 2017
a63a3bc
[WIP] word vec from ngrams
Jun 6, 2017
f80410f
Merge branch 'develop' of https://github.com/RaRe-Technologies/gensim…
Jun 7, 2017
454d74e
[WIP] getting syn0 from all n-grams
Jun 7, 2017
e6b0d8b
[TDD] test comparing word vector from bin_only and default loading
Jun 7, 2017
9b03ea3
cleaned up test code
Jun 8, 2017
c496be9
added docstring for bin_only
Jun 8, 2017
2c4a8dd
Merge branch 'ft_oov_fix' of https://github.com/jayantj/gensim into f…
Jun 12, 2017
d2ab903
resolved wiki.fr issue
Jun 12, 2017
82507d1
pep8 fixes
Jun 12, 2017
c44b958
Merge branch 'develop' of https://github.com/RaRe-Technologies/gensim…
Jun 16, 2017
0fc1159
default bin file loading only
Jun 16, 2017
f421b05
logging info modified plus changes a/c review
Jun 19, 2017
68ec73b
removed unused code in fasttext.py
Jun 19, 2017
f7b372e
removed unused codes and vec files from test
Jun 19, 2017
5f7fe02
added lee_fasttext vec files again
Jun 20, 2017
8bd56cf
re-added removed files and unused codes
Jun 21, 2017
b916187
added file name in logging info
Jun 21, 2017
1a0bfc0
removing unused load_word2vec_format code
Jun 22, 2017
98e0287
updated logging info and comments
Jun 22, 2017
f3d2032
input file name with or without .bin both accepted
Jun 22, 2017
bd7e7f6
resolved typo mistake
Jun 22, 2017
800cd01
test for file name
Jun 22, 2017
a15233a
minor change to input filename handling in ft wrapper
jayantj Jun 23, 2017
431aebf
changes to logging and assert messages, pep8 fixes
jayantj Jun 23, 2017
e52fee4
removes redundant .vec files
jayantj Jun 23, 2017
cebb3fc
fixes utf8 bug in flake8_diff.sh script
jayantj Jun 28, 2017
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
52 changes: 43 additions & 9 deletions gensim/models/wrappers/fasttext.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
import numpy as np
from numpy import float32 as REAL, sqrt, newaxis
from gensim import utils
from gensim.models.keyedvectors import KeyedVectors
from gensim.models.keyedvectors import KeyedVectors, Vocab
from gensim.models.word2vec import Word2Vec

from six import string_types
Expand Down Expand Up @@ -233,11 +233,12 @@ def load_fasttext_format(cls, model_file, encoding='utf8'):

`model_file` is the path to the FastText output files.
FastText outputs two training files - `/path/to/train.vec` and `/path/to/train.bin`
Expected value for this example: `/path/to/train`
Expected value for this example: `/path/to/train`. However, you only need the .bin
file to load the entire model.

"""
model = cls()
model.wv = cls.load_word2vec_format('%s.vec' % model_file, encoding=encoding)
model.file_name = model_file
model.load_binary_data('%s.bin' % model_file, encoding=encoding)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wonder if it would be a good idea to allow taking the whole model filename (including the .bin) as valid input - with the latest changes, we're loading from the bin file only, so it makes intuitive sense for the entire filename to be valid input.
The only reason IMO we're still allowing the filename without extension as valid input is backward compatibility.

return model

Expand Down Expand Up @@ -284,12 +285,12 @@ def load_model_params(self, file_handle):
def load_dict(self, file_handle, encoding='utf8'):
vocab_size, nwords, _ = self.struct_unpack(file_handle, '@3i')
# Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
assert len(self.wv.vocab) == nwords, 'mismatch between vocab sizes'
assert len(self.wv.vocab) == vocab_size, 'mismatch between vocab sizes'
logger.info("loading vocabulary words for fastText model from %s.bin", self.file_name)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Adding the number of words would be helpful in this logging statement.


self.struct_unpack(file_handle, '@1q') # number of tokens
if self.new_format:
pruneidx_size, = self.struct_unpack(file_handle, '@q')
for i in range(nwords):
for i in range(vocab_size):
word_bytes = b''
char_byte = file_handle.read(1)
# Read vocab word
Expand All @@ -298,8 +299,25 @@ def load_dict(self, file_handle, encoding='utf8'):
char_byte = file_handle.read(1)
word = word_bytes.decode(encoding)
count, _ = self.struct_unpack(file_handle, '@qb')
assert self.wv.vocab[word].index == i, 'mismatch between gensim word index and fastText word index'
self.wv.vocab[word].count = count

if i == nwords and i < vocab_size:
"""
To handle the error in pretrained vector wiki.fr (French).
Copy link
Contributor

@jayantj jayantj Jun 22, 2017

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This format -

"""
Some comments
"""

is generally reserved for docstrings. Regular comments would be preferable.

For more info : https://github.com/facebookresearch/fastText/issues/218

"""
assert word == "__label__"
continue # don't add word to vocab

self.wv.vocab[word] = Vocab(index=i, count=count)
self.wv.index2word.append(word)

assert len(self.wv.vocab) == nwords, 'mismatch between vocab sizes'
if len(self.wv.vocab) != vocab_size:
logger.warning("mismatch between vocab sizes")
logger.warning("If you are loading any model other than pretrained vector wiki.fr, ")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Any particular reason for two separate warning statements? Why not a single one?

logger.warning("Please report to Gensim.")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please change the multiple warning statements to a single concatenated statement.

Copy link
Owner

@piskvorky piskvorky Jun 22, 2017

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Also, "Please report to Gensim" is vague, and probably unnecessary.

If people encounter bugs or get exceptions, they'll let us know, don't worry about that.

I'd prefer if the logging message was more concrete instead: what mismatch, what are the mismatched "vocab sizes"?
We want the logs as concrete and useful as possible, for our own sanity (remote debugging via mailing list etc).

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@jayantj I remember a few weeks ago, I received a review comment from @piskvorky that concatenated statement would contain white space therefore better to split into multiple statements. Correct me if I didn't understand that comment

Copy link
Contributor

@jayantj jayantj Jun 22, 2017

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I believe @piskvorky meant to split the string itself across multiple lines, like this -

logger.warning(
	"mismatch between vocab sizes "
	"If you are loading any model other than pretrained vector wiki.fr, "
	"Please report to Gensim.")

He left a comment later in the PR clarifying it too.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ohh, thanks for clarifying 😄



if self.new_format:
for j in range(pruneidx_size):
Expand Down Expand Up @@ -337,8 +355,12 @@ def init_ngrams(self):
"""
self.wv.ngrams = {}
all_ngrams = []
for w, v in self.wv.vocab.items():
self.wv.syn0 = np.zeros((len(self.wv.vocab), self.vector_size), dtype=REAL)

for w, vocab in self.wv.vocab.items():
all_ngrams += self.compute_ngrams(w, self.wv.min_n, self.wv.max_n)
self.wv.syn0[vocab.index] += np.array(self.wv.syn0_all[vocab.index])

all_ngrams = set(all_ngrams)
self.num_ngram_vectors = len(all_ngrams)
ngram_indices = []
Expand All @@ -348,6 +370,18 @@ def init_ngrams(self):
self.wv.ngrams[ngram] = i
self.wv.syn0_all = self.wv.syn0_all.take(ngram_indices, axis=0)

ngram_weights = self.wv.syn0_all

logger.info("loading weights for %s vocabulary words for fastText models from %s.bin", len(self.wv.vocab), self.file_name)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Typo: fastText model (not models)


for w, vocab in self.wv.vocab.items():
word_ngrams = self.compute_ngrams(w, self.wv.min_n, self.wv.max_n)
for word_ngram in word_ngrams:
self.wv.syn0[vocab.index] += np.array(ngram_weights[self.wv.ngrams[word_ngram]])

self.wv.syn0[vocab.index] /= (len(word_ngrams) + 1)
logger.info("loaded %s weight matrix for fastText model from %s.bin", self.wv.syn0.shape, self.file_name)

@staticmethod
def compute_ngrams(word, min_n, max_n):
ngram_indices = []
Expand Down
5 changes: 2 additions & 3 deletions gensim/test/test_fasttext_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ def testTraining(self):
self.model_sanity(trained_model)

# Tests temporary training files deleted
self.assertFalse(os.path.exists('%s.vec' % testfile()))
self.assertFalse(os.path.exists('%s.bin' % testfile()))

def testMinCount(self):
Expand Down Expand Up @@ -115,7 +114,7 @@ def testNormalizedVectorsNotSaved(self):
self.assertTrue(loaded_kv.syn0_all_norm is None)

def testLoadFastTextFormat(self):
"""Test model successfully loaded from fastText .vec and .bin files"""
"""Test model successfully loaded from fastText .bin files"""
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Typo here and below: .bin file (not files)

try:
model = fasttext.FastText.load_fasttext_format(self.test_model_file)
except Exception as exc:
Expand Down Expand Up @@ -166,7 +165,7 @@ def testLoadFastTextFormat(self):
self.model_sanity(model)

def testLoadFastTextNewFormat(self):
""" Test model successfully loaded from fastText (new format) .vec and .bin files """
""" Test model successfully loaded from fastText (new format) .bin files """
try:
new_model = fasttext.FastText.load_fasttext_format(self.test_new_model_file)
except Exception as exc:
Expand Down