-
-
Notifications
You must be signed in to change notification settings - Fork 4.4k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Fixed issue #838 #895
Fixed issue #838 #895
Changes from 1 commit
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -273,7 +273,14 @@ def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_h | |
l1 += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1) | ||
return neu1e | ||
|
||
|
||
def sigmoid(p):
    """Numerically stable logistic function, 1 / (1 + exp(-p)).

    Branches on the sign of `p` so that `exp` is only ever called with a
    non-positive argument: exp of a large negative value safely underflows
    to 0.0, whereas the naive `1 / (1 + exp(-p))` raises OverflowError for
    large negative `p` (issue #838).

    Parameters: p -- a real number (any magnitude).
    Returns: float in [0.0, 1.0].
    """
    if p > 0:
        return 1. / (1. + exp(-p))
    # p <= 0: algebraically identical form; exp(p) cannot overflow here.
    # (Original code computed this value but forgot to return it, so the
    # function silently returned None for non-positive inputs.)
    ep = exp(p)
    return ep / (1. + ep)
else: | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer 1: What is this entire `else` branch for? The logic seems obscure (why branch at all? needs at least a comment) and weird (what is the unreachable `raise ValueError` guarding against, when `p > 0` and `p <= 0` already cover every case?). There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer 2: @markroxor Please remove the `else` and add a comment explaining #838. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer 3: I suggest replacing the whole function with a single numerically stable implementation. |
||
raise ValueError | ||
|
||
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True): | ||
neu1e = zeros(l1.shape) | ||
|
||
|
@@ -293,7 +300,7 @@ def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=Tr | |
if w != word.index: | ||
word_indices.append(w) | ||
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size | ||
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output | ||
fb = sigmoid(dot(l1, l2b.T)) # propagate hidden -> output | ||
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate | ||
if learn_hidden: | ||
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Should it say return?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
That's embarrassing, forgot to return in all the haste. Fixing this PR.