import numpy as np
import operator
import re
from river.base.transformer import Transformer
from river.feature_extraction.vectorize import VectorizerMixin
from storage import Vocabulary, Context, WordRep
from scipy.spatial.distance import cosine
from nltk import word_tokenize


class IncrementalWordEmbedding(Transformer, VectorizerMixin):
    """Base class for incremental word-embedding transformers."""

    def __init__(
        self,
        v_size,
        c_size,
        w_size,
        normalize=True,
        on=None,
        strip_accents=True,
        lowercase=True,
        preprocessor=None,
        tokenizer=None,
        ngram_range=(1, 1),
    ):
        super().__init__(
            on=on,
            strip_accents=strip_accents,
            lowercase=lowercase,
            preprocessor=preprocessor,
            tokenizer=tokenizer,
            ngram_range=ngram_range,
        )
        self.v_size = v_size
        self.c_size = c_size
        self.w_size = w_size
        self.normalize = normalize


class WordContextMatrix(IncrementalWordEmbedding):
    """Incremental word-context (co-occurrence) matrix with optional PPMI weighting."""

    def __init__(
        self,
        v_size,
        c_size,
        w_size,
        normalize=True,
        on=None,
        strip_accents=True,
        lowercase=True,
        preprocessor=None,
        tokenizer=None,
        ngram_range=(1, 1),
        is_ppmi=True,
    ):
        super().__init__(
            v_size,
            c_size,
            w_size,
            normalize=normalize,
            on=on,
            strip_accents=strip_accents,
            lowercase=lowercase,
            preprocessor=preprocessor,
            tokenizer=tokenizer,
            ngram_range=ngram_range,
        )
        self.vocabulary = Vocabulary(self.v_size)
        self.contexts = Context(self.c_size)
        self.d = 0  # number of tokens processed so far
        self.is_ppmi = is_ppmi

    def transform_one(self, x):
        return self.process_text(x)
    # Earlier version that processed a whole text at once, kept for reference:
    # def learn_one(self, x):
    #     tokens = self.process_text(x)
    #     for w in tokens:
    #         if w not in self.vocabulary:
    #             self.vocabulary.add(WordRep(w, self.c_size))
    #         self.d += 1
    #     for i, w in enumerate(tokens):
    #         contexts = _get_contexts(i, self.w_size, tokens)
    #         if w in self.vocabulary:
    #             self.vocabulary[w].counter += 1
    #         for c in contexts:
    #             if c not in self.contexts:
    #                 # if the context store is full, the context is not added
    #                 self.contexts.add(c)
    #             if c in self.contexts:
    #                 self.vocabulary[w].add_context(c)
    #     return self
    def learn_one(self, x, **kwargs):
        # x is a single token; the full token list of the current text is
        # passed through kwargs so the context window can be computed.
        tokens = kwargs['tokens']
        i = tokens.index(x)  # position of the first occurrence of x in tokens
        self.d += 1
        if x not in self.vocabulary:
            self.vocabulary.add(WordRep(x, self.c_size))
        contexts = _get_contexts(i, self.w_size, tokens)
        if x in self.vocabulary:
            self.vocabulary[x].counter += 1
        for c in contexts:
            if c not in self.contexts:
                # if the context store is full, the context is not added
                self.contexts.add(c)
            if c in self.contexts and x in self.vocabulary:
                self.vocabulary[x].add_context(c)
        return self
    def get_embedding(self, x):
        if x in self.vocabulary:
            word_rep = self.vocabulary[x]
            embedding = np.zeros(self.c_size, dtype=float)
            contexts = word_rep.contexts.items()
            if self.is_ppmi:
                # PPMI weighting: max(0, log2((#(w, c) * D) / (#(w) * #(c))))
                for context, cooccurrence in contexts:
                    ind_c = self.contexts[context]
                    pmi = np.log2(
                        (cooccurrence * self.d) / (word_rep.counter * self.vocabulary[context].counter)
                    )
                    embedding[ind_c] = max(0, pmi)
            else:
                # Raw co-occurrence counts.
                for context, cooccurrence in contexts:
                    ind_c = self.contexts[context]
                    embedding[ind_c] = cooccurrence
            return embedding
        return None


def _get_contexts(ind_word, w_size, tokens):
    # TODO: add a try/except to check that the token slices can be retrieved.
    slice_start = ind_word - w_size if (ind_word - w_size >= 0) else 0
    slice_end = len(tokens) if (ind_word + w_size + 1 >= len(tokens)) else ind_word + w_size + 1
    first_part = tokens[slice_start:ind_word]
    last_part = tokens[ind_word + 1:slice_end]
    contexts = tuple(first_part + last_part)
    return contexts
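
# A quick illustration of _get_contexts (assumed example, not in the original
# file): for tokens = ['the', 'cat', 'sat', 'on', 'the', 'mat'], ind_word = 2
# ('sat') and w_size = 1, the slice boundaries are 1 and 4, so the function
# returns ('cat', 'on') -- the tokens within one position on each side.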


def _preprocessing_steps(preprocessing_steps, x):
    # Apply each preprocessing callable in order (e.g. field selection, tokenization).
    for step in preprocessing_steps:
        x = step(x)
    return x


def run(stream_data, model, on=None, tokenizer=None):
    preprocessing_steps = []
    if on is not None:
        preprocessing_steps.append(operator.itemgetter(on))
    preprocessing_steps.append(
        re.compile(r"(?u)\b\w\w+\b").findall if tokenizer is None else tokenizer
    )
    for text, y in stream_data:
        tokens = _preprocessing_steps(preprocessing_steps, text)
        for w in tokens:
            model = model.learn_one(w, tokens=tokens)
    print(cosine(model.get_embedding('she'), model.get_embedding('he')))
    print(model.get_embedding('he'))
    print(model.get_embedding('she'))
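

# A minimal usage sketch (assumed example, not part of the original file): the
# toy stream, vocabulary/context sizes, and window size below are placeholders
# chosen only to exercise run() end to end with the classes defined above.
if __name__ == "__main__":
    toy_stream = [
        ("he is the doctor and she is the doctor", 0),
        ("she reads the book while he reads the paper", 1),
        ("he and she walk to the office every day", 0),
    ]
    wcm = WordContextMatrix(v_size=100, c_size=50, w_size=2)
    run(toy_stream, wcm)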