# Maximal Marginal Relevance
import numpy as np
from operator import itemgetter
from typing import List, Tuple
from sklearn.metrics.pairwise import cosine_similarity


# Linearly combines relevance and diversity, but does not account for mutual
# coverage between the selected items.
def my_mmr(query, vectors, top_n, diversity, sim_metric=cosine_similarity):
    indices = my_mmr_helper(query, vectors, top_n, diversity, sim_metric)
    return [vectors[i] for i in indices]


def my_mmr_helper(query, vectors, top_n, diversity, sim_metric=cosine_similarity):
    keywords = mmr_orig(
        query.reshape(1, -1),
        np.array(vectors),
        list(range(len(vectors))),
        top_n,
        diversity,
        sim_metric,
    )
    return [i for i, _ in keywords]
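

# A minimal usage sketch of the wrappers above (added for illustration; the toy
# data and this helper are assumptions, not part of the original file). It shows
# the expected inputs: `query` as a 1-D NumPy array and `vectors` as a list of
# 1-D arrays of the same dimensionality.
def _example_my_mmr():
    rng = np.random.default_rng(0)
    query = rng.normal(size=8)
    vectors = [rng.normal(size=8) for _ in range(20)]
    # Select 5 vectors that balance similarity to the query against
    # similarity to the vectors already selected.
    selected = my_mmr(query, vectors, top_n=5, diversity=0.5)
    assert len(selected) == 5
    return selected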
# Adapted from https://github.com/MaartenGr/KeyBERT/blob/master/keybert/_mmr.py
def mmr_orig(
    doc_embedding: np.ndarray,
    word_embeddings: np.ndarray,
    words: List[str],
    top_n: int = 5,
    diversity: float = 0.8,
    sim_metric=cosine_similarity,
) -> List[Tuple[str, float]]:
    """Calculate Maximal Marginal Relevance (MMR)
    between candidate keywords and the document.

    MMR considers the similarity of keywords/keyphrases to the document,
    along with the similarity to the already selected keywords and
    keyphrases. This yields a selection of keywords that is diverse within
    itself while remaining relevant to the document.

    Arguments:
        doc_embedding: The document embedding
        word_embeddings: The embeddings of the candidate keywords/keyphrases
        words: The candidate keywords/keyphrases
        top_n: The number of keywords/keyphrases to return
        diversity: How diverse the selected keywords/keyphrases are.
                   Values between 0 and 1, with 0 being not diverse at all
                   and 1 being most diverse.
        sim_metric: The pairwise similarity function (defaults to cosine similarity)

    Returns:
        List[Tuple[str, float]]: The selected keywords/keyphrases with their similarities
    """
    # Extract similarity within words, and between words and the document
    word_doc_similarity = sim_metric(word_embeddings, doc_embedding)
    word_similarity = sim_metric(word_embeddings)

    # Initialize candidates and pick the best keyword/keyphrase first
    keywords_idx = [np.argmax(word_doc_similarity)]
    candidates_idx = [i for i in range(len(words)) if i != keywords_idx[0]]

    for _ in range(min(top_n - 1, len(words) - 1)):
        # Extract similarities within candidates and
        # between candidates and selected keywords/phrases
        candidate_similarities = word_doc_similarity[candidates_idx, :]
        target_similarities = np.max(
            word_similarity[candidates_idx][:, keywords_idx], axis=1
        )

        # Calculate MMR: weight relevance to the document against
        # similarity to the keywords already selected
        mmr = (
            1 - diversity
        ) * candidate_similarities - diversity * target_similarities.reshape(-1, 1)
        mmr_idx = candidates_idx[np.argmax(mmr)]

        # Update keywords & candidates
        keywords_idx.append(mmr_idx)
        candidates_idx.remove(mmr_idx)

    # Extract and sort keywords in descending similarity to the document
    keywords = [
        (words[idx], round(float(word_doc_similarity.reshape(1, -1)[0][idx]), 4))
        for idx in keywords_idx
    ]
    keywords = sorted(keywords, key=itemgetter(1), reverse=True)
    return keywords
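

# A minimal, self-contained demo (added for illustration; the toy embeddings and
# word list below are assumptions, not part of the original file). It calls
# mmr_orig directly and shows how the `diversity` parameter trades relevance to
# the document against redundancy among the selected keywords.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    doc_embedding = rng.normal(size=(1, 16))
    word_embeddings = rng.normal(size=(10, 16))
    words = [f"keyword_{i}" for i in range(10)]
    # Higher diversity pushes the selection away from near-duplicate keywords.
    for diversity in (0.2, 0.8):
        selected = mmr_orig(
            doc_embedding, word_embeddings, words, top_n=5, diversity=diversity
        )
        print(f"diversity={diversity}:", selected)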