HaggiVaggi committed on
Commit
38972c8
·
1 Parent(s): 38ce1fa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -20
app.py CHANGED
@@ -13,29 +13,29 @@ def load_data(url):
13
 
14
  df = load_data('data/final_data.csv')
15
 
16
- @st.cache_data
17
- def embedding_and_index():
18
- embeddings_array = np.load('data/embeddings_final.npy')
19
- index = faiss.read_index('data/desc_faiss_index_final.index')
20
- return(embeddings_array, index)
21
 
22
- embeddings_array, index = embedding_and_index()
23
 
24
- @st.cache_resource
25
- def load_tokenizer_and_model():
26
- tokenizer = AutoTokenizer.from_pretrained("DeepPavlov/rubert-base-cased-sentence")
27
- model = AutoModel.from_pretrained("DeepPavlov/rubert-base-cased-sentence")
28
- return tokenizer, model
29
 
30
- tokenizer, model = load_tokenizer_and_model()
31
-
32
- @st.cache_resource
33
- def encode_description(description, tokenizer, model):
34
- tokens = tokenizer(description, return_tensors="pt")
35
- with torch.no_grad():
36
- outputs = model(**tokens)
37
- embeddings = outputs.last_hidden_state.mean(dim=1)
38
- return embeddings.cpu().numpy().astype('float32')
39
 
40
 
41
  st.title('Умный поиск фильмов 🔍🎦')
 
13
 
14
  df = load_data('data/final_data.csv')
15
 
16
+ # @st.cache_data
17
+ # def embedding_and_index():
18
+ # embeddings_array = np.load('data/embeddings_final.npy')
19
+ # index = faiss.read_index('data/desc_faiss_index_final.index')
20
+ # return(embeddings_array, index)
21
 
22
+ # embeddings_array, index = embedding_and_index()
23
 
24
+ # @st.cache_resource
25
+ # def load_tokenizer_and_model():
26
+ # tokenizer = AutoTokenizer.from_pretrained("DeepPavlov/rubert-base-cased-sentence")
27
+ # model = AutoModel.from_pretrained("DeepPavlov/rubert-base-cased-sentence")
28
+ # return tokenizer, model
29
 
30
+ # tokenizer, model = load_tokenizer_and_model()
31
+
32
+ # @st.cache_resource
33
+ # def encode_description(description, tokenizer, model):
34
+ # tokens = tokenizer(description, return_tensors="pt")
35
+ # with torch.no_grad():
36
+ # outputs = model(**tokens)
37
+ # embeddings = outputs.last_hidden_state.mean(dim=1)
38
+ # return embeddings.cpu().numpy().astype('float32')
39
 
40
 
41
  st.title('Умный поиск фильмов 🔍🎦')