Hariharan Vijayachandran committed
Commit 22427a2 · 1 Parent(s): 3d69c21
Files changed (1):
  1. app.py (+10 -10)
app.py CHANGED
@@ -13,23 +13,23 @@ from annotated_text import annotated_text
 ABSOLUTE_PATH = os.path.dirname(__file__)
 ASSETS_PATH = os.path.join(ABSOLUTE_PATH, 'model_assets')
 
-@st.cache(suppress_st_warning=True)
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def preprocess_text(s):
     return list(filter(lambda x: x != '', (''.join(c if c.isalnum() or c == ' ' else ' ' for c in s)).split(' ')))
 
-@st.cache(suppress_st_warning=True)
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_pairwise_distances(model):
     df = pd.read_csv(f"{ASSETS_PATH}/{model}/pairwise_distances.csv").set_index('index')
     return df
 
-@st.cache(suppress_st_warning=True)
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_pairwise_distances_chunked(model, chunk):
     # for df in pd.read_csv(f"{ASSETS_PATH}/{model}/pairwise_distances.csv", chunksize = 16):
     #     print(df.iloc[0]['queries'])
     #     if chunk == int(df.iloc[0]['queries']):
     #         return df
     return get_pairwise_distances(model)
-@st.cache(suppress_st_warning=True)
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_query_strings():
     df = pd.read_json(f"{ASSETS_PATH}/IUR_Reddit_test_queries_english.jsonl", lines = True)
     df['index'] = df.reset_index().index
@@ -38,7 +38,7 @@ def get_query_strings():
     # df.to_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_queries_english.parquet", index = 'index', partition_cols = 'partition')
 
     # return pd.read_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_queries_english.parquet", columns=['fullText', 'index', 'authorIDs'])
-@st.cache(suppress_st_warning=True)
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_candidate_strings():
     df = pd.read_json(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.jsonl", lines = True)
     df['i'] = df['index']
@@ -49,24 +49,24 @@ def get_candidate_strings():
     # df['partition'] = df['index']%100
     # df.to_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.parquet", index = 'index', partition_cols = 'partition')
     # return pd.read_parquet(f"{ASSETS_PATH}/IUR_Reddit_test_candidates_english.parquet", columns=['fullText', 'index', 'authorIDs'])
-@st.cache(suppress_st_warning=True)
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_embedding_dataset(model):
     data = load_from_disk(f"{ASSETS_PATH}/{model}/embedding")
     return data
-@st.cache(suppress_st_warning=True)
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_bad_queries(model):
     df = get_query_strings().iloc[list(get_pairwise_distances(model)['queries'].unique())][['fullText', 'index', 'authorIDs']]
     return df
-@st.cache(suppress_st_warning=True)
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_gt_candidates(model, author):
     gt_candidates = get_candidate_strings()
     df = gt_candidates[gt_candidates['authorIDs'] == author]
     return df
-@st.cache(suppress_st_warning=True)
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_candidate_text(l):
     return get_candidate_strings().at[l,'fullText']
 
-@st.cache(suppress_st_warning=True)
+@st.cache(suppress_st_warning=True, allow_output_mutation=True)
 def get_annotated_text(text, word, pos):
     print("here", word, pos)
     start = text.index(word, pos)
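Note on the change: every `@st.cache(suppress_st_warning=True)` decorator gains `allow_output_mutation=True`. Legacy Streamlit's `st.cache` hashes a cached function's return value on each rerun and emits a `CachedObjectMutationWarning` if downstream code has mutated it; `allow_output_mutation=True` skips that output hash, which is the usual fix when cached DataFrames are indexed or modified after loading. A minimal sketch of the pattern, where the `load_table` helper and its path are illustrative rather than taken from this repo:

import pandas as pd
import streamlit as st

# Legacy Streamlit API: st.cache predates st.cache_data / st.cache_resource.
# allow_output_mutation=True tells Streamlit not to hash the returned object
# on later runs, so callers may mutate the cached DataFrame without a
# CachedObjectMutationWarning.
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def load_table(path):  # hypothetical helper mirroring the decorated loaders above
    return pd.read_csv(path).set_index('index')

df = load_table("model_assets/example/pairwise_distances.csv")
df['flagged'] = False  # in-place mutation of the cached object is now tolerated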
 
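Separately, the commented-out body of `get_pairwise_distances_chunked` sketches streaming the distances CSV in chunks rather than loading it whole. A sketch of that idea, reusing `ASSETS_PATH` and `get_pairwise_distances` from app.py; the chunk size and matching rule follow the commented code, so this is an assumption about intent, not a tested implementation:

import pandas as pd

def get_pairwise_distances_chunked(model, chunk):
    # Stream the CSV 16 rows at a time and return the first chunk whose
    # leading 'queries' id matches the requested one; fall back to the
    # full load otherwise, exactly as the committed code does today.
    path = f"{ASSETS_PATH}/{model}/pairwise_distances.csv"
    for df in pd.read_csv(path, chunksize=16):
        if chunk == int(df.iloc[0]['queries']):
            return df.set_index('index')
    return get_pairwise_distances(model)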