Spaces: Running on T4 · Running on T4 · multilingual
Browse files
semantic_search/all_search_execute.py
CHANGED
@@ -163,17 +163,17 @@ def handler(input_,session_id):
|
|
163 |
opensearch_translation_pipeline = json.loads((requests.get(host+'_search/pipeline/ml_inference_for_vector_search_and_language_translation', auth=awsauth,headers=headers)).text)
|
164 |
path = "_search/pipeline/ml_inference_for_vector_search_and_language_translation"
|
165 |
url = host + path
|
166 |
-
opensearch_translation_pipeline["phase_results_processors"] = hybrid_search_processor
|
167 |
print(opensearch_translation_pipeline)
|
168 |
-
r = requests.put(url, auth=awsauth, json=opensearch_translation_pipeline, headers=headers)
|
169 |
print("translation hybrid Search Pipeline updated: "+str(r.status_code))
|
170 |
|
171 |
######## Updating opensearch_translation_pipeline_with_rerank Search pipeline #######
|
172 |
opensearch_translation_pipeline_with_rerank = json.loads((requests.get(host+'_search/pipeline/ml_inference_for_vector_search_and_language_translation_with_rerank', auth=awsauth,headers=headers)).text)
|
173 |
path = "_search/pipeline/ml_inference_for_vector_search_and_language_translation_with_rerank"
|
174 |
url = host + path
|
175 |
-
opensearch_translation_pipeline_with_rerank["phase_results_processors"] = hybrid_search_processor
|
176 |
-
r = requests.put(url, auth=awsauth, json=opensearch_translation_pipeline_with_rerank, headers=headers)
|
177 |
print("translation hybrid rerank Search Pipeline updated: "+str(r.status_code))
|
178 |
######## start of Applying LLM filters #######
|
179 |
if(st.session_state.input_rewritten_query!=""):
|
|
|
163 |
opensearch_translation_pipeline = json.loads((requests.get(host+'_search/pipeline/ml_inference_for_vector_search_and_language_translation', auth=awsauth,headers=headers)).text)
|
164 |
path = "_search/pipeline/ml_inference_for_vector_search_and_language_translation"
|
165 |
url = host + path
|
166 |
+
opensearch_translation_pipeline["ml_inference_for_vector_search_and_language_translation"]["phase_results_processors"] = hybrid_search_processor
|
167 |
print(opensearch_translation_pipeline)
|
168 |
+
r = requests.put(url, auth=awsauth, json=opensearch_translation_pipeline["ml_inference_for_vector_search_and_language_translation"], headers=headers)
|
169 |
print("translation hybrid Search Pipeline updated: "+str(r.status_code))
|
170 |
|
171 |
######## Updating opensearch_translation_pipeline_with_rerank Search pipeline #######
|
172 |
opensearch_translation_pipeline_with_rerank = json.loads((requests.get(host+'_search/pipeline/ml_inference_for_vector_search_and_language_translation_with_rerank', auth=awsauth,headers=headers)).text)
|
173 |
path = "_search/pipeline/ml_inference_for_vector_search_and_language_translation_with_rerank"
|
174 |
url = host + path
|
175 |
+
opensearch_translation_pipeline_with_rerank["opensearch_translation_pipeline_with_rerank"]["phase_results_processors"] = hybrid_search_processor
|
176 |
+
r = requests.put(url, auth=awsauth, json=opensearch_translation_pipeline_with_rerank["opensearch_translation_pipeline_with_rerank"], headers=headers)
|
177 |
print("translation hybrid rerank Search Pipeline updated: "+str(r.status_code))
|
178 |
######## start of Applying LLM filters #######
|
179 |
if(st.session_state.input_rewritten_query!=""):
|