# Streamlit app that maps a free-text CPT description to the most similar
# stored SBS code descriptions using PubMedBERT sentence embeddings.
import pickle

import streamlit as st
from sentence_transformers import SentenceTransformer, util

# Load the biomedical sentence-embedding model (also used to encode user queries).
model = SentenceTransformer('neuml/pubmedbert-base-embeddings')

# Load the pre-computed embeddings together with their SBS codes and descriptions.
with open("embeddings_1.pkl", "rb") as fIn:
    stored_data = pickle.load(fIn)
    stored_code = stored_data["SBS_code"]
    stored_sentences = stored_data["sentences"]
    stored_embeddings = stored_data["embeddings"]

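# For reference, a minimal sketch of how "embeddings_1.pkl" could have been
# produced. The CSV file name and its column names are assumptions, not part of
# the original app; only the pickle keys used above are required.
#
#   import pandas as pd
#   df = pd.read_csv("sbs_codes.csv")                    # hypothetical source file
#   sentences = df["description"].str.lower().tolist()   # hypothetical column name
#   embeddings = model.encode(sentences)
#   with open("embeddings_1.pkl", "wb") as fOut:
#       pickle.dump(
#           {
#               "SBS_code": df["SBS_code"].tolist(),
#               "sentences": sentences,
#               "embeddings": embeddings,
#           },
#           fOut,
#       )
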

def mapping_code(user_input):
    """Return the top 5 stored descriptions most similar to the user input."""
    # Embed the (lower-cased) query with the same model as the stored sentences.
    emb1 = model.encode(user_input.lower())

    # Cosine similarity between the query and every stored embedding.
    similarities = []
    for sentence_embedding in stored_embeddings:
        similarity = util.cos_sim(sentence_embedding, emb1).item()
        similarities.append(similarity)

    # Pair each code and description with its score and sort, best match first.
    result = list(zip(stored_code, stored_sentences, similarities))
    result.sort(key=lambda x: x[2], reverse=True)

    num_results = min(5, len(result))
    top_5_results = []
    if num_results > 0:
        for i in range(num_results):
            code, description, similarity_score = result[i]
            top_5_results.append({"Code": code, "Description": description, "Similarity Score": similarity_score})
    else:
        top_5_results.append({"Code": "", "Description": "No similar sentences found", "Similarity Score": 0.0})

    return top_5_results


def main():
    st.title("CPT Description Mapping")

    user_input = st.text_input("Enter CPT description:")

    if st.button("Map"):
        if user_input:
            st.write("Please wait a moment...")

            mapping_results = mapping_code(user_input)

            # Display the ranked matches.
            st.write("Top 5 similar sentences:")
            for i, result in enumerate(mapping_results, 1):
                st.write(
                    f"{i}. Code: {result['Code']}, Description: {result['Description']}, "
                    f"Similarity Score: {result['Similarity Score']:.4f}"
                )


if __name__ == "__main__":
    main()
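
# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py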