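# Gradio Space: search hadith narrator biographies and visualize a narrator's
# teacher/student transmission network with pyvis.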
import gradio as gr
from pyvis.network import Network
import pyarabic.araby as araby
import numpy as np
import pandas as pd
import os
from datasets import load_dataset, Features, Value
import matplotlib.pyplot as plt
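# --- Data loading ---
# Edge list, taraf info, and narrator biographies come from the
# FDSRashid/hadith_info dataset, authenticated with the HF_Token Space secret.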
Secret_token = os.getenv('HF_Token')
dataset = load_dataset('FDSRashid/hadith_info', data_files='Basic_Edge_Information.csv', token=Secret_token, split='train')
dataset2 = load_dataset('FDSRashid/hadith_info', data_files='Taraf_Info.csv', token=Secret_token, split='train')
features = Features({'Rawi ID': Value('int32'), 'Famous Name': Value('string'), 'Narrator Rank': Value('string'),
                     'Number of Narrations': Value('string'), 'Official Name': Value('string'),
                     'Title Name': Value('string'), 'Generation': Value('string')})
narrator_bios = load_dataset('FDSRashid/hadith_info', data_files='Teacher_Bios.csv', token=Secret_token, features=features)
narrator_bios = narrator_bios['train'].to_pandas()

# Patch row 49845 (ุฑุณูู ุงููู, the Messenger of God): set a temporary narration
# count of 0 so the string column can be cast to int, then assign its count of 327512.
narrator_bios.loc[49845, 'Narrator Rank'] = 'ุฑุณูู ุงููู'
narrator_bios.loc[49845, 'Number of Narrations'] = 0
narrator_bios['Number of Narrations'] = narrator_bios['Number of Narrations'].astype(int)
narrator_bios.loc[49845, 'Number of Narrations'] = 327512

# Missing generations become -1 so the column can be cast to int as well.
narrator_bios['Generation'] = narrator_bios['Generation'].replace([None], [-1])
narrator_bios['Generation'] = narrator_bios['Generation'].astype(int)
cols = narrator_bios.columns.tolist()
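# Keep only the rows of df whose value in column `col` equals `val`
# (used by the Filter button; `val` arrives as text from a Textbox).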
def filter_df(df, col, val):
    return df[df[col] == val]
edge_info = dataset.to_pandas()
taraf_info = dataset2.to_pandas()
min_year = int(taraf_info['Year'].min())
max_year = int(taraf_info['Year'].max())

# matplotlib 'cool' colormap used to colour nodes and edges.
cmap = plt.colormaps['cool']

def value_to_hex(value):
    # Convert a colormap output to a "#RRGGBB" hex colour string.
    rgba_color = cmap(value)
    return "#{:02X}{:02X}{:02X}".format(int(rgba_color[0] * 255), int(rgba_color[1] * 255), int(rgba_color[2] * 255))
def subsetEdges(fstyear, lstyear):
    info = taraf_info[(taraf_info['Year'] >= fstyear) & (taraf_info['Year'] <= lstyear)]
    narrators = edge_info[edge_info['Edge_ID'].isin(info['ID'].unique())]
    return narrators

def splitIsnad(dataframe):
    dataframe = dataframe.copy()  # work on a copy so callers' slices are never mutated
    teacher_student = dataframe['Edge_Name'].str.split(' TO ')
    dataframe['Teacher'] = teacher_student.apply(lambda x: x[0])
    dataframe['Student'] = teacher_student.apply(lambda x: x[1])
    return dataframe
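# Build a pyvis network of every teacher/student edge touching `narrator_id`
# within [fst_year, lst_year], coloured by the chosen attribute (`yaxis`), and
# return it as an embeddable iframe plus a table of the narrator's edges.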
def network_narrator(narrator_id, fst_year, lst_year, yaxis):
    edges = subsetEdges(fst_year, lst_year)
    edges_single = edges[(edges['Teacher_ID'] == narrator_id) | (edges['Student_ID'] == narrator_id)]
    edges_prepped = splitIsnad(edges_single)

    net = Network(directed=True)
    for _, row in edges_prepped.iterrows():
        source = row['Teacher']
        target = row['Student']
        attribute_value = row[yaxis]
        edge_color = value_to_hex(attribute_value)
        # Biography rows for both ends of the edge (assumes both appear in narrator_bios).
        teacher_info = narrator_bios[narrator_bios['Rawi ID'] == row['Teacher_ID']]
        student_info = narrator_bios[narrator_bios['Rawi ID'] == row['Student_ID']]
        teacher_narrations = teacher_info['Number of Narrations'].to_list()[0]
        student_narrations = student_info['Number of Narrations'].to_list()[0]
        net.add_node(source, color=value_to_hex(teacher_narrations), font={'size': 30, 'color': 'orange'}, label=f"{source}\n{teacher_narrations}")
        net.add_node(target, color=value_to_hex(student_narrations), font={'size': 20, 'color': 'red'}, label=f"{target}\n{student_narrations}")
        net.add_edge(source, target, color=edge_color, value=attribute_value, label=f"{yaxis}:{attribute_value}")

    # Layout and HTML export happen once, after all nodes and edges are added.
    net.barnes_hut(gravity=-3000, central_gravity=0.3, spring_length=200)
    html = net.generate_html()
    html = html.replace("'", "\"")

    # Table of every edge involving this narrator (not restricted to the year range).
    edge_narrator = edge_info[(edge_info['Teacher_ID'] == narrator_id) | (edge_info['Student_ID'] == narrator_id)]
    edge_full = splitIsnad(edge_narrator[['Tarafs', 'Hadiths', 'Isnads', 'Edge_Name', 'Books']]).drop(['Edge_Name'], axis=1)

    return f"""<iframe style="width: 100%; height: 600px;margin:0 auto" name="result" allow="midi; geolocation; microphone; camera;
    display-capture; encrypted-media;" sandbox="allow-modals allow-forms
    allow-scripts allow-same-origin allow-popups
    allow-top-navigation-by-user-activation allow-downloads" allowfullscreen=""
    allowpaymentrequest="" frameborder="0" srcdoc='{html}'></iframe>""", edge_full
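# Look up narrators whose Official or Famous Name contains `name`
# (diacritics stripped on both sides), or whose Rawi ID matches it exactly.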
def narrator_retriever(name):
    stripped = araby.strip_diacritics(name)
    matches = narrator_bios[
        narrator_bios['Official Name'].apply(araby.strip_diacritics).str.contains(stripped)
        | narrator_bios['Famous Name'].apply(araby.strip_diacritics).str.contains(stripped)
        | (narrator_bios['Rawi ID'].astype(str) == name)]
    return matches[['Rawi ID', 'Title Name', 'Official Name', 'Famous Name', 'Number of Narrations', 'Narrator Rank', 'Generation']]
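# --- Gradio UI: a "Search Narrator" tab and a "Visualize Network" tab ---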
with gr.Blocks() as demo:
    gr.Markdown("Search for narrators with this tool, or visualize a narrator's network.")
    with gr.Tab("Search Narrator"):
        text_input = gr.Textbox()
        text_output = gr.DataFrame()
        text_button = gr.Button("Search")
        text_button.click(narrator_retriever, inputs=text_input, outputs=text_output)
        # Optional second step: filter the search results by a column/value pair.
        col = gr.Dropdown(choices=cols)
        df_filter = gr.Textbox()
        filter_button = gr.Button('Filter')
        output2 = gr.DataFrame()
        filter_button.click(filter_df, inputs=[text_output, col, df_filter], outputs=output2)
    with gr.Tab("Visualize Network"):
        with gr.Row():
            image_input = gr.Number()  # Rawi ID of the narrator to visualize
            FirstYear = gr.Slider(min_year, max_year, value=-11, label='Beginning', info='Choose the first year to display Narrators')
            Last_Year = gr.Slider(min_year, max_year, value=9, label='End', info='Choose the last year to display Narrators')
            Yaxis = gr.Dropdown(choices=['Tarafs', 'Hadiths', 'Isnads', 'Books'], value='Tarafs', label='Variable to Display', info='Choose the variable to visualize.')
        image_output = gr.HTML()
        edge_output = gr.DataFrame()
        image_button = gr.Button("Visualize!")
        image_button.click(network_narrator, inputs=[image_input, FirstYear, Last_Year, Yaxis], outputs=[image_output, edge_output])

demo.launch()