from datasets import load_dataset, Dataset
import streamlit as st
import pandas as pd
from huggingface_hub import HfApi
import os

DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/visual_cs"
DATA_FILENAME = "final_data.csv"
DATA_FILE = os.path.join("data", DATA_FILENAME)

# Load the existing dataset from the Hub and display its train split in the app.
st.write('dataset')
dataset = load_dataset("Seetha/visual_cs")
df = dataset["train"].to_pandas()
st.write('dataset-retrieved')
st.write(df)

# Write access to the dataset repo requires a token (exposed as the HF_TOKEN secret).
HF_TOKEN = os.environ.get("HF_TOKEN")
st.write("is none?", HF_TOKEN is None)

# Remove the old CSV from the dataset repo before uploading the refreshed data.
HfApi().delete_file(
    path_in_repo=DATA_FILENAME,
    repo_id='Seetha/visual_cs',
    token=HF_TOKEN,
    repo_type='dataset',
)
st.write('file-deleted')

# Read the local CSV and push it back to the Hub as the dataset's new contents.
# pandas DataFrames have no push_to_hub method, so convert to a datasets.Dataset first.
st.write('Read the CSV file')
data_stakeholdercount = pd.read_csv(DATA_FILENAME)
st.write(data_stakeholdercount)
Dataset.from_pandas(data_stakeholdercount).push_to_hub('Seetha/visual_cs', token=HF_TOKEN)