"""Streamlit maintenance script: refresh the ``Seetha/visual_cs`` dataset
on the Hugging Face Hub from a local CSV file.

Flow:
  1. Load and display the currently hosted dataset.
  2. Delete the stale data file from the dataset repo.
  3. Read the local replacement CSV and push it as the new ``train`` split.
  4. Mirror the originally hosted frame to ``test.csv`` via the ``hf://``
     filesystem protocol.

Requires the ``HF_TOKEN`` environment variable for all Hub write operations.
"""

import os

import pandas as pd
import streamlit as st
from datasets import Dataset, DatasetDict, load_dataset
from huggingface_hub import HfApi
from huggingface_hub.utils import EntryNotFoundError

# Single source of truth for the target repo and data file name
# (the original hard-coded the repo id in four separate places).
DATASET_REPO_ID = "Seetha/visual_cs"
DATA_FILENAME = "final_data.csv"

# Token is required for delete_file / push_to_hub below; surface its
# presence in the UI so a misconfigured deployment is easy to spot.
HF_TOKEN = os.environ.get("HF_TOKEN")
st.write("is none?", HF_TOKEN is None)

# --- 1. Show the dataset as currently hosted ---------------------------------
st.write('dataset')
dataset = load_dataset(DATASET_REPO_ID)
df = pd.DataFrame.from_dict(dataset["train"])
st.write('dataset-retrieved')
st.write(df)

# --- 2. Remove the stale data file from the dataset repo ---------------------
# The delete is only preparation for the re-push; if the file is already
# gone there is nothing to do, so don't abort the whole refresh.
try:
    HfApi().delete_file(
        path_in_repo=DATA_FILENAME,
        repo_id=DATASET_REPO_ID,
        token=HF_TOKEN,
        repo_type="dataset",
    )
    st.write('file-deleted')
except EntryNotFoundError:
    st.write("remote file already absent; skipping delete")

# --- 3. Push the local CSV as the new train split ----------------------------
# NOTE(review): reads from the working directory, although the original
# also computed os.path.join("data", DATA_FILENAME) without using it —
# confirm which location is intended.
st.write('Read the CSV file')
data_stakeholdercount = pd.read_csv(DATA_FILENAME)
st.write(data_stakeholdercount)

ds = DatasetDict({"train": Dataset.from_pandas(data_stakeholdercount)})
st.write(ds)
ds.push_to_hub(DATASET_REPO_ID, token=HF_TOKEN)

# --- 4. Mirror the previously hosted frame to test.csv -----------------------
# NOTE(review): this uploads the *old* dataframe (df), not the freshly
# pushed CSV — confirm that is intentional.
df.to_csv(f"hf://datasets/{DATASET_REPO_ID}/test.csv")