# Hugging Face Spaces file-viewer metadata (Space status, file size: 1,377
# bytes, commit hashes, line-number gutter) — scrape residue, not program code.
from datasets import load_dataset
import streamlit as st
import pandas as pd
from huggingface_hub import HfApi, list_models
import os
import datasets
from datasets import Dataset, DatasetDict
from huggingface_hub import HfFileSystem
# from datasets import Dataset
# Dataset.cleanup_cache_files
# --- Configuration -----------------------------------------------------------
# Dataset repo on the Hugging Face Hub that this Space reads from / writes to.
DATASET_REPO_URL = "https://huggingface.co/datasets/Seetha/visual_cs"
DATA_FILENAME = "final_data.csv"
# NOTE(review): DATA_FILE is built but never used below — the CSV is read from
# the Space's working directory via DATA_FILENAME instead. Kept so any external
# code referencing this constant keeps working; confirm whether data/ was the
# intended location.
DATA_FILE = os.path.join("data", DATA_FILENAME)

st.write('dataset')

# Earlier approach: pull the dataset through `datasets.load_dataset`
# (kept for reference).
# dataset = load_dataset("Seetha/visual_cs")
# df = pd.DataFrame.from_dict(dataset["train"])
# st.write('dataset-retrieved')
# st.write(df)

# Hub token comes from the Space's secrets; if it is None the authenticated
# filesystem below will only have anonymous (read) access.
HF_TOKEN = os.environ.get("HF_TOKEN")
st.write("is none?", HF_TOKEN is None)

# Filesystem-style view of the Hub, authenticated so the dataset repo is
# writable.
fs = HfFileSystem(token=HF_TOKEN)

# One-off cleanup that was used to remove a stale file from the repo
# (kept for reference).
# HfApi().delete_file(path_in_repo = DATA_FILENAME ,repo_id = 'Seetha/visual_cs',token= HF_TOKEN,repo_type='dataset')
# st.write('file-deleted')

st.write('Read the CSV file')
# Use the DATA_FILENAME constant instead of repeating the literal path, so the
# filename is defined in exactly one place (same value, same behavior).
data_stakeholdercount = pd.read_csv(DATA_FILENAME)
st.write(data_stakeholdercount)

# Alternative publish path via `Dataset.push_to_hub` (kept for reference).
# tds = Dataset.from_pandas(data_stakeholdercount)
# ds = DatasetDict()
# ds['train'] = tds
# st.write(ds)
# ds.push_to_hub('Seetha/visual_cs',token= HF_TOKEN)

# Show the repo's current contents, then upload the dataframe as test.csv.
st.write(fs.ls("datasets/Seetha/visual_cs", detail=False))
# NOTE(review): to_csv without index=False also writes the row index as the
# first column — confirm that is intended for the uploaded file's layout.
with fs.open('datasets/Seetha/visual_cs/test.csv', 'w') as f:
    data_stakeholdercount.to_csv(f)
# data_stakeholdercount.to_csv("hf://datasets/Seetha/visual_cs/test.csv")