muhammedAdnan3 committed
Commit bf922be · verified · 1 Parent(s): 2ab421b

Create dataseting.py

Files changed (1)
  1. dataseting.py +94 -0
dataseting.py ADDED
@@ -0,0 +1,94 @@
+import json
+
+# Define a function to create a dataset with unique names
+def create_voice_model_dataset(num_names):
+    # Generate a dataset with unique names
+    dataset = {"voiceModels": []}
+
+    for i in range(1, num_names + 1):
+        voice_model = {
+            "name": f"VoiceModel_{i}",
+            "description": f"Description for Voice Model {i}",
+            "version": "1.0",
+            "language": "en-US",
+            "voiceSettings": {
+                "gender": "neutral",
+                "age": "adult",
+                "accent": "American",
+                "tone": "natural",
+                "speakingRate": 1.0,
+                "volumeGain": 0.0
+            },
+            "speechSynthesis": {
+                "voiceName": f"VoiceModel_{i}_Voice",
+                "sampleRateHertz": 24000,
+                "pitch": 1.0,
+                "range": {
+                    "min": 80,
+                    "max": 250
+                },
+                "intelligibility": 0.8,
+                "emotionalTone": {
+                    "happy": 0.6,
+                    "sad": 0.3,
+                    "angry": 0.2,
+                    "neutral": 0.9
+                }
+            },
+            "phoneticModels": [
+                {
+                    "name": f"VoiceModel_{i}_Phonetic_Model",
+                    "description": "Basic phonetic model for standard American English pronunciation.",
+                    "phonemes": [
+                        "AA", "AE", "AH", "AO", "AW", "AY", "B", "CH", "D", "DH", "EH", "ER", "EY", "F", "G", "HH", "IH", "IY", "JH", "K", "L", "M", "N", "NG", "OW", "OY", "P", "R", "S", "SH", "T", "TH", "UH", "UW", "V", "W", "Y", "Z", "ZH"
+                    ]
+                }
+            ],
+            "sampleVoices": [
+                {
+                    "name": f"VoiceModel_{i}_Sample_Voice_1",
+                    "description": "Sample voice for formal contexts.",
+                    "gender": "male",
+                    "age": "adult",
+                    "audioFiles": [
+                        f"sample_{i}_1.wav",
+                        f"sample_{i}_2.wav",
+                        f"sample_{i}_3.wav"
+                    ]
+                },
+                {
+                    "name": f"VoiceModel_{i}_Sample_Voice_2",
+                    "description": "Sample voice for informal contexts.",
+                    "gender": "female",
+                    "age": "adult",
+                    "audioFiles": [
+                        f"sample_{i}_4.wav",
+                        f"sample_{i}_5.wav",
+                        f"sample_{i}_6.wav"
+                    ]
+                }
+            ],
+            "performanceMetrics": {
+                "accuracy": 0.95,
+                "latency": "100ms",
+                "responseTime": "250ms"
+            },
+            "additionalFeatures": {
+                "emotionRecognition": True,
+                "contextualAdaptation": True,
+                "multiLanguageSupport": False
+            }
+        }
+        dataset["voiceModels"].append(voice_model)
+
+    return dataset
+
+# Create the dataset
+num_names = 4000
+dataset = create_voice_model_dataset(num_names)
+
+# Save the dataset to a JSON file
+with open('voice_model_dataset.json', 'w') as f:
+    json.dump(dataset, f, indent=4)
+
+print(f"Dataset with {num_names} voice models has been created and saved to 'voice_model_dataset.json'.")
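Not part of the commit itself, but a quick sanity check of the output may help reviewers: the minimal Python sketch below reloads voice_model_dataset.json and prints a few fields, assuming dataseting.py has already been run in the same directory (the file name and keys are taken from the script above).

import json

# Reload the generated file and spot-check its contents
# (assumes dataseting.py has already produced voice_model_dataset.json here).
with open('voice_model_dataset.json') as f:
    data = json.load(f)

models = data["voiceModels"]
print(len(models))                           # expected: 4000
print(models[0]["name"])                     # expected: VoiceModel_1
print(models[0]["voiceSettings"]["accent"])  # expected: American

The same file should also load with the Hugging Face datasets library, e.g. load_dataset("json", data_files="voice_model_dataset.json", field="voiceModels"), though that is an assumption about downstream use rather than anything this commit specifies.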