darshankr committed on
Commit 287c28c · verified · 1 Parent(s): a2bdec8

Upload 795 files

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. Indic-TTS/.gitignore +7 -0
  2. Indic-TTS/LICENSE.txt +21 -0
  3. Indic-TTS/README.md +93 -0
  4. Indic-TTS/TTS/.cardboardlint.yml +5 -0
  5. Indic-TTS/TTS/.dockerignore +2 -0
  6. Indic-TTS/TTS/.github/ISSUE_TEMPLATE/bug_report.yaml +85 -0
  7. Indic-TTS/TTS/.github/ISSUE_TEMPLATE/config.yml +8 -0
  8. Indic-TTS/TTS/.github/ISSUE_TEMPLATE/feature_request.md +25 -0
  9. Indic-TTS/TTS/.github/PR_TEMPLATE.md +15 -0
  10. Indic-TTS/TTS/.github/stale.yml +18 -0
  11. Indic-TTS/TTS/.github/workflows/aux_tests.yml +49 -0
  12. Indic-TTS/TTS/.github/workflows/data_tests.yml +49 -0
  13. Indic-TTS/TTS/.github/workflows/docker.yaml +65 -0
  14. Indic-TTS/TTS/.github/workflows/inference_tests.yml +49 -0
  15. Indic-TTS/TTS/.github/workflows/pypi-release.yml +96 -0
  16. Indic-TTS/TTS/.github/workflows/style_check.yml +47 -0
  17. Indic-TTS/TTS/.github/workflows/text_tests.yml +48 -0
  18. Indic-TTS/TTS/.github/workflows/tts_tests.yml +51 -0
  19. Indic-TTS/TTS/.github/workflows/vocoder_tests.yml +46 -0
  20. Indic-TTS/TTS/.github/workflows/zoo_tests.yml +50 -0
  21. Indic-TTS/TTS/.gitignore +170 -0
  22. Indic-TTS/TTS/.pre-commit-config.yaml +27 -0
  23. Indic-TTS/TTS/.pylintrc +597 -0
  24. Indic-TTS/TTS/.readthedocs.yml +18 -0
  25. Indic-TTS/TTS/CITATION.cff +20 -0
  26. Indic-TTS/TTS/CODE_OF_CONDUCT.md +133 -0
  27. Indic-TTS/TTS/CODE_OWNERS.rst +75 -0
  28. Indic-TTS/TTS/CONTRIBUTING.md +136 -0
  29. Indic-TTS/TTS/Dockerfile +20 -0
  30. Indic-TTS/TTS/LICENSE.txt +373 -0
  31. Indic-TTS/TTS/MANIFEST.in +14 -0
  32. Indic-TTS/TTS/Makefile +72 -0
  33. Indic-TTS/TTS/README.md +217 -0
  34. Indic-TTS/TTS/TTS.egg-info/PKG-INFO +253 -0
  35. Indic-TTS/TTS/TTS.egg-info/SOURCES.txt +225 -0
  36. Indic-TTS/TTS/TTS.egg-info/dependency_links.txt +1 -0
  37. Indic-TTS/TTS/TTS.egg-info/entry_points.txt +3 -0
  38. Indic-TTS/TTS/TTS.egg-info/not-zip-safe +1 -0
  39. Indic-TTS/TTS/TTS.egg-info/requires.txt +44 -0
  40. Indic-TTS/TTS/TTS.egg-info/top_level.txt +1 -0
  41. Indic-TTS/TTS/TTS/.models.json +500 -0
  42. Indic-TTS/TTS/TTS/VERSION +1 -0
  43. Indic-TTS/TTS/TTS/__init__.py +6 -0
  44. Indic-TTS/TTS/TTS/__pycache__/__init__.cpython-37.pyc +0 -0
  45. Indic-TTS/TTS/TTS/__pycache__/model.cpython-37.pyc +0 -0
  46. Indic-TTS/TTS/TTS/bin/__init__.py +0 -0
  47. Indic-TTS/TTS/TTS/bin/__pycache__/__init__.cpython-37.pyc +0 -0
  48. Indic-TTS/TTS/TTS/bin/__pycache__/synthesize.cpython-37.pyc +0 -0
  49. Indic-TTS/TTS/TTS/bin/collect_env_info.py +48 -0
  50. Indic-TTS/TTS/TTS/bin/compute_attention_masks.py +165 -0
Indic-TTS/.gitignore ADDED
@@ -0,0 +1,7 @@
+ *.pyc
+ *.pth
+ *.json
+ *.DS_Store
+ *.log
+ inference/checkpoints
+ *.wav
Indic-TTS/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 AI4Bhārat
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
Indic-TTS/README.md ADDED
@@ -0,0 +1,93 @@
+ # AI4Bharat Indic-TTS
+
+ ## Towards Building Text-To-Speech Systems for the Next Billion Users
+
+ > 🎉 Accepted at ICASSP 2023
+
+ Deep learning based text-to-speech (TTS) systems have been evolving rapidly with advances in model architectures, training methodologies, and generalization across speakers and languages. However, these advances have not been thoroughly investigated for Indian language speech synthesis. Such investigation is computationally expensive given the number and diversity of Indian languages, relatively lower resource availability, and the diverse set of advances in neural TTS that remain untested. In this paper, we evaluate the choice of acoustic models, vocoders, supplementary loss functions, training schedules, and speaker and language diversity for Dravidian and Indo-Aryan languages. Based on this, we identify monolingual models with FastPitch and HiFi-GAN V1, trained jointly on male and female speakers to perform the best. With this setup, we train and evaluate TTS models for 13 languages and find our models to significantly improve upon existing models in all languages as measured by mean opinion scores. We open-source all models on the [Bhashini platform](https://bhashini.gov.in/ulca/model/explore-models).
+
+ **TL;DR:** We open-source SOTA Text-To-Speech models for 13 Indian languages: *Assamese, Bengali, Bodo, Gujarati, Hindi, Kannada, Malayalam, Manipuri, Marathi, Odia, Rajasthani, Tamil and Telugu*.
+
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-assamese-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-assamese-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-bengali-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-bengali-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-bodo-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-bodo-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-gujarati-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-gujarati-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-hindi-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-hindi-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-kannada-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-kannada-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-malayalam-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-malayalam-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-manipuri-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-manipuri-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-marathi-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-marathi-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-rajasthani-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-rajasthani-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-tamil-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-tamil-on-indictts?p=towards-building-text-to-speech-systems-for)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/towards-building-text-to-speech-systems-for/speech-synthesis-telugu-on-indictts)](https://paperswithcode.com/sota/speech-synthesis-telugu-on-indictts?p=towards-building-text-to-speech-systems-for)
+
+
+ **Authors:** Gokul Karthik Kumar*, Praveen S V*, Pratyush Kumar, Mitesh M. Khapra, Karthik Nandakumar
+
+ **[[ArXiv Preprint](https://arxiv.org/abs/2211.09536)] [[Audio Samples](https://models.ai4bharat.org/#/tts/samples)] [[Try It Live](https://models.ai4bharat.org/#/tts)] [[Video](https://youtu.be/I3eo8IUAP7s)]**
+
+ ## Unified architecture of our TTS system
+ <img src='images/architecture.png' width=1024>
+
+ ## Results
+ <img src='images/evaluation.png' width=1024>
+
+ ## Setup:
+ ### Environment Setup:
+ ```
+ # 1. Create environment
+ sudo apt-get install libsndfile1-dev ffmpeg enchant
+ conda create -n tts-env
+ conda activate tts-env
+
+ # 2. Setup PyTorch
+ pip3 install -U torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
+
+ # 3. Setup Trainer
+ git clone https://github.com/gokulkarthik/Trainer
+
+ cd Trainer
+ pip3 install -e .[all]
+ cd ..
+ # Or, with an existing local Trainer installation:
+ # - copy Trainer/trainer/logging/wandb_logger.py into it (fixed wandb logger)
+ # - copy Trainer/trainer/trainer.py into it (fixed model.module.test_log and added code to log the epoch)
+ # - add `gpus = [str(gpu) for gpu in gpus]` at line 53 of trainer/distribute.py
+
+ # 4. Setup TTS
+ git clone https://github.com/gokulkarthik/TTS
+
+ cd TTS
+ pip3 install -e .[all]
+ cd ..
+ # Or, with an existing local TTS installation:
+ # - copy TTS/TTS/bin/synthesize.py into it (added multiple-output support for TTS.bin.synthesize)
+
+ # 5. Install other requirements
+ pip3 install -r requirements.txt
+ ```
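As a quick sanity check after the environment setup, a snippet like the following (not part of the repository; it only assumes the `tts-env` environment created above is active) confirms that the installed PyTorch build can see the GPU:

```python
import torch

# Confirm the PyTorch build and CUDA visibility before launching training.
print("torch:", torch.__version__)
print("cuda available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("device:", torch.cuda.get_device_name(0))
```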
+
+
+ ### Data Setup:
+ 1. Format the IndicTTS dataset in LJSpeech format using [preprocessing/FormatDatasets.ipynb](./preprocessing/FormatDatasets.ipynb) (see the sketch after this list for the target layout)
+ 2. Analyze the IndicTTS dataset to check TTS suitability using [preprocessing/AnalyzeDataset.ipynb](./preprocessing/AnalyzeDataset.ipynb)
+
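The formatting itself is done in the notebook referenced in step 1; for orientation, the LJSpeech layout it targets is a `wavs/` directory plus a pipe-separated `metadata.csv`. A minimal sketch of producing that layout (the input file names and the choice of repeating the raw text as the "normalized" column are illustrative assumptions, not code taken from the notebook):

```python
import csv
import shutil
from pathlib import Path

# Hypothetical input: (wav_path, transcript) pairs from the IndicTTS corpus.
samples = [
    ("raw/male_00001.wav", "ఇది ఒక ఉదాహరణ వాక్యం."),
    ("raw/male_00002.wav", "మరో ఉదాహరణ వాక్యం."),
]

out_dir = Path("indictts_te_ljspeech")
(out_dir / "wavs").mkdir(parents=True, exist_ok=True)

with open(out_dir / "metadata.csv", "w", encoding="utf-8", newline="") as f:
    writer = csv.writer(f, delimiter="|")
    for wav_path, text in samples:
        utt_id = Path(wav_path).stem
        shutil.copy(wav_path, out_dir / "wavs" / f"{utt_id}.wav")
        # LJSpeech rows are: id|raw text|normalized text
        writer.writerow([utt_id, text, text])
```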
+ ### Training Steps:
+ 1. Set the configuration with [main.py](./main.py), [vocoder.py](./vocoder.py), [configs](./configs) and [run.sh](./run.sh). Make sure to update CUDA_VISIBLE_DEVICES in all these files.
+ 2. Train and test by executing `sh run.sh`.
+
+ ### Inference:
+ Trained model weights and config files can be downloaded from [this link](https://github.com/AI4Bharat/Indic-TTS/releases/tag/v1-checkpoints-release).
+
+ ```
+ python3 -m TTS.bin.synthesize --text <TEXT> \
+ --model_path <LANG>/fastpitch/best_model.pth \
+ --config_path <LANG>/config.json \
+ --vocoder_path <LANG>/hifigan/best_model.pth \
+ --vocoder_config_path <LANG>/hifigan/config.json \
+ --out_path <OUT_PATH>
+ ```
+
+ ---
+ Code Reference: [https://github.com/coqui-ai/TTS](https://github.com/coqui-ai/TTS)
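For scripted inference, the same checkpoints can also be loaded through the Python API of the bundled Coqui TTS fork. A minimal sketch assuming the `Synthesizer` class and the upstream Coqui TTS 0.x argument names (the checkpoint paths are placeholders, and multi-speaker checkpoints may additionally require a speaker name):

```python
from TTS.utils.synthesizer import Synthesizer

# Placeholder paths: point these at the per-language files from the release linked above.
synthesizer = Synthesizer(
    tts_checkpoint="ta/fastpitch/best_model.pth",
    tts_config_path="ta/config.json",
    vocoder_checkpoint="ta/hifigan/best_model.pth",
    vocoder_config="ta/hifigan/config.json",
    use_cuda=False,
)

wav = synthesizer.tts("வணக்கம்")             # waveform as a list of float samples
synthesizer.save_wav(wav, "tts_output.wav")  # written at the model's sample rate
```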
Indic-TTS/TTS/.cardboardlint.yml ADDED
@@ -0,0 +1,5 @@
+ linters:
+ - pylint:
+ # pylintrc: pylintrc
+ filefilter: ['- test_*.py', '+ *.py', '- *.npy']
+ # exclude:
Indic-TTS/TTS/.dockerignore ADDED
@@ -0,0 +1,2 @@
+ .git/
+ Dockerfile
Indic-TTS/TTS/.github/ISSUE_TEMPLATE/bug_report.yaml ADDED
@@ -0,0 +1,85 @@
+ name: "🐛 Bug report"
+ description: Create a bug report to help 🐸 improve
+ title: '[Bug] '
+ labels: [ "bug" ]
+ body:
+ - type: markdown
+ attributes:
+ value: |
+ Welcome to the 🐸TTS! Thanks for taking the time to fill out this bug report!
+
+ - type: textarea
+ id: bug-description
+ attributes:
+ label: Describe the bug
+ description: A clear and concise description of what the bug is. If you intend to submit a PR for this issue, tell us in the description. Thanks!
+ placeholder: Bug description
+ validations:
+ required: true
+
+ - type: textarea
+ id: reproduction
+ attributes:
+ label: To Reproduce
+ description: |
+ Please share your code to reproduce the error.
+
+ Issues are fixed faster if you can provide a working example.
+
+ The best place for sharing code is colab. https://colab.research.google.com/
+ So we can directly run your code and reproduce the issue.
+
+ In the worst case, provide steps to reproduce the behavior.
+
+ 1. Run the following command '...'
+ 2. ...
+ 3. See error
+ placeholder: Reproduction
+ validations:
+ required: true
+
+ - type: textarea
+ id: expected-behavior
+ attributes:
+ label: Expected behavior
+ description: "Write down what the expected behaviour was"
+
+ - type: textarea
+ id: logs
+ attributes:
+ label: Logs
+ description: "Please include the relevant logs if you can."
+ render: shell
+
+ - type: textarea
+ id: system-info
+ attributes:
+ label: Environment
+ description: |
+ You can either run `TTS/bin/collect_env_info.py`
+
+ ```bash
+ wget https://raw.githubusercontent.com/coqui-ai/TTS/main/TTS/bin/collect_env_info.py
+ python collect_env_info.py
+ ```
+
+ or fill in the fields below manually.
+ render: shell
+ placeholder: |
+ - 🐸TTS Version (e.g., 1.3.0):
+ - PyTorch Version (e.g., 1.8)
+ - Python version:
+ - OS (e.g., Linux):
+ - CUDA/cuDNN version:
+ - GPU models and configuration:
+ - How you installed PyTorch (`conda`, `pip`, source):
+ - Any other relevant information:
+ validations:
+ required: true
+ - type: textarea
+ id: context
+ attributes:
+ label: Additional context
+ description: Add any other context about the problem here.
+ validations:
+ required: false
Indic-TTS/TTS/.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,8 @@
+ blank_issues_enabled: false
+ contact_links:
+ - name: CoquiTTS GitHub Discussions
+ url: https://github.com/coqui-ai/TTS/discussions
+ about: Please ask and answer questions here.
+ - name: Coqui Security issue disclosure
+ url: mailto:[email protected]
+ about: Please report security vulnerabilities here.
Indic-TTS/TTS/.github/ISSUE_TEMPLATE/feature_request.md ADDED
@@ -0,0 +1,25 @@
+ ---
+ name: 🚀 Feature request
+ about: Suggest a feature or an idea for this project
+ title: '[Feature request] '
+ labels: feature request
+ assignees: ''
+
+ ---
+ <!-- Welcome to the 🐸TTS project!
+ We are excited to see your interest, and appreciate your support! --->
+ **🚀 Feature Description**
+
+ <!--A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
+
+ **Solution**
+
+ <!-- A clear and concise description of what you want to happen. -->
+
+ **Alternative Solutions**
+
+ <!-- A clear and concise description of any alternative solutions or features you've considered. -->
+
+ **Additional context**
+
+ <!-- Add any other context or screenshots about the feature request here. -->
Indic-TTS/TTS/.github/PR_TEMPLATE.md ADDED
@@ -0,0 +1,15 @@
+ # Pull request guidelines
+
+ Welcome to the 🐸TTS project! We are excited to see your interest, and appreciate your support!
+
+ This repository is governed by the Contributor Covenant Code of Conduct. For more details, see the [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) file.
+
+ In order to make a good pull request, please see our [CONTRIBUTING.md](CONTRIBUTING.md) file.
+
+ Before accepting your pull request, you will be asked to sign a [Contributor License Agreement](https://cla-assistant.io/coqui-ai/TTS).
+
+ This [Contributor License Agreement](https://cla-assistant.io/coqui-ai/TTS):
+
+ - Protects you, Coqui, and the users of the code.
+ - Does not change your rights to use your contributions for any purpose.
+ - Does not change the license of the 🐸TTS project. It just makes the terms of your contribution clearer and lets us know you are OK to contribute.
Indic-TTS/TTS/.github/stale.yml ADDED
@@ -0,0 +1,18 @@
+ # Number of days of inactivity before an issue becomes stale
+ daysUntilStale: 30
+ # Number of days of inactivity before a stale issue is closed
+ daysUntilClose: 7
+ # Issues with these labels will never be considered stale
+ exemptLabels:
+ - pinned
+ - security
+ # Label to use when marking an issue as stale
+ staleLabel: wontfix
+ # Comment to post when marking an issue as stale. Set to `false` to disable
+ markComment: >
+ This issue has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions. You might also look our discussion channels.
+ # Comment to post when closing a stale issue. Set to `false` to disable
+ closeComment: false
+
Indic-TTS/TTS/.github/workflows/aux_tests.yml ADDED
@@ -0,0 +1,49 @@
+ name: aux-tests
+
+ on:
+ push:
+ branches:
+ - main
+ pull_request:
+ types: [opened, synchronize, reopened]
+ jobs:
+ check_skip:
+ runs-on: ubuntu-latest
+ if: "! contains(github.event.head_commit.message, '[ci skip]')"
+ steps:
+ - run: echo "${{ github.event.head_commit.message }}"
+
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: [3.7, 3.8, 3.9, "3.10"]
+ experimental: [false]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: coqui-ai/setup-python@pip-cache-key-py-ver
+ with:
+ python-version: ${{ matrix.python-version }}
+ architecture: x64
+ cache: 'pip'
+ cache-dependency-path: 'requirements*'
+ - name: check OS
+ run: cat /etc/os-release
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y git make gcc
+ make system-deps
+ - name: Install/upgrade Python setup deps
+ run: python3 -m pip install --upgrade pip setuptools wheel
+ - name: Replace scarf urls
+ run: |
+ sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json
+ - name: Install TTS
+ run: |
+ python3 -m pip install .[all]
+ python3 setup.py egg_info
+ - name: Unit tests
+ run: make test_aux
Indic-TTS/TTS/.github/workflows/data_tests.yml ADDED
@@ -0,0 +1,49 @@
+ name: data-tests
+
+ on:
+ push:
+ branches:
+ - main
+ pull_request:
+ types: [opened, synchronize, reopened]
+ jobs:
+ check_skip:
+ runs-on: ubuntu-latest
+ if: "! contains(github.event.head_commit.message, '[ci skip]')"
+ steps:
+ - run: echo "${{ github.event.head_commit.message }}"
+
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: [3.7, 3.8, 3.9, "3.10"]
+ experimental: [false]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: coqui-ai/setup-python@pip-cache-key-py-ver
+ with:
+ python-version: ${{ matrix.python-version }}
+ architecture: x64
+ cache: 'pip'
+ cache-dependency-path: 'requirements*'
+ - name: check OS
+ run: cat /etc/os-release
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y --no-install-recommends git make gcc
+ make system-deps
+ - name: Install/upgrade Python setup deps
+ run: python3 -m pip install --upgrade pip setuptools wheel
+ - name: Replace scarf urls
+ run: |
+ sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json
+ - name: Install TTS
+ run: |
+ python3 -m pip install .[all]
+ python3 setup.py egg_info
+ - name: Unit tests
+ run: make data_tests
Indic-TTS/TTS/.github/workflows/docker.yaml ADDED
@@ -0,0 +1,65 @@
+ name: "Docker build and push"
+ on:
+ pull_request:
+ push:
+ branches:
+ - main
+ - dev
+ tags:
+ - v*
+ jobs:
+ docker-build:
+ name: "Build and push Docker image"
+ runs-on: ubuntu-20.04
+ strategy:
+ matrix:
+ arch: ["amd64"]
+ base:
+ - "nvcr.io/nvidia/pytorch:22.03-py3" # GPU enabled
+ - "ubuntu:20.04" # CPU only
+ steps:
+ - uses: actions/checkout@v2
+ - name: Log in to the Container registry
+ uses: docker/login-action@v1
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Compute Docker tags, check VERSION file matches tag
+ id: compute-tag
+ run: |
+ set -ex
+ base="ghcr.io/coqui-ai/tts"
+ tags="" # PR build
+
+ if [[ ${{ matrix.base }} = "ubuntu:20.04" ]]; then
+ base="ghcr.io/coqui-ai/tts-cpu"
+ fi
+
+ if [[ "${{ startsWith(github.ref, 'refs/heads/') }}" = "true" ]]; then
+ # Push to branch
+ github_ref="${{ github.ref }}"
+ branch=${github_ref#*refs/heads/} # strip prefix to get branch name
+ tags="${base}:${branch},${base}:${{ github.sha }},"
+ elif [[ "${{ startsWith(github.ref, 'refs/tags/') }}" = "true" ]]; then
+ VERSION="v$(cat TTS/VERSION)"
+ if [[ "${{ github.ref }}" != "refs/tags/${VERSION}" ]]; then
+ echo "Pushed tag does not match VERSION file. Aborting push."
+ exit 1
+ fi
+ tags="${base}:${VERSION},${base}:latest,${base}:${{ github.sha }}"
+ fi
+ echo "::set-output name=tags::${tags}"
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v1
+ - name: Set up Docker Buildx
+ id: buildx
+ uses: docker/setup-buildx-action@v1
+ - name: Build and push
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ platforms: linux/${{ matrix.arch }}
+ push: ${{ github.event_name == 'push' }}
+ build-args: "BASE=${{ matrix.base }}"
+ tags: ${{ steps.compute-tag.outputs.tags }}
Indic-TTS/TTS/.github/workflows/inference_tests.yml ADDED
@@ -0,0 +1,49 @@
+ name: inference_tests
+
+ on:
+ push:
+ branches:
+ - main
+ pull_request:
+ types: [opened, synchronize, reopened]
+ jobs:
+ check_skip:
+ runs-on: ubuntu-latest
+ if: "! contains(github.event.head_commit.message, '[ci skip]')"
+ steps:
+ - run: echo "${{ github.event.head_commit.message }}"
+
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: [3.7, 3.8, 3.9, "3.10"]
+ experimental: [false]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: coqui-ai/setup-python@pip-cache-key-py-ver
+ with:
+ python-version: ${{ matrix.python-version }}
+ architecture: x64
+ cache: 'pip'
+ cache-dependency-path: 'requirements*'
+ - name: check OS
+ run: cat /etc/os-release
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y --no-install-recommends git make gcc
+ make system-deps
+ - name: Install/upgrade Python setup deps
+ run: python3 -m pip install --upgrade pip setuptools wheel
+ - name: Replace scarf urls
+ run: |
+ sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json
+ - name: Install TTS
+ run: |
+ python3 -m pip install .[all]
+ python3 setup.py egg_info
+ - name: Unit tests
+ run: make inference_tests
Indic-TTS/TTS/.github/workflows/pypi-release.yml ADDED
@@ -0,0 +1,96 @@
+ name: Publish Python 🐍 distributions 📦 to PyPI
+ on:
+ release:
+ types: [published]
+ defaults:
+ run:
+ shell:
+ bash
+ jobs:
+ build-sdist:
+ runs-on: ubuntu-20.04
+ steps:
+ - uses: actions/checkout@v2
+ - name: Verify tag matches version
+ run: |
+ set -ex
+ version=$(cat TTS/VERSION)
+ tag="${GITHUB_REF/refs\/tags\/}"
+ if [[ "v$version" != "$tag" ]]; then
+ exit 1
+ fi
+ - uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+ - run: |
+ python -m pip install -U pip setuptools wheel build
+ - run: |
+ python -m build
+ - run: |
+ pip install dist/*.tar.gz
+ - uses: actions/upload-artifact@v2
+ with:
+ name: sdist
+ path: dist/*.tar.gz
+ build-wheels:
+ runs-on: ubuntu-20.04
+ strategy:
+ matrix:
+ python-version: ["3.7", "3.8", "3.9", "3.10"]
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+ - run: |
+ python -m pip install -U pip setuptools wheel build
+ - run: |
+ python -m build
+ - run: |
+ python -m pip install dist/*.whl
+ - uses: actions/upload-artifact@v2
+ with:
+ name: wheel-${{ matrix.python-version }}
+ path: dist/*.whl
+ publish-artifacts:
+ runs-on: ubuntu-20.04
+ needs: [build-sdist, build-wheels]
+ steps:
+ - run: |
+ mkdir dist
+ - uses: actions/download-artifact@v2
+ with:
+ name: "sdist"
+ path: "dist/"
+ - uses: actions/download-artifact@v2
+ with:
+ name: "wheel-3.7"
+ path: "dist/"
+ - uses: actions/download-artifact@v2
+ with:
+ name: "wheel-3.8"
+ path: "dist/"
+ - uses: actions/download-artifact@v2
+ with:
+ name: "wheel-3.9"
+ path: "dist/"
+ - uses: actions/download-artifact@v2
+ with:
+ name: "wheel-3.10"
+ path: "dist/"
+ - run: |
+ ls -lh dist/
+ - name: Setup PyPI config
+ run: |
+ cat << EOF > ~/.pypirc
+ [pypi]
+ username=__token__
+ password=${{ secrets.PYPI_TOKEN }}
+ EOF
+ - uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+ - run: |
+ python -m pip install twine
+ - run: |
+ twine upload --repository pypi dist/*
Indic-TTS/TTS/.github/workflows/style_check.yml ADDED
@@ -0,0 +1,47 @@
+ name: style-check
+
+ on:
+ push:
+ branches:
+ - main
+ pull_request:
+ types: [opened, synchronize, reopened]
+ jobs:
+ check_skip:
+ runs-on: ubuntu-latest
+ if: "! contains(github.event.head_commit.message, '[ci skip]')"
+ steps:
+ - run: echo "${{ github.event.head_commit.message }}"
+
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: [3.9]
+ experimental: [false]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: coqui-ai/setup-python@pip-cache-key-py-ver
+ with:
+ python-version: ${{ matrix.python-version }}
+ architecture: x64
+ cache: 'pip'
+ cache-dependency-path: 'requirements*'
+ - name: check OS
+ run: cat /etc/os-release
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y git make gcc
+ make system-deps
+ - name: Install/upgrade Python setup deps
+ run: python3 -m pip install --upgrade pip setuptools wheel
+ - name: Install TTS
+ run: |
+ python3 -m pip install .[all]
+ python3 setup.py egg_info
+ - name: Lint check
+ run: |
+ make lint
Indic-TTS/TTS/.github/workflows/text_tests.yml ADDED
@@ -0,0 +1,48 @@
+ name: text-tests
+
+ on:
+ push:
+ branches:
+ - main
+ pull_request:
+ types: [opened, synchronize, reopened]
+ jobs:
+ check_skip:
+ runs-on: ubuntu-latest
+ if: "! contains(github.event.head_commit.message, '[ci skip]')"
+ steps:
+ - run: echo "${{ github.event.head_commit.message }}"
+
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: [3.7, 3.8, 3.9, "3.10"]
+ experimental: [false]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: coqui-ai/setup-python@pip-cache-key-py-ver
+ with:
+ python-version: ${{ matrix.python-version }}
+ architecture: x64
+ cache: 'pip'
+ cache-dependency-path: 'requirements*'
+ - name: check OS
+ run: cat /etc/os-release
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y --no-install-recommends git make gcc
+ sudo apt-get install espeak
+ sudo apt-get install espeak-ng
+ make system-deps
+ - name: Install/upgrade Python setup deps
+ run: python3 -m pip install --upgrade pip setuptools wheel
+ - name: Install TTS
+ run: |
+ python3 -m pip install .[all]
+ python3 setup.py egg_info
+ - name: Unit tests
+ run: make test_text
Indic-TTS/TTS/.github/workflows/tts_tests.yml ADDED
@@ -0,0 +1,51 @@
+ name: tts-tests
+
+ on:
+ push:
+ branches:
+ - main
+ pull_request:
+ types: [opened, synchronize, reopened]
+ jobs:
+ check_skip:
+ runs-on: ubuntu-latest
+ if: "! contains(github.event.head_commit.message, '[ci skip]')"
+ steps:
+ - run: echo "${{ github.event.head_commit.message }}"
+
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: [3.7, 3.8, 3.9, "3.10"]
+ experimental: [false]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: coqui-ai/setup-python@pip-cache-key-py-ver
+ with:
+ python-version: ${{ matrix.python-version }}
+ architecture: x64
+ cache: 'pip'
+ cache-dependency-path: 'requirements*'
+ - name: check OS
+ run: cat /etc/os-release
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y --no-install-recommends git make gcc
+ sudo apt-get install espeak
+ sudo apt-get install espeak-ng
+ make system-deps
+ - name: Install/upgrade Python setup deps
+ run: python3 -m pip install --upgrade pip setuptools wheel
+ - name: Replace scarf urls
+ run: |
+ sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json
+ - name: Install TTS
+ run: |
+ python3 -m pip install .[all]
+ python3 setup.py egg_info
+ - name: Unit tests
+ run: make test_tts
Indic-TTS/TTS/.github/workflows/vocoder_tests.yml ADDED
@@ -0,0 +1,46 @@
+ name: vocoder-tests
+
+ on:
+ push:
+ branches:
+ - main
+ pull_request:
+ types: [opened, synchronize, reopened]
+ jobs:
+ check_skip:
+ runs-on: ubuntu-latest
+ if: "! contains(github.event.head_commit.message, '[ci skip]')"
+ steps:
+ - run: echo "${{ github.event.head_commit.message }}"
+
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: [3.7, 3.8, 3.9, "3.10"]
+ experimental: [false]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: coqui-ai/setup-python@pip-cache-key-py-ver
+ with:
+ python-version: ${{ matrix.python-version }}
+ architecture: x64
+ cache: 'pip'
+ cache-dependency-path: 'requirements*'
+ - name: check OS
+ run: cat /etc/os-release
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y git make gcc
+ make system-deps
+ - name: Install/upgrade Python setup deps
+ run: python3 -m pip install --upgrade pip setuptools wheel
+ - name: Install TTS
+ run: |
+ python3 -m pip install .[all]
+ python3 setup.py egg_info
+ - name: Unit tests
+ run: make test_vocoder
Indic-TTS/TTS/.github/workflows/zoo_tests.yml ADDED
@@ -0,0 +1,50 @@
+ name: zoo-tests
+
+ on:
+ push:
+ branches:
+ - main
+ pull_request:
+ types: [opened, synchronize, reopened]
+ jobs:
+ check_skip:
+ runs-on: ubuntu-latest
+ if: "! contains(github.event.head_commit.message, '[ci skip]')"
+ steps:
+ - run: echo "${{ github.event.head_commit.message }}"
+
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: [3.7, 3.8, 3.9, "3.10"]
+ experimental: [false]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: coqui-ai/setup-python@pip-cache-key-py-ver
+ with:
+ python-version: ${{ matrix.python-version }}
+ architecture: x64
+ cache: 'pip'
+ cache-dependency-path: 'requirements*'
+ - name: check OS
+ run: cat /etc/os-release
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y git make gcc
+ sudo apt-get install espeak espeak-ng
+ make system-deps
+ - name: Install/upgrade Python setup deps
+ run: python3 -m pip install --upgrade pip setuptools wheel
+ - name: Replace scarf urls
+ run: |
+ sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json
+ - name: Install TTS
+ run: |
+ python3 -m pip install .[all]
+ python3 setup.py egg_info
+ - name: Unit tests
+ run: make test_zoo
Indic-TTS/TTS/.gitignore ADDED
@@ -0,0 +1,170 @@
+ WadaSNR/
+ .idea/
+ *.pyc
+ .DS_Store
+ ./__init__.py
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ .static_storage/
+ .media/
+ local_settings.py
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+
+ # vim
+ *.swp
+ *.swm
+ *.swn
+ *.swo
+
+ # pytorch models
+ *.pth
+ *.pth.tar
+ !dummy_speakers.pth
+ result/
+
+ # setup.py
+ version.py
+
+ # jupyter dummy files
+ core
+
+ # ignore local datasets
+ recipes/WIP/*
+ recipes/ljspeech/LJSpeech-1.1/*
+ recipes/vctk/VCTK/*
+ recipes/**/*.npy
+ recipes/**/*.json
+ VCTK-Corpus-removed-silence/*
+
+ # ignore training logs
+ trainer_*_log.txt
+
+ # files used internally for dev, test etc.
+ tests/outputs/*
+ tests/train_outputs/*
+ TODO.txt
+ .vscode/*
+ data/*
+ notebooks/data/*
+ TTS/tts/utils/monotonic_align/core.c
+ .vscode-upload.json
+ temp_build/*
+ events.out*
+ old_configs/*
+ model_importers/*
+ model_profiling/*
+ docs/source/TODO/*
+ .noseids
+ .dccache
+ log.txt
+ umap.png
+ *.out
+ SocialMedia.txt
+ output.wav
+ tts_output.wav
+ deps.json
+ speakers.json
+ internal/*
+ *_pitch.npy
+ *_phoneme.npy
+ wandb
+ depot/*
+ coqui_recipes/*
Indic-TTS/TTS/.pre-commit-config.yaml ADDED
@@ -0,0 +1,27 @@
+ repos:
+ - repo: 'https://github.com/pre-commit/pre-commit-hooks'
+ rev: v2.3.0
+ hooks:
+ - id: check-yaml
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
+ - repo: 'https://github.com/psf/black'
+ rev: 20.8b1
+ hooks:
+ - id: black
+ language_version: python3
+ - repo: https://github.com/pycqa/isort
+ rev: 5.8.0
+ hooks:
+ - id: isort
+ name: isort (python)
+ - id: isort
+ name: isort (cython)
+ types: [cython]
+ - id: isort
+ name: isort (pyi)
+ types: [pyi]
+ - repo: https://github.com/pycqa/pylint
+ rev: v2.8.2
+ hooks:
+ - id: pylint
Indic-TTS/TTS/.pylintrc ADDED
@@ -0,0 +1,597 @@
1
+ [MASTER]
2
+
3
+ # A comma-separated list of package or module names from where C extensions may
4
+ # be loaded. Extensions are loading into the active Python interpreter and may
5
+ # run arbitrary code.
6
+ extension-pkg-whitelist=
7
+
8
+ # Add files or directories to the blacklist. They should be base names, not
9
+ # paths.
10
+ ignore=CVS
11
+
12
+ # Add files or directories matching the regex patterns to the blacklist. The
13
+ # regex matches against base names, not paths.
14
+ ignore-patterns=
15
+
16
+ # Python code to execute, usually for sys.path manipulation such as
17
+ # pygtk.require().
18
+ #init-hook=
19
+
20
+ # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
21
+ # number of processors available to use.
22
+ jobs=1
23
+
24
+ # Control the amount of potential inferred values when inferring a single
25
+ # object. This can help the performance when dealing with large functions or
26
+ # complex, nested conditions.
27
+ limit-inference-results=100
28
+
29
+ # List of plugins (as comma separated values of python modules names) to load,
30
+ # usually to register additional checkers.
31
+ load-plugins=
32
+
33
+ # Pickle collected data for later comparisons.
34
+ persistent=yes
35
+
36
+ # Specify a configuration file.
37
+ #rcfile=
38
+
39
+ # When enabled, pylint would attempt to guess common misconfiguration and emit
40
+ # user-friendly hints instead of false-positive error messages.
41
+ suggestion-mode=yes
42
+
43
+ # Allow loading of arbitrary C extensions. Extensions are imported into the
44
+ # active Python interpreter and may run arbitrary code.
45
+ unsafe-load-any-extension=no
46
+
47
+
48
+ [MESSAGES CONTROL]
49
+
50
+ # Only show warnings with the listed confidence levels. Leave empty to show
51
+ # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
52
+ confidence=
53
+
54
+ # Disable the message, report, category or checker with the given id(s). You
55
+ # can either give multiple identifiers separated by comma (,) or put this
56
+ # option multiple times (only on the command line, not in the configuration
57
+ # file where it should appear only once). You can also use "--disable=all" to
58
+ # disable everything first and then reenable specific checks. For example, if
59
+ # you want to run only the similarities checker, you can use "--disable=all
60
+ # --enable=similarities". If you want to run only the classes checker, but have
61
+ # no Warning level messages displayed, use "--disable=all --enable=classes
62
+ # --disable=W".
63
+ disable=missing-docstring,
64
+ too-many-public-methods,
65
+ too-many-lines,
66
+ bare-except,
67
+ ## for avoiding weird p3.6 CI linter error
68
+ ## TODO: see later if we can remove this
69
+ assigning-non-slot,
70
+ unsupported-assignment-operation,
71
+ ## end
72
+ line-too-long,
73
+ fixme,
74
+ wrong-import-order,
75
+ ungrouped-imports,
76
+ wrong-import-position,
77
+ import-error,
78
+ invalid-name,
79
+ too-many-instance-attributes,
80
+ arguments-differ,
81
+ arguments-renamed,
82
+ no-name-in-module,
83
+ no-member,
84
+ unsubscriptable-object,
85
+ print-statement,
86
+ parameter-unpacking,
87
+ unpacking-in-except,
88
+ old-raise-syntax,
89
+ backtick,
90
+ long-suffix,
91
+ old-ne-operator,
92
+ old-octal-literal,
93
+ import-star-module-level,
94
+ non-ascii-bytes-literal,
95
+ raw-checker-failed,
96
+ bad-inline-option,
97
+ locally-disabled,
98
+ file-ignored,
99
+ suppressed-message,
100
+ useless-suppression,
101
+ deprecated-pragma,
102
+ use-symbolic-message-instead,
103
+ useless-object-inheritance,
104
+ too-few-public-methods,
105
+ too-many-branches,
106
+ too-many-arguments,
107
+ too-many-locals,
108
+ too-many-statements,
109
+ apply-builtin,
110
+ basestring-builtin,
111
+ buffer-builtin,
112
+ cmp-builtin,
113
+ coerce-builtin,
114
+ execfile-builtin,
115
+ file-builtin,
116
+ long-builtin,
117
+ raw_input-builtin,
118
+ reduce-builtin,
119
+ standarderror-builtin,
120
+ unicode-builtin,
121
+ xrange-builtin,
122
+ coerce-method,
123
+ delslice-method,
124
+ getslice-method,
125
+ setslice-method,
126
+ no-absolute-import,
127
+ old-division,
128
+ dict-iter-method,
129
+ dict-view-method,
130
+ next-method-called,
131
+ metaclass-assignment,
132
+ indexing-exception,
133
+ raising-string,
134
+ reload-builtin,
135
+ oct-method,
136
+ hex-method,
137
+ nonzero-method,
138
+ cmp-method,
139
+ input-builtin,
140
+ round-builtin,
141
+ intern-builtin,
142
+ unichr-builtin,
143
+ map-builtin-not-iterating,
144
+ zip-builtin-not-iterating,
145
+ range-builtin-not-iterating,
146
+ filter-builtin-not-iterating,
147
+ using-cmp-argument,
148
+ eq-without-hash,
149
+ div-method,
150
+ idiv-method,
151
+ rdiv-method,
152
+ exception-message-attribute,
153
+ invalid-str-codec,
154
+ sys-max-int,
155
+ bad-python3-import,
156
+ deprecated-string-function,
157
+ deprecated-str-translate-call,
158
+ deprecated-itertools-function,
159
+ deprecated-types-field,
160
+ next-method-defined,
161
+ dict-items-not-iterating,
162
+ dict-keys-not-iterating,
163
+ dict-values-not-iterating,
164
+ deprecated-operator-function,
165
+ deprecated-urllib-function,
166
+ xreadlines-attribute,
167
+ deprecated-sys-function,
168
+ exception-escape,
169
+ comprehension-escape,
170
+ duplicate-code,
171
+ not-callable,
172
+ import-outside-toplevel
173
+
174
+ # Enable the message, report, category or checker with the given id(s). You can
175
+ # either give multiple identifier separated by comma (,) or put this option
176
+ # multiple time (only on the command line, not in the configuration file where
177
+ # it should appear only once). See also the "--disable" option for examples.
178
+ enable=c-extension-no-member
179
+
180
+
181
+ [REPORTS]
182
+
183
+ # Python expression which should return a note less than 10 (10 is the highest
184
+ # note). You have access to the variables errors warning, statement which
185
+ # respectively contain the number of errors / warnings messages and the total
186
+ # number of statements analyzed. This is used by the global evaluation report
187
+ # (RP0004).
188
+ evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
189
+
190
+ # Template used to display messages. This is a python new-style format string
191
+ # used to format the message information. See doc for all details.
192
+ #msg-template=
193
+
194
+ # Set the output format. Available formats are text, parseable, colorized, json
195
+ # and msvs (visual studio). You can also give a reporter class, e.g.
196
+ # mypackage.mymodule.MyReporterClass.
197
+ output-format=text
198
+
199
+ # Tells whether to display a full report or only the messages.
200
+ reports=no
201
+
202
+ # Activate the evaluation score.
203
+ score=yes
204
+
205
+
206
+ [REFACTORING]
207
+
208
+ # Maximum number of nested blocks for function / method body
209
+ max-nested-blocks=5
210
+
211
+ # Complete name of functions that never returns. When checking for
212
+ # inconsistent-return-statements if a never returning function is called then
213
+ # it will be considered as an explicit return statement and no message will be
214
+ # printed.
215
+ never-returning-functions=sys.exit
216
+
217
+
218
+ [LOGGING]
219
+
220
+ # Format style used to check logging format string. `old` means using %
221
+ # formatting, while `new` is for `{}` formatting.
222
+ logging-format-style=old
223
+
224
+ # Logging modules to check that the string format arguments are in logging
225
+ # function parameter format.
226
+ logging-modules=logging
227
+
228
+
229
+ [SPELLING]
230
+
231
+ # Limits count of emitted suggestions for spelling mistakes.
232
+ max-spelling-suggestions=4
233
+
234
+ # Spelling dictionary name. Available dictionaries: none. To make it working
235
+ # install python-enchant package..
236
+ spelling-dict=
237
+
238
+ # List of comma separated words that should not be checked.
239
+ spelling-ignore-words=
240
+
241
+ # A path to a file that contains private dictionary; one word per line.
242
+ spelling-private-dict-file=
243
+
244
+ # Tells whether to store unknown words to indicated private dictionary in
245
+ # --spelling-private-dict-file option instead of raising a message.
246
+ spelling-store-unknown-words=no
247
+
248
+
249
+ [MISCELLANEOUS]
250
+
251
+ # List of note tags to take in consideration, separated by a comma.
252
+ notes=FIXME,
253
+ XXX,
254
+ TODO
255
+
256
+
257
+ [TYPECHECK]
258
+
259
+ # List of decorators that produce context managers, such as
260
+ # contextlib.contextmanager. Add to this list to register other decorators that
261
+ # produce valid context managers.
262
+ contextmanager-decorators=contextlib.contextmanager
263
+
264
+ # List of members which are set dynamically and missed by pylint inference
265
+ # system, and so shouldn't trigger E1101 when accessed. Python regular
266
+ # expressions are accepted.
267
+ generated-members=numpy.*,torch.*
268
+
269
+ # Tells whether missing members accessed in mixin class should be ignored. A
270
+ # mixin class is detected if its name ends with "mixin" (case insensitive).
271
+ ignore-mixin-members=yes
272
+
273
+ # Tells whether to warn about missing members when the owner of the attribute
274
+ # is inferred to be None.
275
+ ignore-none=yes
276
+
277
+ # This flag controls whether pylint should warn about no-member and similar
278
+ # checks whenever an opaque object is returned when inferring. The inference
279
+ # can return multiple potential results while evaluating a Python object, but
280
+ # some branches might not be evaluated, which results in partial inference. In
281
+ # that case, it might be useful to still emit no-member and other checks for
282
+ # the rest of the inferred objects.
283
+ ignore-on-opaque-inference=yes
284
+
285
+ # List of class names for which member attributes should not be checked (useful
286
+ # for classes with dynamically set attributes). This supports the use of
287
+ # qualified names.
288
+ ignored-classes=optparse.Values,thread._local,_thread._local
289
+
290
+ # List of module names for which member attributes should not be checked
291
+ # (useful for modules/projects where namespaces are manipulated during runtime
292
+ # and thus existing member attributes cannot be deduced by static analysis. It
293
+ # supports qualified module names, as well as Unix pattern matching.
294
+ ignored-modules=
295
+
296
+ # Show a hint with possible names when a member name was not found. The aspect
297
+ # of finding the hint is based on edit distance.
298
+ missing-member-hint=yes
299
+
300
+ # The minimum edit distance a name should have in order to be considered a
301
+ # similar match for a missing member name.
302
+ missing-member-hint-distance=1
303
+
304
+ # The total number of similar names that should be taken in consideration when
305
+ # showing a hint for a missing member.
306
+ missing-member-max-choices=1
307
+
308
+
309
+ [VARIABLES]
310
+
311
+ # List of additional names supposed to be defined in builtins. Remember that
312
+ # you should avoid defining new builtins when possible.
313
+ additional-builtins=
314
+
315
+ # Tells whether unused global variables should be treated as a violation.
316
+ allow-global-unused-variables=yes
317
+
318
+ # List of strings which can identify a callback function by name. A callback
319
+ # name must start or end with one of those strings.
320
+ callbacks=cb_,
321
+ _cb
322
+
323
+ # A regular expression matching the name of dummy variables (i.e. expected to
324
+ # not be used).
325
+ dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
326
+
327
+ # Argument names that match this expression will be ignored. Default to name
328
+ # with leading underscore.
329
+ ignored-argument-names=_.*|^ignored_|^unused_
330
+
331
+ # Tells whether we should check for unused import in __init__ files.
332
+ init-import=no
333
+
334
+ # List of qualified module names which can have objects that can redefine
335
+ # builtins.
336
+ redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
337
+
338
+
339
+ [FORMAT]
340
+
341
+ # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
342
+ expected-line-ending-format=
343
+
344
+ # Regexp for a line that is allowed to be longer than the limit.
345
+ ignore-long-lines=^\s*(# )?<?https?://\S+>?$
346
+
347
+ # Number of spaces of indent required inside a hanging or continued line.
348
+ indent-after-paren=4
349
+
350
+ # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
351
+ # tab).
352
+ indent-string=' '
353
+
354
+ # Maximum number of characters on a single line.
355
+ max-line-length=120
356
+
357
+ # Maximum number of lines in a module.
358
+ max-module-lines=1000
359
+
360
+ # List of optional constructs for which whitespace checking is disabled. `dict-
361
+ # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
362
+ # `trailing-comma` allows a space between comma and closing bracket: (a, ).
363
+ # `empty-line` allows space-only lines.
364
+ no-space-check=trailing-comma,
365
+ dict-separator
366
+
367
+ # Allow the body of a class to be on the same line as the declaration if body
368
+ # contains single statement.
369
+ single-line-class-stmt=no
370
+
371
+ # Allow the body of an if to be on the same line as the test if there is no
372
+ # else.
373
+ single-line-if-stmt=no
374
+
375
+
376
+ [SIMILARITIES]
377
+
378
+ # Ignore comments when computing similarities.
379
+ ignore-comments=yes
380
+
381
+ # Ignore docstrings when computing similarities.
382
+ ignore-docstrings=yes
383
+
384
+ # Ignore imports when computing similarities.
385
+ ignore-imports=no
386
+
387
+ # Minimum lines number of a similarity.
388
+ min-similarity-lines=4
389
+
390
+
391
+ [BASIC]
392
+
393
+ # Naming style matching correct argument names.
394
+ argument-naming-style=snake_case
395
+
396
+ # Regular expression matching correct argument names. Overrides argument-
397
+ # naming-style.
398
+ argument-rgx=[a-z_][a-z0-9_]{0,30}$
399
+
400
+ # Naming style matching correct attribute names.
401
+ attr-naming-style=snake_case
402
+
403
+ # Regular expression matching correct attribute names. Overrides attr-naming-
404
+ # style.
405
+ #attr-rgx=
406
+
407
+ # Bad variable names which should always be refused, separated by a comma.
408
+ bad-names=
409
+
410
+ # Naming style matching correct class attribute names.
411
+ class-attribute-naming-style=any
412
+
413
+ # Regular expression matching correct class attribute names. Overrides class-
414
+ # attribute-naming-style.
415
+ #class-attribute-rgx=
416
+
417
+ # Naming style matching correct class names.
418
+ class-naming-style=PascalCase
419
+
420
+ # Regular expression matching correct class names. Overrides class-naming-
421
+ # style.
422
+ #class-rgx=
423
+
424
+ # Naming style matching correct constant names.
425
+ const-naming-style=UPPER_CASE
426
+
427
+ # Regular expression matching correct constant names. Overrides const-naming-
428
+ # style.
429
+ #const-rgx=
430
+
431
+ # Minimum line length for functions/classes that require docstrings, shorter
432
+ # ones are exempt.
433
+ docstring-min-length=-1
434
+
435
+ # Naming style matching correct function names.
436
+ function-naming-style=snake_case
437
+
438
+ # Regular expression matching correct function names. Overrides function-
439
+ # naming-style.
440
+ #function-rgx=
441
+
442
+ # Good variable names which should always be accepted, separated by a comma.
443
+ good-names=i,
444
+ j,
445
+ k,
446
+ x,
447
+ ex,
448
+ Run,
449
+ _
450
+
451
+ # Include a hint for the correct naming format with invalid-name.
452
+ include-naming-hint=no
453
+
454
+ # Naming style matching correct inline iteration names.
455
+ inlinevar-naming-style=any
456
+
457
+ # Regular expression matching correct inline iteration names. Overrides
458
+ # inlinevar-naming-style.
459
+ #inlinevar-rgx=
460
+
461
+ # Naming style matching correct method names.
462
+ method-naming-style=snake_case
463
+
464
+ # Regular expression matching correct method names. Overrides method-naming-
465
+ # style.
466
+ #method-rgx=
467
+
468
+ # Naming style matching correct module names.
469
+ module-naming-style=snake_case
470
+
471
+ # Regular expression matching correct module names. Overrides module-naming-
472
+ # style.
473
+ #module-rgx=
474
+
475
+ # Colon-delimited sets of names that determine each other's naming style when
476
+ # the name regexes allow several styles.
477
+ name-group=
478
+
479
+ # Regular expression which should only match function or class names that do
480
+ # not require a docstring.
481
+ no-docstring-rgx=^_
482
+
483
+ # List of decorators that produce properties, such as abc.abstractproperty. Add
484
+ # to this list to register other decorators that produce valid properties.
485
+ # These decorators are taken in consideration only for invalid-name.
486
+ property-classes=abc.abstractproperty
487
+
488
+ # Naming style matching correct variable names.
489
+ variable-naming-style=snake_case
490
+
491
+ # Regular expression matching correct variable names. Overrides variable-
492
+ # naming-style.
493
+ variable-rgx=[a-z_][a-z0-9_]{0,30}$
494
+
495
+
496
+ [STRING]
497
+
498
+ # This flag controls whether the implicit-str-concat-in-sequence should
499
+ # generate a warning on implicit string concatenation in sequences defined over
500
+ # several lines.
501
+ check-str-concat-over-line-jumps=no
502
+
503
+
504
+ [IMPORTS]
505
+
506
+ # Allow wildcard imports from modules that define __all__.
507
+ allow-wildcard-with-all=no
508
+
509
+ # Analyse import fallback blocks. This can be used to support both Python 2 and
510
+ # 3 compatible code, which means that the block might have code that exists
511
+ # only in one or another interpreter, leading to false positives when analysed.
512
+ analyse-fallback-blocks=no
513
+
514
+ # Deprecated modules which should not be used, separated by a comma.
515
+ deprecated-modules=optparse,tkinter.tix
516
+
517
+ # Create a graph of external dependencies in the given file (report RP0402 must
518
+ # not be disabled).
519
+ ext-import-graph=
520
+
521
+ # Create a graph of every (i.e. internal and external) dependencies in the
522
+ # given file (report RP0402 must not be disabled).
523
+ import-graph=
524
+
525
+ # Create a graph of internal dependencies in the given file (report RP0402 must
526
+ # not be disabled).
527
+ int-import-graph=
528
+
529
+ # Force import order to recognize a module as part of the standard
530
+ # compatibility libraries.
531
+ known-standard-library=
532
+
533
+ # Force import order to recognize a module as part of a third party library.
534
+ known-third-party=enchant
535
+
536
+
537
+ [CLASSES]
538
+
539
+ # List of method names used to declare (i.e. assign) instance attributes.
540
+ defining-attr-methods=__init__,
541
+ __new__,
542
+ setUp
543
+
544
+ # List of member names, which should be excluded from the protected access
545
+ # warning.
546
+ exclude-protected=_asdict,
547
+ _fields,
548
+ _replace,
549
+ _source,
550
+ _make
551
+
552
+ # List of valid names for the first argument in a class method.
553
+ valid-classmethod-first-arg=cls
554
+
555
+ # List of valid names for the first argument in a metaclass class method.
556
+ valid-metaclass-classmethod-first-arg=cls
557
+
558
+
559
+ [DESIGN]
560
+
561
+ # Maximum number of arguments for function / method.
562
+ max-args=5
563
+
564
+ # Maximum number of attributes for a class (see R0902).
565
+ max-attributes=7
566
+
567
+ # Maximum number of boolean expressions in an if statement.
568
+ max-bool-expr=5
569
+
570
+ # Maximum number of branch for function / method body.
571
+ max-branches=12
572
+
573
+ # Maximum number of locals for function / method body.
574
+ max-locals=15
575
+
576
+ # Maximum number of parents for a class (see R0901).
577
+ max-parents=15
578
+
579
+ # Maximum number of public methods for a class (see R0904).
580
+ max-public-methods=20
581
+
582
+ # Maximum number of return / yield for function / method body.
583
+ max-returns=6
584
+
585
+ # Maximum number of statements in function / method body.
586
+ max-statements=50
587
+
588
+ # Minimum number of public methods for a class (see R0903).
589
+ min-public-methods=2
590
+
591
+
592
+ [EXCEPTIONS]
593
+
594
+ # Exceptions that will emit a warning when being caught. Defaults to
595
+ # "BaseException, Exception".
596
+ overgeneral-exceptions=BaseException,
597
+ Exception
Indic-TTS/TTS/.readthedocs.yml ADDED
@@ -0,0 +1,18 @@
1
+ # .readthedocs.yml
2
+ # Read the Docs configuration file
3
+ # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4
+
5
+ # Required
6
+ version: 2
7
+
8
+ # Build documentation in the docs/ directory with Sphinx
9
+ sphinx:
10
+ builder: html
11
+ configuration: docs/source/conf.py
12
+
13
+ # Optionally set the version of Python and requirements required to build your docs
14
+ python:
15
+ version: 3.7
16
+ install:
17
+ - requirements: docs/requirements.txt
18
+ - requirements: requirements.txt
Indic-TTS/TTS/CITATION.cff ADDED
@@ -0,0 +1,20 @@
1
+ cff-version: 1.2.0
2
+ message: "If you want to cite 🐸💬, feel free to use this (but only if you loved it 😊)"
3
+ title: "Coqui TTS"
4
+ abstract: "A deep learning toolkit for Text-to-Speech, battle-tested in research and production"
5
+ date-released: 2021-01-01
6
+ authors:
7
+ - family-names: "Eren"
8
+ given-names: "Gölge"
9
+ - name: "The Coqui TTS Team"
10
+ version: 1.4
11
+ doi: 10.5281/zenodo.6334862
12
+ license: "MPL-2.0"
13
+ url: "https://www.coqui.ai"
14
+ repository-code: "https://github.com/coqui-ai/TTS"
15
+ keywords:
16
+ - machine learning
17
+ - deep learning
18
+ - artificial intelligence
19
+ - text to speech
20
+ - TTS
Indic-TTS/TTS/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,133 @@
1
+
2
+ # Contributor Covenant Code of Conduct
3
+
4
+ ## Our Pledge
5
+
6
+ We as members, contributors, and leaders pledge to make participation in our
7
+ community a harassment-free experience for everyone, regardless of age, body
8
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
9
+ identity and expression, level of experience, education, socio-economic status,
10
+ nationality, personal appearance, race, caste, color, religion, or sexual identity
11
+ and orientation.
12
+
13
+ We pledge to act and interact in ways that contribute to an open, welcoming,
14
+ diverse, inclusive, and healthy community.
15
+
16
+ ## Our Standards
17
+
18
+ Examples of behavior that contributes to a positive environment for our
19
+ community include:
20
+
21
+ * Demonstrating empathy and kindness toward other people
22
+ * Being respectful of differing opinions, viewpoints, and experiences
23
+ * Giving and gracefully accepting constructive feedback
24
+ * Accepting responsibility and apologizing to those affected by our mistakes,
25
+ and learning from the experience
26
+ * Focusing on what is best not just for us as individuals, but for the
27
+ overall community
28
+
29
+ Examples of unacceptable behavior include:
30
+
31
+ * The use of sexualized language or imagery, and sexual attention or
32
+ advances of any kind
33
+ * Trolling, insulting or derogatory comments, and personal or political attacks
34
+ * Public or private harassment
35
+ * Publishing others' private information, such as a physical or email
36
+ address, without their explicit permission
37
+ * Other conduct which could reasonably be considered inappropriate in a
38
+ professional setting
39
+
40
+ ## Enforcement Responsibilities
41
+
42
+ Community leaders are responsible for clarifying and enforcing our standards of
43
+ acceptable behavior and will take appropriate and fair corrective action in
44
+ response to any behavior that they deem inappropriate, threatening, offensive,
45
+ or harmful.
46
+
47
+ Community leaders have the right and responsibility to remove, edit, or reject
48
+ comments, commits, code, wiki edits, issues, and other contributions that are
49
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
50
+ decisions when appropriate.
51
+
52
+ ## Scope
53
+
54
+ This Code of Conduct applies within all community spaces, and also applies when
55
+ an individual is officially representing the community in public spaces.
56
+ Examples of representing our community include using an official e-mail address,
57
+ posting via an official social media account, or acting as an appointed
58
+ representative at an online or offline event.
59
+
60
+ ## Enforcement
61
+
62
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
63
+ reported to the community leaders responsible for enforcement at
64
65
+ All complaints will be reviewed and investigated promptly and fairly.
66
+
67
+ All community leaders are obligated to respect the privacy and security of the
68
+ reporter of any incident.
69
+
70
+ ## Enforcement Guidelines
71
+
72
+ Community leaders will follow these Community Impact Guidelines in determining
73
+ the consequences for any action they deem in violation of this Code of Conduct:
74
+
75
+ ### 1. Correction
76
+
77
+ **Community Impact**: Use of inappropriate language or other behavior deemed
78
+ unprofessional or unwelcome in the community.
79
+
80
+ **Consequence**: A private, written warning from community leaders, providing
81
+ clarity around the nature of the violation and an explanation of why the
82
+ behavior was inappropriate. A public apology may be requested.
83
+
84
+ ### 2. Warning
85
+
86
+ **Community Impact**: A violation through a single incident or series
87
+ of actions.
88
+
89
+ **Consequence**: A warning with consequences for continued behavior. No
90
+ interaction with the people involved, including unsolicited interaction with
91
+ those enforcing the Code of Conduct, for a specified period of time. This
92
+ includes avoiding interactions in community spaces as well as external channels
93
+ like social media. Violating these terms may lead to a temporary or
94
+ permanent ban.
95
+
96
+ ### 3. Temporary Ban
97
+
98
+ **Community Impact**: A serious violation of community standards, including
99
+ sustained inappropriate behavior.
100
+
101
+ **Consequence**: A temporary ban from any sort of interaction or public
102
+ communication with the community for a specified period of time. No public or
103
+ private interaction with the people involved, including unsolicited interaction
104
+ with those enforcing the Code of Conduct, is allowed during this period.
105
+ Violating these terms may lead to a permanent ban.
106
+
107
+ ### 4. Permanent Ban
108
+
109
+ **Community Impact**: Demonstrating a pattern of violation of community
110
+ standards, including sustained inappropriate behavior, harassment of an
111
+ individual, or aggression toward or disparagement of classes of individuals.
112
+
113
+ **Consequence**: A permanent ban from any sort of public interaction within
114
+ the community.
115
+
116
+ ## Attribution
117
+
118
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
119
+ version 2.0, available at
120
+ [https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0].
121
+
122
+ Community Impact Guidelines were inspired by
123
+ [Mozilla's code of conduct enforcement ladder][Mozilla CoC].
124
+
125
+ For answers to common questions about this code of conduct, see the FAQ at
126
+ [https://www.contributor-covenant.org/faq][FAQ]. Translations are available
127
+ at [https://www.contributor-covenant.org/translations][translations].
128
+
129
+ [homepage]: https://www.contributor-covenant.org
130
+ [v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html
131
+ [Mozilla CoC]: https://github.com/mozilla/diversity
132
+ [FAQ]: https://www.contributor-covenant.org/faq
133
+ [translations]: https://www.contributor-covenant.org/translations
Indic-TTS/TTS/CODE_OWNERS.rst ADDED
@@ -0,0 +1,75 @@
1
+ TTS code owners / governance system
2
+ ==========================================
3
+
4
+ TTS is run under a governance system inspired by (and partially copied from) the `Mozilla module ownership system <https://www.mozilla.org/about/governance/policies/module-ownership/>`_. The project is roughly divided into modules, and each module has its owners, who are responsible for reviewing pull requests and deciding on the technical direction of their modules. Module ownership authority is given to people who have worked extensively on areas of the project.
5
+
6
+ Module owners also have the authority to name other module owners or appoint module peers, who are people with authority to review pull requests in that module. They can also sub-divide their module into sub-modules with their own owners.
7
+
8
+ Module owners are not tyrants. They are chartered to make decisions with input from the community and in the best interest of the community. Module owners are not required to make code changes or additions solely because the community wants them to do so. (Like anyone else, the module owners may write code because they want to, because their employers want them to, because the community wants them to, or for some other reason.) Module owners do need to pay attention to patches submitted to that module. However, “pay attention” does not mean agreeing to every patch. Some patches may not make sense for the TTS project; some may be poorly implemented. Module owners have the authority to decline a patch; this is a necessary part of the role. We ask the module owners to describe in the relevant issue their reasons for wanting changes to a patch, for declining it altogether, or for postponing review for some period. We don’t ask or expect them to rewrite patches to make them acceptable. Similarly, module owners may need to delay review of a promising patch due to an upcoming deadline. For example, a patch may be of interest, but not for the next milestone. In such a case it may make sense for the module owner to postpone review of a patch until after matters needed for a milestone have been finalized. Again, we expect this to be described in the relevant issue. And of course, this shouldn’t happen very often or for very long, or escalation and review are likely.
9
+
10
+ The work of the various module owners and peers is overseen by the global owners, who are responsible for making final decisions when there is a conflict between owners, as well as for setting the direction of the project as a whole.
11
+
12
+ This file describes the module owners who are active on the project and the parts of the code they have expertise in (and interest in). If you're making changes to the code and are wondering who's an appropriate person to talk to, this list will tell you who to ping.
13
+
14
+ There's overlap in the owners' areas of expertise, and in particular there is a lot of overlap in which files each area covers. Don't worry about getting it exactly right when requesting review; any code owner will be happy to redirect the request to a more appropriate person.
15
+
16
+ Global owners
17
+ ----------------
18
+
19
+ These are people who have worked on the project extensively and are familiar with all or most parts of it. Their expertise and review guidance is trusted by other code owners to cover their own areas of expertise. In case of conflicting opinions from other owners, global owners will make a final decision.
20
+
21
+ - Eren Gölge (@erogol)
22
+ - Reuben Morais (@reuben)
23
+
24
+ Training, feeding
25
+ -----------------
26
+
27
+ - Eren Gölge (@erogol)
28
+
29
+ Model exporting
30
+ ---------------
31
+
32
+ - Eren Gölge (@erogol)
33
+
34
+ Multi-Speaker TTS
35
+ -----------------
36
+
37
+ - Eren Gölge (@erogol)
38
+ - Edresson Casanova (@edresson)
39
+
40
+ TTS
41
+ ---
42
+
43
+ - Eren Gölge (@erogol)
44
+
45
+ Vocoders
46
+ --------
47
+
48
+ - Eren Gölge (@erogol)
49
+
50
+ Speaker Encoder
51
+ ---------------
52
+
53
+ - Eren Gölge (@erogol)
54
+
55
+ Testing & CI
56
+ ------------
57
+
58
+ - Eren Gölge (@erogol)
59
+ - Reuben Morais (@reuben)
60
+
61
+ Python bindings
62
+ ---------------
63
+
64
+ - Eren Gölge (@erogol)
65
+ - Reuben Morais (@reuben)
66
+
67
+ Documentation
68
+ -------------
69
+
70
+ - Eren Gölge (@erogol)
71
+
72
+ Third party bindings
73
+ --------------------
74
+
75
+ Owned by the author.
Indic-TTS/TTS/CONTRIBUTING.md ADDED
@@ -0,0 +1,136 @@
1
+ # Contribution guidelines
2
+
3
+ Welcome to 🐸TTS!
4
+
5
+ This repository is governed by [the Contributor Covenant Code of Conduct](https://github.com/coqui-ai/TTS/blob/main/CODE_OF_CONDUCT.md).
6
+
7
+ ## Where to start.
8
+ We welcome everyone who likes to contribute to 🐸TTS.
9
+
10
+ You can contribute not only with code but with bug reports, comments, questions, answers, or just a simple tweet to spread the word.
11
+
12
+ If you'd like to contribute code or squash a bug but don't know where to start, here are some pointers.
13
+
14
+ - [Development Road Map](https://github.com/coqui-ai/TTS/issues/378)
15
+
16
+ You can pick something from our road map. We track the progress of the project in this simple issue thread, which lists new model proposals, development updates, etc.
17
+
18
+ - [Github Issues Tracker](https://github.com/coqui-ai/TTS/issues)
19
+
20
+ This is the place to find feature requests and bug reports.
21
+
22
+ Issues with the ```good first issue``` tag are a good place for beginners to start.
23
+
24
+ - ✨**PR**✨ [pages](https://github.com/coqui-ai/TTS/pulls) with the ```🚀new version``` tag.
25
+
26
+ We list all the target improvements for the next version. You can pick one of them and start contributing.
27
+
28
+ - Also feel free to suggest new features, ideas and models. We're always open to new things.
29
+
30
+ ## Call for sharing language models
31
+ If possible, please consider sharing your pre-trained models in any language (if the licences allow you to do so). We will include them in our model catalogue for public use and give proper attribution, whether that be your name, company, website or any other source specified.
32
+
33
+ Models can be shared in two ways:
34
+ 1. Share the model files with us and we serve them with the next 🐸 TTS release.
35
+ 2. Upload your models on GDrive and share the link.
36
+
37
+ Models are served through the `.models.json` file, and every model is available through the TTS CLI or the server endpoints.
38
+
39
+ Whichever way you choose, please make sure you send the models [here](https://github.com/coqui-ai/TTS/issues/380).
40
+
41
+ ## Sending a ✨**PR**✨
42
+
43
+ If you have a new feature, a model to implement, or a bug to squash, go ahead and send a ✨**PR**✨.
44
+ Please use the following steps to send a ✨**PR**✨.
45
+ Let us know if you encounter a problem along the way.
46
+
47
+ The following steps are tested on an Ubuntu system.
48
+
49
+ 1. Fork [🐸TTS](https://github.com/coqui-ai/TTS) by clicking the fork button at the top right corner of the project page.
50
+
51
+ 2. Clone 🐸TTS and add the main repo as a new remote named ```upstream```.
52
+
53
+ ```bash
54
+ $ git clone [email protected]:<your Github name>/TTS.git
55
+ $ cd TTS
56
+ $ git remote add upstream https://github.com/coqui-ai/TTS.git
57
+ ```
58
+
59
+ 3. Install 🐸TTS for development.
60
+
61
+ ```bash
62
+ $ make system-deps # intended to be used on Ubuntu (Debian). Let us know if you have a different OS.
63
+ $ make install
64
+ ```
65
+
66
+ 4. Create a new branch with an informative name for your goal.
67
+
68
+ ```bash
69
+ $ git checkout -b an_informative_name_for_my_branch
70
+ ```
71
+
72
+ 5. Implement your changes on your new branch.
73
+
74
+ 6. Explain your code using [Google Style](https://google.github.io/styleguide/pyguide.html#381-docstrings) docstrings.
75
+
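+ For example, a minimal Google Style docstring could look like the sketch below (the function and its arguments are made up for illustration; they are not part of 🐸TTS):
+
+ ```python
+ def resample_wav(wav, orig_sr, target_sr):
+     """Resample a waveform to a new sampling rate.
+
+     Args:
+         wav (np.ndarray): Audio samples scaled to the range [-1, 1].
+         orig_sr (int): Original sampling rate of ``wav``.
+         target_sr (int): Sampling rate to resample to.
+
+     Returns:
+         np.ndarray: The resampled waveform.
+     """
+     ...
+ ```
+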
76
+ 7. Add your tests to our test suite under the ```tests``` folder. It is important to show that your code works, that edge cases are considered, and to inform others about the intended use.
77
+
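+ For instance, a (hypothetical) minimal test module placed under ```tests``` could look like the sketch below; the existing tests in that folder are the best reference for naming and structure. Plain ```unittest``` cases are picked up by the ```nose2``` runner that ```make test``` uses:
+
+ ```python
+ import unittest
+
+
+ class TestMyNewFeature(unittest.TestCase):
+     def test_output_is_not_empty(self):
+         # Replace this placeholder with real checks against your new code,
+         # including the edge cases you care about.
+         result = [1, 2, 3]
+         self.assertGreater(len(result), 0)
+
+
+ if __name__ == "__main__":
+     unittest.main()
+ ```
+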
78
+ 8. Run the tests to see how your updates work with the rest of the project. You can repeat this step multiple times as you implement your changes to make sure you are going in the right direction.
79
+
80
+ ```bash
81
+ $ make test # stop at the first error
82
+ $ make test_all # run all the tests, report all the errors
83
+ ```
84
+
85
+ 9. Format your code. We use ```black``` for code and ```isort``` for ```import``` formatting.
86
+
87
+ ```bash
88
+ $ make style
89
+ ```
90
+
91
+ 10. Run the linter and correct the issues raised. We use ```pylint``` for linting. It helps to enforce a coding standard and offers simple refactoring suggestions.
92
+
93
+ ```bash
94
+ $ make lint
95
+ ```
96
+
97
+ 11. When things are good, add new files and commit your changes.
98
+
99
+ ```bash
100
+ $ git add my_file1.py my_file2.py ...
101
+ $ git commit
102
+ ```
103
+
104
+ It's a good practice to regularly sync your local copy of the project with the upstream code to keep up with the recent updates.
105
+
106
+ ```bash
107
+ $ git fetch upstream
108
+ $ git rebase upstream/master
109
+ # or for the development version
110
+ $ git rebase upstream/dev
111
+ ```
112
+
113
+ 12. Send a PR to ```dev``` branch.
114
+
115
+ Push your branch to your fork.
116
+
117
+ ```bash
118
+ $ git push -u origin an_informative_name_for_my_branch
119
+ ```
120
+
121
+ Then go to your fork's Github page and click on 'Pull request' to send your ✨**PR**✨.
122
+
123
+ Please set ✨**PR**✨'s target branch to ```dev``` as we use ```dev``` to work on the next version.
124
+
125
+ 13. Let's discuss until it is perfect. 💪
126
+
127
+ We might ask you for certain changes, which will appear on the ✨**PR**✨ page in [🐸TTS](https://github.com/coqui-ai/TTS/pulls).
128
+
129
+ 14. Once things look perfect, we merge it into the ```dev``` branch and make it ready for the next version.
130
+
131
+ Feel free to ping us at any step you need help using our communication channels.
132
+
133
+ If you are new to Github or open-source contribution, these are good resources.
134
+
135
+ - [Github Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/proposing-changes-to-your-work-with-pull-requests)
136
+ - [First-Contribution](https://github.com/firstcontributions/first-contributions)
Indic-TTS/TTS/Dockerfile ADDED
@@ -0,0 +1,20 @@
1
+ ARG BASE=nvcr.io/nvidia/pytorch:22.03-py3
2
+ FROM ${BASE}
3
+ RUN apt-get update && apt-get install -y --no-install-recommends gcc g++ make python3 python3-dev python3-pip python3-venv python3-wheel espeak espeak-ng libsndfile1-dev && rm -rf /var/lib/apt/lists/*
4
+ RUN pip install llvmlite --ignore-installed
5
+
6
+ # Create and activate virtual env
7
+ ENV VIRTUAL_ENV=/venv
8
+ RUN python3 -m venv $VIRTUAL_ENV
9
+ ENV PATH="$VIRTUAL_ENV/bin:$PATH"
10
+ RUN pip install -U pip setuptools wheel
11
+
12
+ WORKDIR /root
13
+ COPY requirements.txt /root
14
+ COPY requirements.dev.txt /root
15
+ COPY requirements.notebooks.txt /root
16
+ RUN ["/bin/bash", "-c", "pip install -r <(cat requirements.txt requirements.dev.txt requirements.notebooks.txt)"]
17
+ COPY . /root
18
+ RUN make install
19
+ ENTRYPOINT ["tts"]
20
+ CMD ["--help"]
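+
+ # Example usage -- not part of the upstream Dockerfile; the image tag, mount path and
+ # flags below are only illustrative (the `tts` flags are the ones documented in README.md):
+ #   docker build -t coqui-tts .
+ #   docker run --rm -v "$PWD/output:/output" coqui-tts \
+ #       --text "Text for TTS" --out_path /output/speech.wav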
Indic-TTS/TTS/LICENSE.txt ADDED
@@ -0,0 +1,373 @@
1
+ Mozilla Public License Version 2.0
2
+ ==================================
3
+
4
+ 1. Definitions
5
+ --------------
6
+
7
+ 1.1. "Contributor"
8
+ means each individual or legal entity that creates, contributes to
9
+ the creation of, or owns Covered Software.
10
+
11
+ 1.2. "Contributor Version"
12
+ means the combination of the Contributions of others (if any) used
13
+ by a Contributor and that particular Contributor's Contribution.
14
+
15
+ 1.3. "Contribution"
16
+ means Covered Software of a particular Contributor.
17
+
18
+ 1.4. "Covered Software"
19
+ means Source Code Form to which the initial Contributor has attached
20
+ the notice in Exhibit A, the Executable Form of such Source Code
21
+ Form, and Modifications of such Source Code Form, in each case
22
+ including portions thereof.
23
+
24
+ 1.5. "Incompatible With Secondary Licenses"
25
+ means
26
+
27
+ (a) that the initial Contributor has attached the notice described
28
+ in Exhibit B to the Covered Software; or
29
+
30
+ (b) that the Covered Software was made available under the terms of
31
+ version 1.1 or earlier of the License, but not also under the
32
+ terms of a Secondary License.
33
+
34
+ 1.6. "Executable Form"
35
+ means any form of the work other than Source Code Form.
36
+
37
+ 1.7. "Larger Work"
38
+ means a work that combines Covered Software with other material, in
39
+ a separate file or files, that is not Covered Software.
40
+
41
+ 1.8. "License"
42
+ means this document.
43
+
44
+ 1.9. "Licensable"
45
+ means having the right to grant, to the maximum extent possible,
46
+ whether at the time of the initial grant or subsequently, any and
47
+ all of the rights conveyed by this License.
48
+
49
+ 1.10. "Modifications"
50
+ means any of the following:
51
+
52
+ (a) any file in Source Code Form that results from an addition to,
53
+ deletion from, or modification of the contents of Covered
54
+ Software; or
55
+
56
+ (b) any new file in Source Code Form that contains any Covered
57
+ Software.
58
+
59
+ 1.11. "Patent Claims" of a Contributor
60
+ means any patent claim(s), including without limitation, method,
61
+ process, and apparatus claims, in any patent Licensable by such
62
+ Contributor that would be infringed, but for the grant of the
63
+ License, by the making, using, selling, offering for sale, having
64
+ made, import, or transfer of either its Contributions or its
65
+ Contributor Version.
66
+
67
+ 1.12. "Secondary License"
68
+ means either the GNU General Public License, Version 2.0, the GNU
69
+ Lesser General Public License, Version 2.1, the GNU Affero General
70
+ Public License, Version 3.0, or any later versions of those
71
+ licenses.
72
+
73
+ 1.13. "Source Code Form"
74
+ means the form of the work preferred for making modifications.
75
+
76
+ 1.14. "You" (or "Your")
77
+ means an individual or a legal entity exercising rights under this
78
+ License. For legal entities, "You" includes any entity that
79
+ controls, is controlled by, or is under common control with You. For
80
+ purposes of this definition, "control" means (a) the power, direct
81
+ or indirect, to cause the direction or management of such entity,
82
+ whether by contract or otherwise, or (b) ownership of more than
83
+ fifty percent (50%) of the outstanding shares or beneficial
84
+ ownership of such entity.
85
+
86
+ 2. License Grants and Conditions
87
+ --------------------------------
88
+
89
+ 2.1. Grants
90
+
91
+ Each Contributor hereby grants You a world-wide, royalty-free,
92
+ non-exclusive license:
93
+
94
+ (a) under intellectual property rights (other than patent or trademark)
95
+ Licensable by such Contributor to use, reproduce, make available,
96
+ modify, display, perform, distribute, and otherwise exploit its
97
+ Contributions, either on an unmodified basis, with Modifications, or
98
+ as part of a Larger Work; and
99
+
100
+ (b) under Patent Claims of such Contributor to make, use, sell, offer
101
+ for sale, have made, import, and otherwise transfer either its
102
+ Contributions or its Contributor Version.
103
+
104
+ 2.2. Effective Date
105
+
106
+ The licenses granted in Section 2.1 with respect to any Contribution
107
+ become effective for each Contribution on the date the Contributor first
108
+ distributes such Contribution.
109
+
110
+ 2.3. Limitations on Grant Scope
111
+
112
+ The licenses granted in this Section 2 are the only rights granted under
113
+ this License. No additional rights or licenses will be implied from the
114
+ distribution or licensing of Covered Software under this License.
115
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
116
+ Contributor:
117
+
118
+ (a) for any code that a Contributor has removed from Covered Software;
119
+ or
120
+
121
+ (b) for infringements caused by: (i) Your and any other third party's
122
+ modifications of Covered Software, or (ii) the combination of its
123
+ Contributions with other software (except as part of its Contributor
124
+ Version); or
125
+
126
+ (c) under Patent Claims infringed by Covered Software in the absence of
127
+ its Contributions.
128
+
129
+ This License does not grant any rights in the trademarks, service marks,
130
+ or logos of any Contributor (except as may be necessary to comply with
131
+ the notice requirements in Section 3.4).
132
+
133
+ 2.4. Subsequent Licenses
134
+
135
+ No Contributor makes additional grants as a result of Your choice to
136
+ distribute the Covered Software under a subsequent version of this
137
+ License (see Section 10.2) or under the terms of a Secondary License (if
138
+ permitted under the terms of Section 3.3).
139
+
140
+ 2.5. Representation
141
+
142
+ Each Contributor represents that the Contributor believes its
143
+ Contributions are its original creation(s) or it has sufficient rights
144
+ to grant the rights to its Contributions conveyed by this License.
145
+
146
+ 2.6. Fair Use
147
+
148
+ This License is not intended to limit any rights You have under
149
+ applicable copyright doctrines of fair use, fair dealing, or other
150
+ equivalents.
151
+
152
+ 2.7. Conditions
153
+
154
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
155
+ in Section 2.1.
156
+
157
+ 3. Responsibilities
158
+ -------------------
159
+
160
+ 3.1. Distribution of Source Form
161
+
162
+ All distribution of Covered Software in Source Code Form, including any
163
+ Modifications that You create or to which You contribute, must be under
164
+ the terms of this License. You must inform recipients that the Source
165
+ Code Form of the Covered Software is governed by the terms of this
166
+ License, and how they can obtain a copy of this License. You may not
167
+ attempt to alter or restrict the recipients' rights in the Source Code
168
+ Form.
169
+
170
+ 3.2. Distribution of Executable Form
171
+
172
+ If You distribute Covered Software in Executable Form then:
173
+
174
+ (a) such Covered Software must also be made available in Source Code
175
+ Form, as described in Section 3.1, and You must inform recipients of
176
+ the Executable Form how they can obtain a copy of such Source Code
177
+ Form by reasonable means in a timely manner, at a charge no more
178
+ than the cost of distribution to the recipient; and
179
+
180
+ (b) You may distribute such Executable Form under the terms of this
181
+ License, or sublicense it under different terms, provided that the
182
+ license for the Executable Form does not attempt to limit or alter
183
+ the recipients' rights in the Source Code Form under this License.
184
+
185
+ 3.3. Distribution of a Larger Work
186
+
187
+ You may create and distribute a Larger Work under terms of Your choice,
188
+ provided that You also comply with the requirements of this License for
189
+ the Covered Software. If the Larger Work is a combination of Covered
190
+ Software with a work governed by one or more Secondary Licenses, and the
191
+ Covered Software is not Incompatible With Secondary Licenses, this
192
+ License permits You to additionally distribute such Covered Software
193
+ under the terms of such Secondary License(s), so that the recipient of
194
+ the Larger Work may, at their option, further distribute the Covered
195
+ Software under the terms of either this License or such Secondary
196
+ License(s).
197
+
198
+ 3.4. Notices
199
+
200
+ You may not remove or alter the substance of any license notices
201
+ (including copyright notices, patent notices, disclaimers of warranty,
202
+ or limitations of liability) contained within the Source Code Form of
203
+ the Covered Software, except that You may alter any license notices to
204
+ the extent required to remedy known factual inaccuracies.
205
+
206
+ 3.5. Application of Additional Terms
207
+
208
+ You may choose to offer, and to charge a fee for, warranty, support,
209
+ indemnity or liability obligations to one or more recipients of Covered
210
+ Software. However, You may do so only on Your own behalf, and not on
211
+ behalf of any Contributor. You must make it absolutely clear that any
212
+ such warranty, support, indemnity, or liability obligation is offered by
213
+ You alone, and You hereby agree to indemnify every Contributor for any
214
+ liability incurred by such Contributor as a result of warranty, support,
215
+ indemnity or liability terms You offer. You may include additional
216
+ disclaimers of warranty and limitations of liability specific to any
217
+ jurisdiction.
218
+
219
+ 4. Inability to Comply Due to Statute or Regulation
220
+ ---------------------------------------------------
221
+
222
+ If it is impossible for You to comply with any of the terms of this
223
+ License with respect to some or all of the Covered Software due to
224
+ statute, judicial order, or regulation then You must: (a) comply with
225
+ the terms of this License to the maximum extent possible; and (b)
226
+ describe the limitations and the code they affect. Such description must
227
+ be placed in a text file included with all distributions of the Covered
228
+ Software under this License. Except to the extent prohibited by statute
229
+ or regulation, such description must be sufficiently detailed for a
230
+ recipient of ordinary skill to be able to understand it.
231
+
232
+ 5. Termination
233
+ --------------
234
+
235
+ 5.1. The rights granted under this License will terminate automatically
236
+ if You fail to comply with any of its terms. However, if You become
237
+ compliant, then the rights granted under this License from a particular
238
+ Contributor are reinstated (a) provisionally, unless and until such
239
+ Contributor explicitly and finally terminates Your grants, and (b) on an
240
+ ongoing basis, if such Contributor fails to notify You of the
241
+ non-compliance by some reasonable means prior to 60 days after You have
242
+ come back into compliance. Moreover, Your grants from a particular
243
+ Contributor are reinstated on an ongoing basis if such Contributor
244
+ notifies You of the non-compliance by some reasonable means, this is the
245
+ first time You have received notice of non-compliance with this License
246
+ from such Contributor, and You become compliant prior to 30 days after
247
+ Your receipt of the notice.
248
+
249
+ 5.2. If You initiate litigation against any entity by asserting a patent
250
+ infringement claim (excluding declaratory judgment actions,
251
+ counter-claims, and cross-claims) alleging that a Contributor Version
252
+ directly or indirectly infringes any patent, then the rights granted to
253
+ You by any and all Contributors for the Covered Software under Section
254
+ 2.1 of this License shall terminate.
255
+
256
+ 5.3. In the event of termination under Sections 5.1 or 5.2 above, all
257
+ end user license agreements (excluding distributors and resellers) which
258
+ have been validly granted by You or Your distributors under this License
259
+ prior to termination shall survive termination.
260
+
261
+ ************************************************************************
262
+ * *
263
+ * 6. Disclaimer of Warranty *
264
+ * ------------------------- *
265
+ * *
266
+ * Covered Software is provided under this License on an "as is" *
267
+ * basis, without warranty of any kind, either expressed, implied, or *
268
+ * statutory, including, without limitation, warranties that the *
269
+ * Covered Software is free of defects, merchantable, fit for a *
270
+ * particular purpose or non-infringing. The entire risk as to the *
271
+ * quality and performance of the Covered Software is with You. *
272
+ * Should any Covered Software prove defective in any respect, You *
273
+ * (not any Contributor) assume the cost of any necessary servicing, *
274
+ * repair, or correction. This disclaimer of warranty constitutes an *
275
+ * essential part of this License. No use of any Covered Software is *
276
+ * authorized under this License except under this disclaimer. *
277
+ * *
278
+ ************************************************************************
279
+
280
+ ************************************************************************
281
+ * *
282
+ * 7. Limitation of Liability *
283
+ * -------------------------- *
284
+ * *
285
+ * Under no circumstances and under no legal theory, whether tort *
286
+ * (including negligence), contract, or otherwise, shall any *
287
+ * Contributor, or anyone who distributes Covered Software as *
288
+ * permitted above, be liable to You for any direct, indirect, *
289
+ * special, incidental, or consequential damages of any character *
290
+ * including, without limitation, damages for lost profits, loss of *
291
+ * goodwill, work stoppage, computer failure or malfunction, or any *
292
+ * and all other commercial damages or losses, even if such party *
293
+ * shall have been informed of the possibility of such damages. This *
294
+ * limitation of liability shall not apply to liability for death or *
295
+ * personal injury resulting from such party's negligence to the *
296
+ * extent applicable law prohibits such limitation. Some *
297
+ * jurisdictions do not allow the exclusion or limitation of *
298
+ * incidental or consequential damages, so this exclusion and *
299
+ * limitation may not apply to You. *
300
+ * *
301
+ ************************************************************************
302
+
303
+ 8. Litigation
304
+ -------------
305
+
306
+ Any litigation relating to this License may be brought only in the
307
+ courts of a jurisdiction where the defendant maintains its principal
308
+ place of business and such litigation shall be governed by laws of that
309
+ jurisdiction, without reference to its conflict-of-law provisions.
310
+ Nothing in this Section shall prevent a party's ability to bring
311
+ cross-claims or counter-claims.
312
+
313
+ 9. Miscellaneous
314
+ ----------------
315
+
316
+ This License represents the complete agreement concerning the subject
317
+ matter hereof. If any provision of this License is held to be
318
+ unenforceable, such provision shall be reformed only to the extent
319
+ necessary to make it enforceable. Any law or regulation which provides
320
+ that the language of a contract shall be construed against the drafter
321
+ shall not be used to construe this License against a Contributor.
322
+
323
+ 10. Versions of the License
324
+ ---------------------------
325
+
326
+ 10.1. New Versions
327
+
328
+ Mozilla Foundation is the license steward. Except as provided in Section
329
+ 10.3, no one other than the license steward has the right to modify or
330
+ publish new versions of this License. Each version will be given a
331
+ distinguishing version number.
332
+
333
+ 10.2. Effect of New Versions
334
+
335
+ You may distribute the Covered Software under the terms of the version
336
+ of the License under which You originally received the Covered Software,
337
+ or under the terms of any subsequent version published by the license
338
+ steward.
339
+
340
+ 10.3. Modified Versions
341
+
342
+ If you create software not governed by this License, and you want to
343
+ create a new license for such software, you may create and use a
344
+ modified version of this License if you rename the license and remove
345
+ any references to the name of the license steward (except to note that
346
+ such modified license differs from this License).
347
+
348
+ 10.4. Distributing Source Code Form that is Incompatible With Secondary
349
+ Licenses
350
+
351
+ If You choose to distribute Source Code Form that is Incompatible With
352
+ Secondary Licenses under the terms of this version of the License, the
353
+ notice described in Exhibit B of this License must be attached.
354
+
355
+ Exhibit A - Source Code Form License Notice
356
+ -------------------------------------------
357
+
358
+ This Source Code Form is subject to the terms of the Mozilla Public
359
+ License, v. 2.0. If a copy of the MPL was not distributed with this
360
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
361
+
362
+ If it is not possible or desirable to put the notice in a particular
363
+ file, then You may include the notice in a location (such as a LICENSE
364
+ file in a relevant directory) where a recipient would be likely to look
365
+ for such a notice.
366
+
367
+ You may add additional accurate notices of copyright ownership.
368
+
369
+ Exhibit B - "Incompatible With Secondary Licenses" Notice
370
+ ---------------------------------------------------------
371
+
372
+ This Source Code Form is "Incompatible With Secondary Licenses", as
373
+ defined by the Mozilla Public License, v. 2.0.
Indic-TTS/TTS/MANIFEST.in ADDED
@@ -0,0 +1,14 @@
1
+ include README.md
2
+ include LICENSE.txt
3
+ include requirements.*.txt
4
+ include *.cff
5
+ include requirements.txt
6
+ include TTS/VERSION
7
+ recursive-include TTS *.json
8
+ recursive-include TTS *.html
9
+ recursive-include TTS *.png
10
+ recursive-include TTS *.md
11
+ recursive-include TTS *.py
12
+ recursive-include TTS *.pyx
13
+ recursive-include images *.png
14
+
Indic-TTS/TTS/Makefile ADDED
@@ -0,0 +1,72 @@
1
+ .DEFAULT_GOAL := help
2
+ .PHONY: test system-deps dev-deps deps style lint install help docs
3
+
4
+ help:
5
+ @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
6
+
7
+ target_dirs := tests TTS notebooks recipes
8
+
9
+ test_all: ## run tests and don't stop on an error.
10
+ nose2 --with-coverage --coverage TTS tests
11
+ ./run_bash_tests.sh
12
+
13
+ test: ## run tests.
14
+ nose2 -F -v -B --with-coverage --coverage TTS tests
15
+
16
+ test_vocoder: ## run vocoder tests.
17
+ nose2 -F -v -B --with-coverage --coverage TTS tests.vocoder_tests
18
+
19
+ test_tts: ## run tts tests.
20
+ nose2 -F -v -B --with-coverage --coverage TTS tests.tts_tests
21
+
22
+ test_aux: ## run aux tests.
23
+ nose2 -F -v -B --with-coverage --coverage TTS tests.aux_tests
24
+ ./run_bash_tests.sh
25
+
26
+ test_zoo: ## run zoo tests.
27
+ nose2 -F -v -B --with-coverage --coverage TTS tests.zoo_tests
28
+
29
+ inference_tests: ## run inference tests.
30
+ nose2 -F -v -B --with-coverage --coverage TTS tests.inference_tests
31
+
32
+ data_tests: ## run data tests.
33
+ nose2 -F -v -B --with-coverage --coverage TTS tests.data_tests
34
+
35
+ test_text: ## run text tests.
36
+ nose2 -F -v -B --with-coverage --coverage TTS tests.text_tests
37
+
38
+ test_failed: ## only run tests failed the last time.
39
+ nose2 -F -v -B --with-coverage --coverage TTS tests
40
+
41
+ style: ## update code style.
42
+ black ${target_dirs}
43
+ isort ${target_dirs}
44
+
45
+ lint: ## run pylint linter.
46
+ pylint ${target_dirs}
47
+ black ${target_dirs} --check
48
+ isort ${target_dirs} --check-only
49
+
50
+ system-deps: ## install linux system deps
51
+ sudo apt-get install -y libsndfile1-dev
52
+
53
+ dev-deps: ## install development deps
54
+ pip install -r requirements.dev.txt
55
+
56
+ doc-deps: ## install docs dependencies
57
+ pip install -r docs/requirements.txt
58
+
59
+ build-docs: ## build the docs
60
+ cd docs && make clean && make build
61
+
62
+ hub-deps: ## install deps for torch hub use
63
+ pip install -r requirements.hub.txt
64
+
65
+ deps: ## install 🐸 requirements.
66
+ pip install -r requirements.txt
67
+
68
+ install: ## install 🐸 TTS for development.
69
+ pip install -e .[all]
70
+
71
+ docs: ## build the docs
72
+ $(MAKE) -C docs clean && $(MAKE) -C docs html
Indic-TTS/TTS/README.md ADDED
@@ -0,0 +1,217 @@
1
+ # <img src="https://raw.githubusercontent.com/coqui-ai/TTS/main/images/coqui-log-green-TTS.png" height="56"/>
2
+
3
+ 🐸TTS is a library for advanced Text-to-Speech generation. It's built on the latest research and designed to achieve the best trade-off among ease of training, speed and quality.
4
+ 🐸TTS comes with pretrained models and tools for measuring dataset quality, and it is already used in **20+ languages** for products and research projects.
5
+
6
+ [![Gitter](https://badges.gitter.im/coqui-ai/TTS.svg)](https://gitter.im/coqui-ai/TTS?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
7
+ [![License](<https://img.shields.io/badge/License-MPL%202.0-brightgreen.svg>)](https://opensource.org/licenses/MPL-2.0)
8
+ [![PyPI version](https://badge.fury.io/py/TTS.svg)](https://badge.fury.io/py/TTS)
9
+ [![Covenant](https://camo.githubusercontent.com/7d620efaa3eac1c5b060ece5d6aacfcc8b81a74a04d05cd0398689c01c4463bb/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f436f6e7472696275746f72253230436f76656e616e742d76322e3025323061646f707465642d6666363962342e737667)](https://github.com/coqui-ai/TTS/blob/master/CODE_OF_CONDUCT.md)
10
+ [![Downloads](https://pepy.tech/badge/tts)](https://pepy.tech/project/tts)
11
+ [![DOI](https://zenodo.org/badge/265612440.svg)](https://zenodo.org/badge/latestdoi/265612440)
12
+
13
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/aux_tests.yml/badge.svg)
14
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/data_tests.yml/badge.svg)
15
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/docker.yaml/badge.svg)
16
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/inference_tests.yml/badge.svg)
17
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/style_check.yml/badge.svg)
18
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/text_tests.yml/badge.svg)
19
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/tts_tests.yml/badge.svg)
20
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/vocoder_tests.yml/badge.svg)
21
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/zoo_tests.yml/badge.svg)
22
+ [![Docs](<https://readthedocs.org/projects/tts/badge/?version=latest&style=plastic>)](https://tts.readthedocs.io/en/latest/)
23
+
24
+ 📰 [**Subscribe to 🐸Coqui.ai Newsletter**](https://coqui.ai/?subscription=true)
25
+
26
+ 📢 [English Voice Samples](https://erogol.github.io/ddc-samples/) and [SoundCloud playlist](https://soundcloud.com/user-565970875/pocket-article-wavernn-and-tacotron2)
27
+
28
+ 📄 [Text-to-Speech paper collection](https://github.com/erogol/TTS-papers)
29
+
30
+ <img src="https://static.scarf.sh/a.png?x-pxid=cf317fe7-2188-4721-bc01-124bb5d5dbb2" />
31
+
32
+ ## 💬 Where to ask questions
33
+ Please use our dedicated channels for questions and discussion. Help is much more valuable if it's shared publicly so that more people can benefit from it.
34
+
35
+ | Type | Platforms |
36
+ | ------------------------------- | --------------------------------------- |
37
+ | 🚨 **Bug Reports** | [GitHub Issue Tracker] |
38
+ | 🎁 **Feature Requests & Ideas** | [GitHub Issue Tracker] |
39
+ | 👩‍💻 **Usage Questions** | [Github Discussions] |
40
+ | 🗯 **General Discussion** | [Github Discussions] or [Gitter Room] |
41
+
42
+ [github issue tracker]: https://github.com/coqui-ai/tts/issues
43
+ [github discussions]: https://github.com/coqui-ai/TTS/discussions
44
+ [gitter room]: https://gitter.im/coqui-ai/TTS?utm_source=share-link&utm_medium=link&utm_campaign=share-link
45
+ [Tutorials and Examples]: https://github.com/coqui-ai/TTS/wiki/TTS-Notebooks-and-Tutorials
46
+
47
+
48
+ ## 🔗 Links and Resources
49
+ | Type | Links |
50
+ | ------------------------------- | --------------------------------------- |
51
+ | 💼 **Documentation** | [ReadTheDocs](https://tts.readthedocs.io/en/latest/)
52
+ | 💾 **Installation** | [TTS/README.md](https://github.com/coqui-ai/TTS/tree/dev#install-tts)|
53
+ | 👩‍💻 **Contributing** | [CONTRIBUTING.md](https://github.com/coqui-ai/TTS/blob/main/CONTRIBUTING.md)|
54
+ | 📌 **Road Map** | [Main Development Plans](https://github.com/coqui-ai/TTS/issues/378)
55
+ | 🚀 **Released Models** | [TTS Releases](https://github.com/coqui-ai/TTS/releases) and [Experimental Models](https://github.com/coqui-ai/TTS/wiki/Experimental-Released-Models)|
56
+
57
+ ## 🥇 TTS Performance
58
+ <p align="center"><img src="https://raw.githubusercontent.com/coqui-ai/TTS/main/images/TTS-performance.png" width="800" /></p>
59
+
60
+ Underlined "TTS*" and "Judy*" are 🐸TTS models.
61
+ <!-- [Details...](https://github.com/coqui-ai/TTS/wiki/Mean-Opinion-Score-Results) -->
62
+
63
+ ## Features
64
+ - High-performance Deep Learning models for Text2Speech tasks.
65
+ - Text2Spec models (Tacotron, Tacotron2, Glow-TTS, SpeedySpeech).
66
+ - Speaker Encoder to compute speaker embeddings efficiently.
67
+ - Vocoder models (MelGAN, Multiband-MelGAN, GAN-TTS, ParallelWaveGAN, WaveGrad, WaveRNN)
68
+ - Fast and efficient model training.
69
+ - Detailed training logs on the terminal and Tensorboard.
70
+ - Support for Multi-speaker TTS.
71
+ - Efficient, flexible, lightweight but feature complete `Trainer API`.
72
+ - Released and ready-to-use models.
73
+ - Tools to curate Text2Speech datasets under ```dataset_analysis```.
74
+ - Utilities to use and test your models.
75
+ - Modular (but not too much) code base enabling easy implementation of new ideas.
76
+
77
+ ## Implemented Models
78
+ ### Text-to-Spectrogram
79
+ - Tacotron: [paper](https://arxiv.org/abs/1703.10135)
80
+ - Tacotron2: [paper](https://arxiv.org/abs/1712.05884)
81
+ - Glow-TTS: [paper](https://arxiv.org/abs/2005.11129)
82
+ - Speedy-Speech: [paper](https://arxiv.org/abs/2008.03802)
83
+ - Align-TTS: [paper](https://arxiv.org/abs/2003.01950)
84
+ - FastPitch: [paper](https://arxiv.org/pdf/2006.06873.pdf)
85
+ - FastSpeech: [paper](https://arxiv.org/abs/1905.09263)
86
+
87
+ ### End-to-End Models
88
+ - VITS: [paper](https://arxiv.org/pdf/2106.06103)
89
+
90
+ ### Attention Methods
91
+ - Guided Attention: [paper](https://arxiv.org/abs/1710.08969)
92
+ - Forward Backward Decoding: [paper](https://arxiv.org/abs/1907.09006)
93
+ - Graves Attention: [paper](https://arxiv.org/abs/1910.10288)
94
+ - Double Decoder Consistency: [blog](https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency/)
95
+ - Dynamic Convolutional Attention: [paper](https://arxiv.org/pdf/1910.10288.pdf)
96
+ - Alignment Network: [paper](https://arxiv.org/abs/2108.10447)
97
+
98
+ ### Speaker Encoder
99
+ - GE2E: [paper](https://arxiv.org/abs/1710.10467)
100
+ - Angular Loss: [paper](https://arxiv.org/pdf/2003.11982.pdf)
101
+
102
+ ### Vocoders
103
+ - MelGAN: [paper](https://arxiv.org/abs/1910.06711)
104
+ - MultiBandMelGAN: [paper](https://arxiv.org/abs/2005.05106)
105
+ - ParallelWaveGAN: [paper](https://arxiv.org/abs/1910.11480)
106
+ - GAN-TTS discriminators: [paper](https://arxiv.org/abs/1909.11646)
107
+ - WaveRNN: [origin](https://github.com/fatchord/WaveRNN/)
108
+ - WaveGrad: [paper](https://arxiv.org/abs/2009.00713)
109
+ - HiFiGAN: [paper](https://arxiv.org/abs/2010.05646)
110
+ - UnivNet: [paper](https://arxiv.org/abs/2106.07889)
111
+
112
+ You can also help us implement more models.
113
+
114
+ ## Install TTS
115
+ 🐸TTS is tested on Ubuntu 18.04 with **python >= 3.7, < 3.11**.
116
+
117
+ If you are only interested in [synthesizing speech](https://tts.readthedocs.io/en/latest/inference.html) with the released 🐸TTS models, installing from PyPI is the easiest option.
118
+
119
+ ```bash
120
+ pip install TTS
121
+ ```
122
+
123
+ If you plan to code or train models, clone 🐸TTS and install it locally.
124
+
125
+ ```bash
126
+ git clone https://github.com/coqui-ai/TTS
127
+ pip install -e .[all,dev,notebooks] # Select the relevant extras
128
+ ```
129
+
130
+ If you are on Ubuntu (Debian), you can also run the following commands for installation.
131
+
132
+ ```bash
133
+ $ make system-deps # intended to be used on Ubuntu (Debian). Let us know if you have a different OS.
134
+ $ make install
135
+ ```
136
+
137
+ If you are on Windows, 👑@GuyPaddock wrote installation instructions [here](https://stackoverflow.com/questions/66726331/how-can-i-run-mozilla-tts-coqui-tts-training-with-cuda-on-a-windows-system).
138
+
139
+ ## Use TTS
140
+
141
+ ### Single Speaker Models
142
+
143
+ - List provided models:
144
+
145
+ ```
146
+ $ tts --list_models
147
+ ```
148
+
149
+ - Run TTS with default models:
150
+
151
+ ```
152
+ $ tts --text "Text for TTS"
153
+ ```
154
+
155
+ - Run a TTS model with its default vocoder model:
156
+
157
+ ```
158
+ $ tts --text "Text for TTS" --model_name "<language>/<dataset>/<model_name>"
159
+ ```
160
+
161
+ - Run with specific TTS and vocoder models from the list:
162
+
163
+ ```
164
+ $ tts --text "Text for TTS" --model_name "<language>/<dataset>/<model_name>" --vocoder_name "<language>/<dataset>/<model_name>" --output_path
165
+ ```
166
+
167
+ - Run your own TTS model (Using Griffin-Lim Vocoder):
168
+
169
+ ```
170
+ $ tts --text "Text for TTS" --model_path path/to/model.pth --config_path path/to/config.json --out_path output/path/speech.wav
171
+ ```
172
+
173
+ - Run your own TTS and Vocoder models (a Python sketch of the equivalent call follows this list):
174
+ ```
175
+ $ tts --text "Text for TTS" --model_path path/to/model.pth --config_path path/to/config.json --out_path output/path/speech.wav
176
+ --vocoder_path path/to/vocoder.pth --vocoder_config_path path/to/vocoder_config.json
177
+ ```
178
+
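+ The same synthesis can also be driven from Python. Below is a rough sketch using the `Synthesizer` helper in `TTS.utils.synthesizer`; the argument names and defaults shown are assumptions about the current code base, so double-check the signature in your installed version (all paths are placeholders):
+
+ ```python
+ from TTS.utils.synthesizer import Synthesizer
+
+ # Placeholder paths -- point these at your own checkpoints and configs.
+ synthesizer = Synthesizer(
+     tts_checkpoint="path/to/model.pth",
+     tts_config_path="path/to/config.json",
+     vocoder_checkpoint="path/to/vocoder.pth",      # optional; Griffin-Lim is used if omitted
+     vocoder_config="path/to/vocoder_config.json",  # optional
+     use_cuda=False,
+ )
+ wav = synthesizer.tts("Text for TTS")
+ synthesizer.save_wav(wav, "output/path/speech.wav")
+ ```
+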
179
+ ### Multi-speaker Models
180
+
181
+ - List the available speakers and choose a <speaker_id> from among them:
182
+
183
+ ```
184
+ $ tts --model_name "<language>/<dataset>/<model_name>" --list_speaker_idxs
185
+ ```
186
+
187
+ - Run the multi-speaker TTS model with the target speaker ID:
188
+
189
+ ```
190
+ $ tts --text "Text for TTS." --out_path output/path/speech.wav --model_name "<language>/<dataset>/<model_name>" --speaker_idx <speaker_id>
191
+ ```
192
+
193
+ - Run your own multi-speaker TTS model:
194
+
195
+ ```
196
+ $ tts --text "Text for TTS" --out_path output/path/speech.wav --model_path path/to/model.pth --config_path path/to/config.json --speakers_file_path path/to/speaker.json --speaker_idx <speaker_id>
197
+ ```
198
+
199
+ ## Directory Structure
200
+ ```
201
+ |- notebooks/ (Jupyter Notebooks for model evaluation, parameter selection and data analysis.)
202
+ |- utils/ (common utilities.)
203
+ |- TTS
204
+ |- bin/ (folder for all the executables.)
205
+ |- train*.py (train your target model.)
206
+ |- distribute.py (train your TTS model using Multiple GPUs.)
207
+ |- compute_statistics.py (compute dataset statistics for normalization.)
208
+ |- ...
209
+ |- tts/ (text to speech models)
210
+ |- layers/ (model layer definitions)
211
+ |- models/ (model definitions)
212
+ |- utils/ (model specific utilities.)
213
+ |- speaker_encoder/ (Speaker Encoder models.)
214
+ |- (same)
215
+ |- vocoder/ (Vocoder models.)
216
+ |- (same)
217
+ ```
Indic-TTS/TTS/TTS.egg-info/PKG-INFO ADDED
@@ -0,0 +1,253 @@
1
+ Metadata-Version: 2.1
2
+ Name: TTS
3
+ Version: 0.7.1
4
+ Summary: Deep learning for Text to Speech by Coqui.
5
+ Home-page: https://github.com/coqui-ai/TTS
6
+ Author: Eren Gölge
7
+ Author-email: [email protected]
8
+ License: MPL-2.0
9
+ Project-URL: Documentation, https://github.com/coqui-ai/TTS/wiki
10
+ Project-URL: Tracker, https://github.com/coqui-ai/TTS/issues
11
+ Project-URL: Repository, https://github.com/coqui-ai/TTS
12
+ Project-URL: Discussions, https://github.com/coqui-ai/TTS/discussions
13
+ Classifier: Programming Language :: Python
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.7
16
+ Classifier: Programming Language :: Python :: 3.8
17
+ Classifier: Programming Language :: Python :: 3.9
18
+ Classifier: Programming Language :: Python :: 3.10
19
+ Classifier: Development Status :: 3 - Alpha
20
+ Classifier: Intended Audience :: Science/Research
21
+ Classifier: Intended Audience :: Developers
22
+ Classifier: Operating System :: POSIX :: Linux
23
+ Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
24
+ Classifier: Topic :: Software Development
25
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
26
+ Classifier: Topic :: Multimedia :: Sound/Audio :: Speech
27
+ Classifier: Topic :: Multimedia :: Sound/Audio
28
+ Classifier: Topic :: Multimedia
29
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
30
+ Requires-Python: >=3.7.0, <3.11
31
+ Description-Content-Type: text/markdown
32
+ Provides-Extra: all
33
+ Provides-Extra: dev
34
+ Provides-Extra: notebooks
35
+ License-File: LICENSE.txt
36
+
37
+ # <img src="https://raw.githubusercontent.com/coqui-ai/TTS/main/images/coqui-log-green-TTS.png" height="56"/>
38
+
39
+ 🐸TTS is a library for advanced Text-to-Speech generation. It's built on the latest research and was designed to achieve the best trade-off among ease of training, speed and quality.
40
+ 🐸TTS comes with pretrained models and tools for measuring dataset quality, and is already used in **20+ languages** for products and research projects.
41
+
42
+ [![Gitter](https://badges.gitter.im/coqui-ai/TTS.svg)](https://gitter.im/coqui-ai/TTS?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
43
+ [![License](<https://img.shields.io/badge/License-MPL%202.0-brightgreen.svg>)](https://opensource.org/licenses/MPL-2.0)
44
+ [![PyPI version](https://badge.fury.io/py/TTS.svg)](https://badge.fury.io/py/TTS)
45
+ [![Covenant](https://camo.githubusercontent.com/7d620efaa3eac1c5b060ece5d6aacfcc8b81a74a04d05cd0398689c01c4463bb/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f436f6e7472696275746f72253230436f76656e616e742d76322e3025323061646f707465642d6666363962342e737667)](https://github.com/coqui-ai/TTS/blob/master/CODE_OF_CONDUCT.md)
46
+ [![Downloads](https://pepy.tech/badge/tts)](https://pepy.tech/project/tts)
47
+ [![DOI](https://zenodo.org/badge/265612440.svg)](https://zenodo.org/badge/latestdoi/265612440)
48
+
49
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/aux_tests.yml/badge.svg)
50
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/data_tests.yml/badge.svg)
51
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/docker.yaml/badge.svg)
52
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/inference_tests.yml/badge.svg)
53
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/style_check.yml/badge.svg)
54
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/text_tests.yml/badge.svg)
55
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/tts_tests.yml/badge.svg)
56
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/vocoder_tests.yml/badge.svg)
57
+ ![GithubActions](https://github.com/coqui-ai/TTS/actions/workflows/zoo_tests.yml/badge.svg)
58
+ [![Docs](<https://readthedocs.org/projects/tts/badge/?version=latest&style=plastic>)](https://tts.readthedocs.io/en/latest/)
59
+
60
+ 📰 [**Subscribe to 🐸Coqui.ai Newsletter**](https://coqui.ai/?subscription=true)
61
+
62
+ 📢 [English Voice Samples](https://erogol.github.io/ddc-samples/) and [SoundCloud playlist](https://soundcloud.com/user-565970875/pocket-article-wavernn-and-tacotron2)
63
+
64
+ 📄 [Text-to-Speech paper collection](https://github.com/erogol/TTS-papers)
65
+
66
+ <img src="https://static.scarf.sh/a.png?x-pxid=cf317fe7-2188-4721-bc01-124bb5d5dbb2" />
67
+
68
+ ## 💬 Where to ask questions
69
+ Please use our dedicated channels for questions and discussion. Help is much more valuable if it's shared publicly so that more people can benefit from it.
70
+
71
+ | Type | Platforms |
72
+ | ------------------------------- | --------------------------------------- |
73
+ | 🚨 **Bug Reports** | [GitHub Issue Tracker] |
74
+ | 🎁 **Feature Requests & Ideas** | [GitHub Issue Tracker] |
75
+ | 👩‍💻 **Usage Questions** | [Github Discussions] |
76
+ | 🗯 **General Discussion** | [Github Discussions] or [Gitter Room] |
77
+
78
+ [github issue tracker]: https://github.com/coqui-ai/tts/issues
79
+ [github discussions]: https://github.com/coqui-ai/TTS/discussions
80
+ [gitter room]: https://gitter.im/coqui-ai/TTS?utm_source=share-link&utm_medium=link&utm_campaign=share-link
81
+ [Tutorials and Examples]: https://github.com/coqui-ai/TTS/wiki/TTS-Notebooks-and-Tutorials
82
+
83
+
84
+ ## 🔗 Links and Resources
85
+ | Type | Links |
86
+ | ------------------------------- | --------------------------------------- |
87
+ | 💼 **Documentation** | [ReadTheDocs](https://tts.readthedocs.io/en/latest/)
88
+ | 💾 **Installation** | [TTS/README.md](https://github.com/coqui-ai/TTS/tree/dev#install-tts)|
89
+ | 👩‍💻 **Contributing** | [CONTRIBUTING.md](https://github.com/coqui-ai/TTS/blob/main/CONTRIBUTING.md)|
90
+ | 📌 **Road Map** | [Main Development Plans](https://github.com/coqui-ai/TTS/issues/378)
91
+ | 🚀 **Released Models** | [TTS Releases](https://github.com/coqui-ai/TTS/releases) and [Experimental Models](https://github.com/coqui-ai/TTS/wiki/Experimental-Released-Models)|
92
+
93
+ ## 🥇 TTS Performance
94
+ <p align="center"><img src="https://raw.githubusercontent.com/coqui-ai/TTS/main/images/TTS-performance.png" width="800" /></p>
95
+
96
+ Underlined "TTS*" and "Judy*" are 🐸TTS models
97
+ <!-- [Details...](https://github.com/coqui-ai/TTS/wiki/Mean-Opinion-Score-Results) -->
98
+
99
+ ## Features
100
+ - High-performance Deep Learning models for Text2Speech tasks.
101
+ - Text2Spec models (Tacotron, Tacotron2, Glow-TTS, SpeedySpeech).
102
+ - Speaker Encoder to compute speaker embeddings efficiently.
103
+ - Vocoder models (MelGAN, Multiband-MelGAN, GAN-TTS, ParallelWaveGAN, WaveGrad, WaveRNN)
104
+ - Fast and efficient model training.
105
+ - Detailed training logs on the terminal and Tensorboard.
106
+ - Support for Multi-speaker TTS.
107
+ - Efficient, flexible, lightweight but feature complete `Trainer API`.
108
+ - Released and ready-to-use models.
109
+ - Tools to curate Text2Speech datasets under ```dataset_analysis```.
110
+ - Utilities to use and test your models.
111
+ - Modular (but not too much) code base enabling easy implementation of new ideas.
112
+
113
+ ## Implemented Models
114
+ ### Text-to-Spectrogram
115
+ - Tacotron: [paper](https://arxiv.org/abs/1703.10135)
116
+ - Tacotron2: [paper](https://arxiv.org/abs/1712.05884)
117
+ - Glow-TTS: [paper](https://arxiv.org/abs/2005.11129)
118
+ - Speedy-Speech: [paper](https://arxiv.org/abs/2008.03802)
119
+ - Align-TTS: [paper](https://arxiv.org/abs/2003.01950)
120
+ - FastPitch: [paper](https://arxiv.org/pdf/2006.06873.pdf)
121
+ - FastSpeech: [paper](https://arxiv.org/abs/1905.09263)
122
+
123
+ ### End-to-End Models
124
+ - VITS: [paper](https://arxiv.org/pdf/2106.06103)
125
+
126
+ ### Attention Methods
127
+ - Guided Attention: [paper](https://arxiv.org/abs/1710.08969)
128
+ - Forward Backward Decoding: [paper](https://arxiv.org/abs/1907.09006)
129
+ - Graves Attention: [paper](https://arxiv.org/abs/1910.10288)
130
+ - Double Decoder Consistency: [blog](https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency/)
131
+ - Dynamic Convolutional Attention: [paper](https://arxiv.org/pdf/1910.10288.pdf)
132
+ - Alignment Network: [paper](https://arxiv.org/abs/2108.10447)
133
+
134
+ ### Speaker Encoder
135
+ - GE2E: [paper](https://arxiv.org/abs/1710.10467)
136
+ - Angular Loss: [paper](https://arxiv.org/pdf/2003.11982.pdf)
137
+
138
+ ### Vocoders
139
+ - MelGAN: [paper](https://arxiv.org/abs/1910.06711)
140
+ - MultiBandMelGAN: [paper](https://arxiv.org/abs/2005.05106)
141
+ - ParallelWaveGAN: [paper](https://arxiv.org/abs/1910.11480)
142
+ - GAN-TTS discriminators: [paper](https://arxiv.org/abs/1909.11646)
143
+ - WaveRNN: [origin](https://github.com/fatchord/WaveRNN/)
144
+ - WaveGrad: [paper](https://arxiv.org/abs/2009.00713)
145
+ - HiFiGAN: [paper](https://arxiv.org/abs/2010.05646)
146
+ - UnivNet: [paper](https://arxiv.org/abs/2106.07889)
147
+
148
+ You can also help us implement more models.
149
+
150
+ ## Install TTS
151
+ 🐸TTS is tested on Ubuntu 18.04 with **python >= 3.7, < 3.11**.
152
+
153
+ If you are only interested in [synthesizing speech](https://tts.readthedocs.io/en/latest/inference.html) with the released 🐸TTS models, installing from PyPI is the easiest option.
154
+
155
+ ```bash
156
+ pip install TTS
157
+ ```
158
+
159
+ If you plan to code or train models, clone 🐸TTS and install it locally.
160
+
161
+ ```bash
162
+ git clone https://github.com/coqui-ai/TTS
163
+ pip install -e .[all,dev,notebooks] # Select the relevant extras
164
+ ```
165
+
166
+ If you are on Ubuntu (Debian), you can also run the following commands for installation.
167
+
168
+ ```bash
169
+ $ make system-deps # intended to be used on Ubuntu (Debian). Let us know if you have a different OS.
170
+ $ make install
171
+ ```
172
+
173
+ If you are on Windows, 👑@GuyPaddock wrote installation instructions [here](https://stackoverflow.com/questions/66726331/how-can-i-run-mozilla-tts-coqui-tts-training-with-cuda-on-a-windows-system).
174
+
175
+ ## Use TTS
176
+
177
+ ### Single Speaker Models
178
+
179
+ - List provided models:
180
+
181
+ ```
182
+ $ tts --list_models
183
+ ```
184
+
185
+ - Run TTS with default models:
186
+
187
+ ```
188
+ $ tts --text "Text for TTS"
189
+ ```
190
+
191
+ - Run a TTS model with its default vocoder model:
192
+
193
+ ```
194
+ $ tts --text "Text for TTS" --model_name "<language>/<dataset>/<model_name>"
195
+ ```
196
+
197
+ - Run with specific TTS and vocoder models from the list:
198
+
199
+ ```
200
+ $ tts --text "Text for TTS" --model_name "<language>/<dataset>/<model_name>" --vocoder_name "<language>/<dataset>/<model_name>" --out_path output/path/speech.wav
201
+ ```
202
+
203
+ - Run your own TTS model (Using Griffin-Lim Vocoder):
204
+
205
+ ```
206
+ $ tts --text "Text for TTS" --model_path path/to/model.pth --config_path path/to/config.json --out_path output/path/speech.wav
207
+ ```
208
+
209
+ - Run your own TTS and Vocoder models:
210
+ ```
211
+ $ tts --text "Text for TTS" --model_path path/to/model.pth --config_path path/to/config.json --out_path output/path/speech.wav
212
+ --vocoder_path path/to/vocoder.pth --vocoder_config_path path/to/vocoder_config.json
213
+ ```
214
+
215
+ ### Multi-speaker Models
216
+
217
+ - List the available speakers and choose a <speaker_id> from among them:
218
+
219
+ ```
220
+ $ tts --model_name "<language>/<dataset>/<model_name>" --list_speaker_idxs
221
+ ```
222
+
223
+ - Run the multi-speaker TTS model with the target speaker ID:
224
+
225
+ ```
226
+ $ tts --text "Text for TTS." --out_path output/path/speech.wav --model_name "<language>/<dataset>/<model_name>" --speaker_idx <speaker_id>
227
+ ```
228
+
229
+ - Run your own multi-speaker TTS model:
230
+
231
+ ```
232
+ $ tts --text "Text for TTS" --out_path output/path/speech.wav --model_path path/to/model.pth --config_path path/to/config.json --speakers_file_path path/to/speaker.json --speaker_idx <speaker_id>
233
+ ```
234
+
235
+ ## Directory Structure
236
+ ```
237
+ |- notebooks/ (Jupyter Notebooks for model evaluation, parameter selection and data analysis.)
238
+ |- utils/ (common utilities.)
239
+ |- TTS
240
+ |- bin/ (folder for all the executables.)
241
+ |- train*.py (train your target model.)
242
+ |- distribute.py (train your TTS model using Multiple GPUs.)
243
+ |- compute_statistics.py (compute dataset statistics for normalization.)
244
+ |- ...
245
+ |- tts/ (text to speech models)
246
+ |- layers/ (model layer definitions)
247
+ |- models/ (model definitions)
248
+ |- utils/ (model specific utilities.)
249
+ |- speaker_encoder/ (Speaker Encoder models.)
250
+ |- (same)
251
+ |- vocoder/ (Vocoder models.)
252
+ |- (same)
253
+ ```
Indic-TTS/TTS/TTS.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,225 @@
1
+ CITATION.cff
2
+ LICENSE.txt
3
+ MANIFEST.in
4
+ README.md
5
+ pyproject.toml
6
+ requirements.dev.txt
7
+ requirements.notebooks.txt
8
+ requirements.txt
9
+ setup.cfg
10
+ setup.py
11
+ TTS/.models.json
12
+ TTS/VERSION
13
+ TTS/__init__.py
14
+ TTS/model.py
15
+ TTS.egg-info/PKG-INFO
16
+ TTS.egg-info/SOURCES.txt
17
+ TTS.egg-info/dependency_links.txt
18
+ TTS.egg-info/entry_points.txt
19
+ TTS.egg-info/not-zip-safe
20
+ TTS.egg-info/requires.txt
21
+ TTS.egg-info/top_level.txt
22
+ TTS/bin/__init__.py
23
+ TTS/bin/collect_env_info.py
24
+ TTS/bin/compute_attention_masks.py
25
+ TTS/bin/compute_embeddings.py
26
+ TTS/bin/compute_statistics.py
27
+ TTS/bin/eval_encoder.py
28
+ TTS/bin/extract_tts_spectrograms.py
29
+ TTS/bin/find_unique_chars.py
30
+ TTS/bin/find_unique_phonemes.py
31
+ TTS/bin/remove_silence_using_vad.py
32
+ TTS/bin/resample.py
33
+ TTS/bin/synthesize.py
34
+ TTS/bin/train_encoder.py
35
+ TTS/bin/train_tts.py
36
+ TTS/bin/train_vocoder.py
37
+ TTS/bin/tune_wavegrad.py
38
+ TTS/config/__init__.py
39
+ TTS/config/shared_configs.py
40
+ TTS/encoder/README.md
41
+ TTS/encoder/__init__.py
42
+ TTS/encoder/dataset.py
43
+ TTS/encoder/losses.py
44
+ TTS/encoder/configs/base_encoder_config.py
45
+ TTS/encoder/configs/emotion_encoder_config.py
46
+ TTS/encoder/configs/speaker_encoder_config.py
47
+ TTS/encoder/models/base_encoder.py
48
+ TTS/encoder/models/lstm.py
49
+ TTS/encoder/models/resnet.py
50
+ TTS/encoder/utils/__init__.py
51
+ TTS/encoder/utils/generic_utils.py
52
+ TTS/encoder/utils/io.py
53
+ TTS/encoder/utils/prepare_voxceleb.py
54
+ TTS/encoder/utils/samplers.py
55
+ TTS/encoder/utils/training.py
56
+ TTS/encoder/utils/visual.py
57
+ TTS/server/README.md
58
+ TTS/server/__init__.py
59
+ TTS/server/conf.json
60
+ TTS/server/server.py
61
+ TTS/server/static/coqui-log-green-TTS.png
62
+ TTS/server/templates/details.html
63
+ TTS/server/templates/index.html
64
+ TTS/tts/__init__.py
65
+ TTS/tts/configs/__init__.py
66
+ TTS/tts/configs/align_tts_config.py
67
+ TTS/tts/configs/fast_pitch_config.py
68
+ TTS/tts/configs/fast_speech_config.py
69
+ TTS/tts/configs/glow_tts_config.py
70
+ TTS/tts/configs/shared_configs.py
71
+ TTS/tts/configs/speedy_speech_config.py
72
+ TTS/tts/configs/tacotron2_config.py
73
+ TTS/tts/configs/tacotron_config.py
74
+ TTS/tts/configs/vits_config.py
75
+ TTS/tts/datasets/__init__.py
76
+ TTS/tts/datasets/dataset.py
77
+ TTS/tts/datasets/formatters.py
78
+ TTS/tts/layers/__init__.py
79
+ TTS/tts/layers/losses.py
80
+ TTS/tts/layers/align_tts/__init__.py
81
+ TTS/tts/layers/align_tts/duration_predictor.py
82
+ TTS/tts/layers/align_tts/mdn.py
83
+ TTS/tts/layers/feed_forward/__init__.py
84
+ TTS/tts/layers/feed_forward/decoder.py
85
+ TTS/tts/layers/feed_forward/duration_predictor.py
86
+ TTS/tts/layers/feed_forward/encoder.py
87
+ TTS/tts/layers/generic/__init__.py
88
+ TTS/tts/layers/generic/aligner.py
89
+ TTS/tts/layers/generic/gated_conv.py
90
+ TTS/tts/layers/generic/normalization.py
91
+ TTS/tts/layers/generic/pos_encoding.py
92
+ TTS/tts/layers/generic/res_conv_bn.py
93
+ TTS/tts/layers/generic/time_depth_sep_conv.py
94
+ TTS/tts/layers/generic/transformer.py
95
+ TTS/tts/layers/generic/wavenet.py
96
+ TTS/tts/layers/glow_tts/__init__.py
97
+ TTS/tts/layers/glow_tts/decoder.py
98
+ TTS/tts/layers/glow_tts/duration_predictor.py
99
+ TTS/tts/layers/glow_tts/encoder.py
100
+ TTS/tts/layers/glow_tts/glow.py
101
+ TTS/tts/layers/glow_tts/transformer.py
102
+ TTS/tts/layers/tacotron/__init__.py
103
+ TTS/tts/layers/tacotron/attentions.py
104
+ TTS/tts/layers/tacotron/capacitron_layers.py
105
+ TTS/tts/layers/tacotron/common_layers.py
106
+ TTS/tts/layers/tacotron/gst_layers.py
107
+ TTS/tts/layers/tacotron/tacotron.py
108
+ TTS/tts/layers/tacotron/tacotron2.py
109
+ TTS/tts/layers/vits/discriminator.py
110
+ TTS/tts/layers/vits/networks.py
111
+ TTS/tts/layers/vits/stochastic_duration_predictor.py
112
+ TTS/tts/layers/vits/transforms.py
113
+ TTS/tts/models/__init__.py
114
+ TTS/tts/models/align_tts.py
115
+ TTS/tts/models/base_tacotron.py
116
+ TTS/tts/models/base_tts.py
117
+ TTS/tts/models/forward_tts.py
118
+ TTS/tts/models/glow_tts.py
119
+ TTS/tts/models/tacotron.py
120
+ TTS/tts/models/tacotron2.py
121
+ TTS/tts/models/vits.py
122
+ TTS/tts/utils/__init__.py
123
+ TTS/tts/utils/data.py
124
+ TTS/tts/utils/helpers.py
125
+ TTS/tts/utils/languages.py
126
+ TTS/tts/utils/managers.py
127
+ TTS/tts/utils/measures.py
128
+ TTS/tts/utils/speakers.py
129
+ TTS/tts/utils/ssim.py
130
+ TTS/tts/utils/synthesis.py
131
+ TTS/tts/utils/visual.py
132
+ TTS/tts/utils/monotonic_align/__init__.py
133
+ TTS/tts/utils/monotonic_align/core.c
134
+ TTS/tts/utils/monotonic_align/core.pyx
135
+ TTS/tts/utils/monotonic_align/setup.py
136
+ TTS/tts/utils/text/__init__.py
137
+ TTS/tts/utils/text/characters.py
138
+ TTS/tts/utils/text/cleaners.py
139
+ TTS/tts/utils/text/cmudict.py
140
+ TTS/tts/utils/text/punctuation.py
141
+ TTS/tts/utils/text/tokenizer.py
142
+ TTS/tts/utils/text/chinese_mandarin/__init__.py
143
+ TTS/tts/utils/text/chinese_mandarin/numbers.py
144
+ TTS/tts/utils/text/chinese_mandarin/phonemizer.py
145
+ TTS/tts/utils/text/chinese_mandarin/pinyinToPhonemes.py
146
+ TTS/tts/utils/text/english/__init__.py
147
+ TTS/tts/utils/text/english/abbreviations.py
148
+ TTS/tts/utils/text/english/number_norm.py
149
+ TTS/tts/utils/text/english/time_norm.py
150
+ TTS/tts/utils/text/french/__init__.py
151
+ TTS/tts/utils/text/french/abbreviations.py
152
+ TTS/tts/utils/text/japanese/__init__.py
153
+ TTS/tts/utils/text/japanese/phonemizer.py
154
+ TTS/tts/utils/text/phonemizers/__init__.py
155
+ TTS/tts/utils/text/phonemizers/base.py
156
+ TTS/tts/utils/text/phonemizers/espeak_wrapper.py
157
+ TTS/tts/utils/text/phonemizers/gruut_wrapper.py
158
+ TTS/tts/utils/text/phonemizers/ja_jp_phonemizer.py
159
+ TTS/tts/utils/text/phonemizers/multi_phonemizer.py
160
+ TTS/tts/utils/text/phonemizers/zh_cn_phonemizer.py
161
+ TTS/utils/__init__.py
162
+ TTS/utils/audio.py
163
+ TTS/utils/callbacks.py
164
+ TTS/utils/capacitron_optimizer.py
165
+ TTS/utils/distribute.py
166
+ TTS/utils/download.py
167
+ TTS/utils/downloaders.py
168
+ TTS/utils/generic_utils.py
169
+ TTS/utils/io.py
170
+ TTS/utils/manage.py
171
+ TTS/utils/radam.py
172
+ TTS/utils/synthesizer.py
173
+ TTS/utils/training.py
174
+ TTS/utils/vad.py
175
+ TTS/vocoder/README.md
176
+ TTS/vocoder/__init__.py
177
+ TTS/vocoder/configs/__init__.py
178
+ TTS/vocoder/configs/fullband_melgan_config.py
179
+ TTS/vocoder/configs/hifigan_config.py
180
+ TTS/vocoder/configs/melgan_config.py
181
+ TTS/vocoder/configs/multiband_melgan_config.py
182
+ TTS/vocoder/configs/parallel_wavegan_config.py
183
+ TTS/vocoder/configs/shared_configs.py
184
+ TTS/vocoder/configs/univnet_config.py
185
+ TTS/vocoder/configs/wavegrad_config.py
186
+ TTS/vocoder/configs/wavernn_config.py
187
+ TTS/vocoder/datasets/__init__.py
188
+ TTS/vocoder/datasets/gan_dataset.py
189
+ TTS/vocoder/datasets/preprocess.py
190
+ TTS/vocoder/datasets/wavegrad_dataset.py
191
+ TTS/vocoder/datasets/wavernn_dataset.py
192
+ TTS/vocoder/layers/__init__.py
193
+ TTS/vocoder/layers/hifigan.py
194
+ TTS/vocoder/layers/losses.py
195
+ TTS/vocoder/layers/lvc_block.py
196
+ TTS/vocoder/layers/melgan.py
197
+ TTS/vocoder/layers/parallel_wavegan.py
198
+ TTS/vocoder/layers/pqmf.py
199
+ TTS/vocoder/layers/upsample.py
200
+ TTS/vocoder/layers/wavegrad.py
201
+ TTS/vocoder/models/__init__.py
202
+ TTS/vocoder/models/base_vocoder.py
203
+ TTS/vocoder/models/fullband_melgan_generator.py
204
+ TTS/vocoder/models/gan.py
205
+ TTS/vocoder/models/hifigan_discriminator.py
206
+ TTS/vocoder/models/hifigan_generator.py
207
+ TTS/vocoder/models/melgan_discriminator.py
208
+ TTS/vocoder/models/melgan_generator.py
209
+ TTS/vocoder/models/melgan_multiscale_discriminator.py
210
+ TTS/vocoder/models/multiband_melgan_generator.py
211
+ TTS/vocoder/models/parallel_wavegan_discriminator.py
212
+ TTS/vocoder/models/parallel_wavegan_generator.py
213
+ TTS/vocoder/models/random_window_discriminator.py
214
+ TTS/vocoder/models/univnet_discriminator.py
215
+ TTS/vocoder/models/univnet_generator.py
216
+ TTS/vocoder/models/wavegrad.py
217
+ TTS/vocoder/models/wavernn.py
218
+ TTS/vocoder/utils/__init__.py
219
+ TTS/vocoder/utils/distribution.py
220
+ TTS/vocoder/utils/generic_utils.py
221
+ images/TTS-performance.png
222
+ images/coqui-log-green-TTS.png
223
+ images/example_model_output.png
224
+ images/model.png
225
+ images/tts_performance.png
Indic-TTS/TTS/TTS.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
1
+
Indic-TTS/TTS/TTS.egg-info/entry_points.txt ADDED
@@ -0,0 +1,3 @@
1
+ [console_scripts]
2
+ tts = TTS.bin.synthesize:main
3
+ tts-server = TTS.server.server:main
Indic-TTS/TTS/TTS.egg-info/not-zip-safe ADDED
@@ -0,0 +1 @@
1
+
Indic-TTS/TTS/TTS.egg-info/requires.txt ADDED
@@ -0,0 +1,44 @@
1
+ numpy==1.21.6
2
+ cython==0.29.28
3
+ scipy>=1.4.0
4
+ torch>=1.7
5
+ torchaudio
6
+ soundfile
7
+ librosa==0.8.0
8
+ numba==0.55.1
9
+ inflect
10
+ tqdm
11
+ anyascii
12
+ pyyaml
13
+ fsspec>=2021.04.0
14
+ flask
15
+ pysbd
16
+ umap-learn==0.5.1
17
+ pandas
18
+ matplotlib
19
+ pyworld==0.2.10
20
+ trainer
21
+ coqpit>=0.0.16
22
+ jieba
23
+ pypinyin
24
+ mecab-python3==1.0.5
25
+ unidic-lite==1.0.8
26
+ gruut[cs,de,es,fr,it,nl,pt,ru,sv]==2.2.3
27
+
28
+ [all]
29
+ black
30
+ coverage
31
+ isort
32
+ nose2
33
+ pylint==2.10.2
34
+ bokeh==1.4.0
35
+
36
+ [dev]
37
+ black
38
+ coverage
39
+ isort
40
+ nose2
41
+ pylint==2.10.2
42
+
43
+ [notebooks]
44
+ bokeh==1.4.0
Indic-TTS/TTS/TTS.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ TTS
Indic-TTS/TTS/TTS/.models.json ADDED
@@ -0,0 +1,500 @@
1
+ {
2
+ "tts_models": {
3
+ "multilingual":{
4
+ "multi-dataset":{
5
+ "your_tts":{
6
+ "description": "Your TTS model accompanying the paper https://arxiv.org/abs/2112.02418",
7
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--multilingual--multi-dataset--your_tts.zip",
8
+ "default_vocoder": null,
9
+ "commit": "e9a1953e",
10
+ "license": "CC BY-NC-ND 4.0",
11
+ "contact": "[email protected]"
12
+ }
13
+ }
14
+ },
15
+ "en": {
16
+ "ek1": {
17
+ "tacotron2": {
18
+ "description": "EK1 en-rp tacotron2 by NMStoker",
19
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--ek1--tacotron2.zip",
20
+ "default_vocoder": "vocoder_models/en/ek1/wavegrad",
21
+ "commit": "c802255",
22
+ "license": "apache 2.0"
23
+ }
24
+ },
25
+ "ljspeech": {
26
+ "tacotron2-DDC": {
27
+ "description": "Tacotron2 with Double Decoder Consistency.",
28
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--ljspeech--tacotron2-DDC.zip",
29
+ "default_vocoder": "vocoder_models/en/ljspeech/hifigan_v2",
30
+ "commit": "bae2ad0f",
31
+ "author": "Eren Gölge @erogol",
32
+ "license": "apache 2.0",
33
+ "contact": "[email protected]"
34
+ },
35
+ "tacotron2-DDC_ph": {
36
+ "description": "Tacotron2 with Double Decoder Consistency with phonemes.",
37
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--ljspeech--tacotron2-DDC_ph.zip",
38
+ "default_vocoder": "vocoder_models/en/ljspeech/univnet",
39
+ "commit": "3900448",
40
+ "author": "Eren Gölge @erogol",
41
+ "license": "apache 2.0",
42
+ "contact": "[email protected]"
43
+ },
44
+ "glow-tts": {
45
+ "description": "",
46
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--ljspeech--glow-tts.zip",
47
+ "stats_file": null,
48
+ "default_vocoder": "vocoder_models/en/ljspeech/multiband-melgan",
49
+ "commit": "",
50
+ "author": "Eren Gölge @erogol",
51
+ "license": "MPL",
52
+ "contact": "[email protected]"
53
+ },
54
+ "speedy-speech": {
55
+ "description": "Speedy Speech model trained on LJSpeech dataset using the Alignment Network for learning the durations.",
56
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--ljspeech--speedy-speech.zip",
57
+ "stats_file": null,
58
+ "default_vocoder": "vocoder_models/en/ljspeech/hifigan_v2",
59
+ "commit": "4581e3d",
60
+ "author": "Eren Gölge @erogol",
61
+ "license": "apache 2.0",
62
+ "contact": "[email protected]"
63
+ },
64
+ "tacotron2-DCA": {
65
+ "description": "",
66
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--ljspeech--tacotron2-DCA.zip",
67
+ "default_vocoder": "vocoder_models/en/ljspeech/multiband-melgan",
68
+ "commit": "",
69
+ "author": "Eren Gölge @erogol",
70
+ "license": "MPL",
71
+ "contact": "[email protected]"
72
+ },
73
+ "vits": {
74
+ "description": "VITS is an End2End TTS model trained on LJSpeech dataset with phonemes.",
75
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--ljspeech--vits.zip",
76
+ "default_vocoder": null,
77
+ "commit": "3900448",
78
+ "author": "Eren Gölge @erogol",
79
+ "license": "apache 2.0",
80
+ "contact": "[email protected]"
81
+ },
82
+ "fast_pitch": {
83
+ "description": "FastPitch model trained on LJSpeech using the Aligner Network",
84
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--ljspeech--fast_pitch.zip",
85
+ "default_vocoder": "vocoder_models/en/ljspeech/hifigan_v2",
86
+ "commit": "b27b3ba",
87
+ "author": "Eren Gölge @erogol",
88
+ "license": "apache 2.0",
89
+ "contact": "[email protected]"
90
+ }
91
+ },
92
+ "vctk": {
93
+ "vits": {
94
+ "description": "VITS End2End TTS model trained on VCTK dataset with 109 different speakers with EN accent.",
95
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--vctk--vits.zip",
96
+ "default_vocoder": null,
97
+ "commit": "3900448",
98
+ "author": "Eren @erogol",
99
+ "license": "apache 2.0",
100
+ "contact": "[email protected]"
101
+ },
102
+ "fast_pitch":{
103
+ "description": "FastPitch model trained on VCTK dataseset.",
104
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--vctk--fast_pitch.zip",
105
+ "default_vocoder": null,
106
+ "commit": "bdab788d",
107
+ "author": "Eren @erogol",
108
+ "license": "CC BY-NC-ND 4.0",
109
+ "contact": "[email protected]"
110
+ }
111
+ },
112
+ "sam": {
113
+ "tacotron-DDC": {
114
+ "description": "Tacotron2 with Double Decoder Consistency trained with Aceenture's Sam dataset.",
115
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--sam--tacotron-DDC.zip",
116
+ "default_vocoder": "vocoder_models/en/sam/hifigan_v2",
117
+ "commit": "bae2ad0f",
118
+ "author": "Eren Gölge @erogol",
119
+ "license": "apache 2.0",
120
+ "contact": "[email protected]"
121
+ }
122
+ },
123
+ "blizzard2013": {
124
+ "capacitron-t2-c50": {
125
+ "description": "Capacitron additions to Tacotron 2 with Capacity at 50 as in https://arxiv.org/pdf/1906.03402.pdf",
126
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.7.0_models/tts_models--en--blizzard2013--capacitron-t2-c50.zip",
127
+ "commit": "d6284e7",
128
+ "default_vocoder": "vocoder_models/en/blizzard2013/hifigan_v2",
129
+ "author": "Adam Froghyar @a-froghyar",
130
+ "license": "apache 2.0",
131
+ "contact": "[email protected]"
132
+ },
133
+ "capacitron-t2-c150": {
134
+ "description": "Capacitron additions to Tacotron 2 with Capacity at 150 as in https://arxiv.org/pdf/1906.03402.pdf",
135
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.7.0_models/tts_models--en--blizzard2013--capacitron-t2-c150.zip",
136
+ "commit": "d6284e7",
137
+ "default_vocoder": "vocoder_models/en/blizzard2013/hifigan_v2",
138
+ "author": "Adam Froghyar @a-froghyar",
139
+ "license": "apache 2.0",
140
+ "contact": "[email protected]"
141
+ }
142
+ }
143
+ },
144
+ "es": {
145
+ "mai": {
146
+ "tacotron2-DDC": {
147
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--es--mai--tacotron2-DDC.zip",
148
+ "default_vocoder": "vocoder_models/universal/libri-tts/fullband-melgan",
149
+ "commit": "",
150
+ "author": "Eren Gölge @erogol",
151
+ "license": "MPL",
152
+ "contact": "[email protected]"
153
+ }
154
+ }
155
+ },
156
+ "fr": {
157
+ "mai": {
158
+ "tacotron2-DDC": {
159
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--fr--mai--tacotron2-DDC.zip",
160
+ "default_vocoder": "vocoder_models/universal/libri-tts/fullband-melgan",
161
+ "commit": "",
162
+ "author": "Eren Gölge @erogol",
163
+ "license": "MPL",
164
+ "contact": "[email protected]"
165
+ }
166
+ }
167
+ },
168
+ "uk":{
169
+ "mai": {
170
+ "glow-tts": {
171
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--uk--mai--glow-tts.zip",
172
+ "author":"@robinhad",
173
+ "commit": "bdab788d",
174
+ "license": "MIT",
175
+ "contact": "",
176
+ "default_vocoder": "vocoder_models/uk/mai/multiband-melgan"
177
+ }
178
+ }
179
+ },
180
+ "zh-CN": {
181
+ "baker": {
182
+ "tacotron2-DDC-GST": {
183
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--zh-CN--baker--tacotron2-DDC-GST.zip",
184
+ "commit": "unknown",
185
+ "author": "@kirianguiller",
186
+ "license": "apache 2.0",
187
+ "default_vocoder": null
188
+ }
189
+ }
190
+ },
191
+ "nl": {
192
+ "mai": {
193
+ "tacotron2-DDC": {
194
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--nl--mai--tacotron2-DDC.zip",
195
+ "author": "@r-dh",
196
+ "license": "apache 2.0",
197
+ "default_vocoder": "vocoder_models/nl/mai/parallel-wavegan",
198
+ "stats_file": null,
199
+ "commit": "540d811"
200
+ }
201
+ }
202
+ },
203
+ "de": {
204
+ "thorsten": {
205
+ "tacotron2-DCA": {
206
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--de--thorsten--tacotron2-DCA.zip",
207
+ "default_vocoder": "vocoder_models/de/thorsten/fullband-melgan",
208
+ "author": "@thorstenMueller",
209
+ "license": "apache 2.0",
210
+ "commit": "unknown"
211
+ },
212
+ "vits": {
213
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.7.0_models/tts_models--de--thorsten--vits.zip",
214
+ "default_vocoder": null,
215
+ "author": "@thorstenMueller",
216
+ "license": "apache 2.0",
217
+ "commit": "unknown"
218
+ }
219
+ }
220
+ },
221
+ "ja": {
222
+ "kokoro": {
223
+ "tacotron2-DDC": {
224
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--ja--kokoro--tacotron2-DDC.zip",
225
+ "default_vocoder": "vocoder_models/ja/kokoro/hifigan_v1",
226
+ "description": "Tacotron2 with Double Decoder Consistency trained with Kokoro Speech Dataset.",
227
+ "author": "@kaiidams",
228
+ "license": "apache 2.0",
229
+ "commit": "401fbd89"
230
+ }
231
+ }
232
+ },
233
+ "tr":{
234
+ "common-voice": {
235
+ "glow-tts":{
236
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--tr--common-voice--glow-tts.zip",
237
+ "default_vocoder": "vocoder_models/tr/common-voice/hifigan",
238
+ "license": "MIT",
239
+ "description": "Turkish GlowTTS model using an unknown speaker from the Common-Voice dataset.",
240
+ "author": "Fatih Akademi",
241
+ "commit": null
242
+ }
243
+ }
244
+ },
245
+ "it": {
246
+ "mai_female": {
247
+ "glow-tts":{
248
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--it--mai_female--glow-tts.zip",
249
+ "default_vocoder": null,
250
+ "description": "GlowTTS model as explained on https://github.com/coqui-ai/TTS/issues/1148.",
251
+ "author": "@nicolalandro",
252
+ "license": "apache 2.0",
253
+ "commit": null
254
+ },
255
+ "vits":{
256
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--it--mai_female--vits.zip",
257
+ "default_vocoder": null,
258
+ "description": "GlowTTS model as explained on https://github.com/coqui-ai/TTS/issues/1148.",
259
+ "author": "@nicolalandro",
260
+ "license": "apache 2.0",
261
+ "commit": null
262
+ }
263
+ },
264
+ "mai_male": {
265
+ "glow-tts":{
266
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--it--mai_male--glow-tts.zip",
267
+ "default_vocoder": null,
268
+ "description": "GlowTTS model as explained on https://github.com/coqui-ai/TTS/issues/1148.",
269
+ "author": "@nicolalandro",
270
+ "license": "apache 2.0",
271
+ "commit": null
272
+ },
273
+ "vits":{
274
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--it--mai_male--vits.zip",
275
+ "default_vocoder": null,
276
+ "description": "GlowTTS model as explained on https://github.com/coqui-ai/TTS/issues/1148.",
277
+ "author": "@nicolalandro",
278
+ "license": "apache 2.0",
279
+ "commit": null
280
+ }
281
+ }
282
+ },
283
+ "ewe": {
284
+ "openbible": {
285
+ "vits":{
286
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.2_models/tts_models--ewe--openbible--vits.zip",
287
+ "default_vocoder": null,
288
+ "license": "CC-BY-SA 4.0",
289
+ "description": "Original work (audio and text) by Biblica available for free at www.biblica.com and open.bible.",
290
+ "author": "@coqui_ai",
291
+ "commit": "1b22f03"
292
+ }
293
+ }
294
+ },
295
+ "hau": {
296
+ "openbible": {
297
+ "vits":{
298
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.2_models/tts_models--hau--openbible--vits.zip",
299
+ "default_vocoder": null,
300
+ "license": "CC-BY-SA 4.0",
301
+ "description": "Original work (audio and text) by Biblica available for free at www.biblica.com and open.bible.",
302
+ "author": "@coqui_ai",
303
+ "commit": "1b22f03"
304
+ }
305
+ }
306
+ },
307
+ "lin": {
308
+ "openbible": {
309
+ "vits":{
310
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.2_models/tts_models--lin--openbible--vits.zip",
311
+ "default_vocoder": null,
312
+ "license": "CC-BY-SA 4.0",
313
+ "description": "Original work (audio and text) by Biblica available for free at www.biblica.com and open.bible.",
314
+ "author": "@coqui_ai",
315
+ "commit": "1b22f03"
316
+ }
317
+ }
318
+ },
319
+ "tw_akuapem": {
320
+ "openbible": {
321
+ "vits":{
322
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.2_models/tts_models--tw_akuapem--openbible--vits.zip",
323
+ "default_vocoder": null,
324
+ "license": "CC-BY-SA 4.0",
325
+ "description": "Original work (audio and text) by Biblica available for free at www.biblica.com and open.bible.",
326
+ "author": "@coqui_ai",
327
+ "commit": "1b22f03"
328
+ }
329
+ }
330
+ },
331
+ "tw_asante": {
332
+ "openbible": {
333
+ "vits":{
334
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.2_models/tts_models--tw_asante--openbible--vits.zip",
335
+ "default_vocoder": null,
336
+ "license": "CC-BY-SA 4.0",
337
+ "description": "Original work (audio and text) by Biblica available for free at www.biblica.com and open.bible.",
338
+ "author": "@coqui_ai",
339
+ "commit": "1b22f03"
340
+ }
341
+ }
342
+ },
343
+ "yor": {
344
+ "openbible": {
345
+ "vits":{
346
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.2_models/tts_models--yor--openbible--vits.zip",
347
+ "default_vocoder": null,
348
+ "license": "CC-BY-SA 4.0",
349
+ "description": "Original work (audio and text) by Biblica available for free at www.biblica.com and open.bible.",
350
+ "author": "@coqui_ai",
351
+ "commit": "1b22f03"
352
+ }
353
+ }
354
+ }
355
+ },
356
+ "vocoder_models": {
357
+ "universal": {
358
+ "libri-tts": {
359
+ "wavegrad": {
360
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--universal--libri-tts--wavegrad.zip",
361
+ "commit": "ea976b0",
362
+ "author": "Eren Gölge @erogol",
363
+ "license": "MPL",
364
+ "contact": "[email protected]"
365
+ },
366
+ "fullband-melgan": {
367
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--universal--libri-tts--fullband-melgan.zip",
368
+ "commit": "4132240",
369
+ "author": "Eren Gölge @erogol",
370
+ "license": "MPL",
371
+ "contact": "[email protected]"
372
+ }
373
+ }
374
+ },
375
+ "en": {
376
+ "ek1": {
377
+ "wavegrad": {
378
+ "description": "EK1 en-rp wavegrad by NMStoker",
379
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--en--ek1--wavegrad.zip",
380
+ "commit": "c802255",
381
+ "license": "apache 2.0"
382
+ }
383
+ },
384
+ "ljspeech": {
385
+ "multiband-melgan": {
386
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--en--ljspeech--multiband-melgan.zip",
387
+ "commit": "ea976b0",
388
+ "author": "Eren Gölge @erogol",
389
+ "license": "MPL",
390
+ "contact": "[email protected]"
391
+ },
392
+ "hifigan_v2": {
393
+ "description": "HiFiGAN_v2 LJSpeech vocoder from https://arxiv.org/abs/2010.05646.",
394
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--en--ljspeech--hifigan_v2.zip",
395
+ "commit": "bae2ad0f",
396
+ "author": "@erogol",
397
+ "license": "apache 2.0",
398
+ "contact": "[email protected]"
399
+ },
400
+ "univnet": {
401
+ "description": "UnivNet model finetuned on TacotronDDC_ph spectrograms for better compatibility.",
402
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--en--ljspeech--univnet_v2.zip",
403
+ "commit": "4581e3d",
404
+ "author": "Eren @erogol",
405
+ "license": "apache 2.0",
406
+ "contact": "[email protected]"
407
+ }
408
+ },
409
+ "blizzard2013": {
410
+ "hifigan_v2": {
411
+ "description": "HiFiGAN_v2 LJSpeech vocoder from https://arxiv.org/abs/2010.05646.",
412
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.7.0_models/vocoder_models--en--blizzard2013--hifigan_v2.zip",
413
+ "commit": "d6284e7",
414
+ "author": "Adam Froghyar @a-froghyar",
415
+ "license": "apache 2.0",
416
+ "contact": "[email protected]"
417
+ }
418
+ },
419
+ "vctk": {
420
+ "hifigan_v2": {
421
+ "description": "Finetuned and intended to be used with tts_models/en/vctk/sc-glow-tts",
422
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--en--vctk--hifigan_v2.zip",
423
+ "commit": "2f07160",
424
+ "author": "Edresson Casanova",
425
+ "license": "apache 2.0",
426
+ "contact": ""
427
+ }
428
+ },
429
+ "sam": {
430
+ "hifigan_v2": {
431
+ "description": "Finetuned and intended to be used with tts_models/en/sam/tacotron_DDC",
432
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--en--sam--hifigan_v2.zip",
433
+ "commit": "2f07160",
434
+ "author": "Eren Gölge @erogol",
435
+ "license": "apache 2.0",
436
+ "contact": "[email protected]"
437
+ }
438
+ }
439
+ },
440
+ "nl": {
441
+ "mai": {
442
+ "parallel-wavegan": {
443
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--nl--mai--parallel-wavegan.zip",
444
+ "author": "@r-dh",
445
+ "license": "apache 2.0",
446
+ "commit": "unknown"
447
+ }
448
+ }
449
+ },
450
+ "de": {
451
+ "thorsten": {
452
+ "wavegrad": {
453
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--de--thorsten--wavegrad.zip",
454
+ "author": "@thorstenMueller",
455
+ "license": "apache 2.0",
456
+ "commit": "unknown"
457
+ },
458
+ "fullband-melgan": {
459
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--de--thorsten--fullband-melgan.zip",
460
+ "author": "@thorstenMueller",
461
+ "license": "apache 2.0",
462
+ "commit": "unknown"
463
+ }
464
+ }
465
+ },
466
+ "ja": {
467
+ "kokoro": {
468
+ "hifigan_v1": {
469
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--ja--kokoro--hifigan_v1.zip",
470
+ "description": "HifiGAN model trained for kokoro dataset by @kaiidams",
471
+ "author": "@kaiidams",
472
+ "license": "apache 2.0",
473
+ "commit": "3900448"
474
+ }
475
+ }
476
+ },
477
+ "uk": {
478
+ "mai": {
479
+ "multiband-melgan": {
480
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--uk--mai--multiband-melgan.zip",
481
+ "author":"@robinhad",
482
+ "commit": "bdab788d",
483
+ "license": "MIT",
484
+ "contact": ""
485
+ }
486
+ }
487
+ },
488
+ "tr":{
489
+ "common-voice": {
490
+ "hifigan":{
491
+ "github_rls_url": "https://coqui.gateway.scarf.sh/v0.6.1_models/vocoder_models--tr--common-voice--hifigan.zip",
492
+ "description": "HifiGAN model using an unknown speaker from the Common-Voice dataset.",
493
+ "author": "Fatih Akademi",
494
+ "license": "MIT",
495
+ "commit": null
496
+ }
497
+ }
498
+ }
499
+ }
500
+ }
Indic-TTS/TTS/TTS/VERSION ADDED
@@ -0,0 +1 @@
1
+ 0.7.1
Indic-TTS/TTS/TTS/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ import os
2
+
3
+ with open(os.path.join(os.path.dirname(__file__), "VERSION"), "r", encoding="utf-8") as f:
4
+ version = f.read().strip()
5
+
6
+ __version__ = version
Indic-TTS/TTS/TTS/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (327 Bytes). View file
 
Indic-TTS/TTS/TTS/__pycache__/model.cpython-37.pyc ADDED
Binary file (2.38 kB). View file
 
Indic-TTS/TTS/TTS/bin/__init__.py ADDED
File without changes
Indic-TTS/TTS/TTS/bin/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (145 Bytes). View file
 
Indic-TTS/TTS/TTS/bin/__pycache__/synthesize.cpython-37.pyc ADDED
Binary file (9.04 kB). View file
 
Indic-TTS/TTS/TTS/bin/collect_env_info.py ADDED
@@ -0,0 +1,48 @@
1
+ """Get detailed info about the working environment."""
2
+ import os
3
+ import platform
4
+ import sys
5
+
6
+ import numpy
7
+ import torch
8
+
9
+ sys.path += [os.path.abspath(".."), os.path.abspath(".")]
10
+ import json
11
+
12
+ import TTS
13
+
14
+
15
+ def system_info():
16
+ return {
17
+ "OS": platform.system(),
18
+ "architecture": platform.architecture(),
19
+ "version": platform.version(),
20
+ "processor": platform.processor(),
21
+ "python": platform.python_version(),
22
+ }
23
+
24
+
25
+ def cuda_info():
26
+ return {
27
+ "GPU": [torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())],
28
+ "available": torch.cuda.is_available(),
29
+ "version": torch.version.cuda,
30
+ }
31
+
32
+
33
+ def package_info():
34
+ return {
35
+ "numpy": numpy.__version__,
36
+ "PyTorch_version": torch.__version__,
37
+ "PyTorch_debug": torch.version.debug,
38
+ "TTS": TTS.__version__,
39
+ }
40
+
41
+
42
+ def main():
43
+ details = {"System": system_info(), "CUDA": cuda_info(), "Packages": package_info()}
44
+ print(json.dumps(details, indent=4, sort_keys=True))
45
+
46
+
47
+ if __name__ == "__main__":
48
+ main()
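+ 
+ # Example invocation (a typical way to run this script; the JSON it prints depends on your machine):
+ #   python TTS/bin/collect_env_info.py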
Indic-TTS/TTS/TTS/bin/compute_attention_masks.py ADDED
@@ -0,0 +1,165 @@
1
+ import argparse
2
+ import importlib
3
+ import os
4
+ from argparse import RawTextHelpFormatter
5
+
6
+ import numpy as np
7
+ import torch
8
+ from torch.utils.data import DataLoader
9
+ from tqdm import tqdm
10
+
11
+ from TTS.config import load_config
12
+ from TTS.tts.datasets.TTSDataset import TTSDataset
13
+ from TTS.tts.models import setup_model
14
+ from TTS.tts.utils.text.characters import make_symbols, phonemes, symbols
15
+ from TTS.utils.audio import AudioProcessor
16
+ from TTS.utils.io import load_checkpoint
17
+
18
+ if __name__ == "__main__":
19
+ # pylint: disable=bad-option-value
20
+ parser = argparse.ArgumentParser(
21
+ description="""Extract attention masks from trained Tacotron/Tacotron2 models.
22
+ These masks can be used for different purposes including training a TTS model with a Duration Predictor.\n\n"""
23
+ """Each attention mask is written to the same path as the input wav file with ".npy" file extension.
24
+ (e.g. path/bla.wav (wav file) --> path/bla.npy (attention mask))\n"""
25
+ """
26
+ Example run:
27
+ CUDA_VISIBLE_DEVICE="0" python TTS/bin/compute_attention_masks.py
28
+ --model_path /data/rw/home/Models/ljspeech-dcattn-December-14-2020_11+10AM-9d0e8c7/checkpoint_200000.pth
29
+ --config_path /data/rw/home/Models/ljspeech-dcattn-December-14-2020_11+10AM-9d0e8c7/config.json
30
+ --dataset_metafile metadata.csv
31
+ --data_path /root/LJSpeech-1.1/
32
+ --batch_size 32
33
+ --dataset ljspeech
34
+ --use_cuda True
35
+ """,
36
+ formatter_class=RawTextHelpFormatter,
37
+ )
38
+ parser.add_argument("--model_path", type=str, required=True, help="Path to Tacotron/Tacotron2 model file ")
39
+ parser.add_argument(
40
+ "--config_path",
41
+ type=str,
42
+ required=True,
43
+ help="Path to Tacotron/Tacotron2 config file.",
44
+ )
45
+ parser.add_argument(
46
+ "--dataset",
47
+ type=str,
48
+ default="",
49
+ required=True,
50
+ help="Target dataset processor name from TTS.tts.dataset.preprocess.",
51
+ )
52
+
53
+ parser.add_argument(
54
+ "--dataset_metafile",
55
+ type=str,
56
+ default="",
57
+ required=True,
58
+ help="Dataset metafile inclusing file paths with transcripts.",
59
+ )
60
+ parser.add_argument("--data_path", type=str, default="", help="Defines the data path. It overwrites config.json.")
61
+ parser.add_argument("--use_cuda", type=bool, default=False, help="enable/disable cuda.")
62
+
63
+ parser.add_argument(
64
+ "--batch_size", default=16, type=int, help="Batch size for the model. Use batch_size=1 if you have no CUDA."
65
+ )
66
+ args = parser.parse_args()
67
+
68
+ C = load_config(args.config_path)
69
+ ap = AudioProcessor(**C.audio)
70
+
71
+ # if the vocabulary was passed, replace the default
72
+ if "characters" in C.keys():
73
+ symbols, phonemes = make_symbols(**C.characters)
74
+
75
+ # load the model
76
+ num_chars = len(phonemes) if C.use_phonemes else len(symbols)
77
+ # TODO: handle multi-speaker
78
+ model = setup_model(C)
79
+ model, _ = load_checkpoint(model, args.model_path, args.use_cuda, True)
80
+
81
+ # data loader
82
+ preprocessor = importlib.import_module("TTS.tts.datasets.formatters")
83
+ preprocessor = getattr(preprocessor, args.dataset)
84
+ meta_data = preprocessor(args.data_path, args.dataset_metafile)
85
+ dataset = TTSDataset(
86
+ model.decoder.r,
87
+ C.text_cleaner,
88
+ compute_linear_spec=False,
89
+ ap=ap,
90
+ meta_data=meta_data,
91
+ characters=C.characters if "characters" in C.keys() else None,
92
+ add_blank=C["add_blank"] if "add_blank" in C.keys() else False,
93
+ use_phonemes=C.use_phonemes,
94
+ phoneme_cache_path=C.phoneme_cache_path,
95
+ phoneme_language=C.phoneme_language,
96
+ enable_eos_bos=C.enable_eos_bos_chars,
97
+ )
98
+
99
+ dataset.sort_and_filter_items(C.get("sort_by_audio_len", default=False))
100
+ loader = DataLoader(
101
+ dataset,
102
+ batch_size=args.batch_size,
103
+ num_workers=4,
104
+ collate_fn=dataset.collate_fn,
105
+ shuffle=False,
106
+ drop_last=False,
107
+ )
108
+
109
+ # compute attentions
110
+ file_paths = []
111
+ with torch.no_grad():
112
+ for data in tqdm(loader):
113
+ # setup input data
114
+ text_input = data[0]
115
+ text_lengths = data[1]
116
+ linear_input = data[3]
117
+ mel_input = data[4]
118
+ mel_lengths = data[5]
119
+ stop_targets = data[6]
120
+ item_idxs = data[7]
121
+
122
+ # dispatch data to GPU
123
+ if args.use_cuda:
124
+ text_input = text_input.cuda()
125
+ text_lengths = text_lengths.cuda()
126
+ mel_input = mel_input.cuda()
127
+ mel_lengths = mel_lengths.cuda()
128
+
129
+ model_outputs = model.forward(text_input, text_lengths, mel_input)
130
+
131
+ alignments = model_outputs["alignments"].detach()
132
+ for idx, alignment in enumerate(alignments):
133
+ item_idx = item_idxs[idx]
134
+ # interpolate if r > 1
135
+ alignment = (
136
+ torch.nn.functional.interpolate(
137
+ alignment.transpose(0, 1).unsqueeze(0),
138
+ size=None,
139
+ scale_factor=model.decoder.r,
140
+ mode="nearest",
141
+ align_corners=None,
142
+ recompute_scale_factor=None,
143
+ )
144
+ .squeeze(0)
145
+ .transpose(0, 1)
146
+ )
147
+ # remove paddings
148
+ alignment = alignment[: mel_lengths[idx], : text_lengths[idx]].cpu().numpy()
149
+ # set file paths
150
+ wav_file_name = os.path.basename(item_idx)
151
+ align_file_name = os.path.splitext(wav_file_name)[0] + "_attn.npy"
152
+ file_path = item_idx.replace(wav_file_name, align_file_name)
153
+ # save output
154
+ wav_file_abs_path = os.path.abspath(item_idx)
155
+ file_abs_path = os.path.abspath(file_path)
156
+ file_paths.append([wav_file_abs_path, file_abs_path])
157
+ np.save(file_path, alignment)
158
+
159
+ # output metafile
160
+ metafile = os.path.join(args.data_path, "metadata_attn_mask.txt")
161
+
162
+ with open(metafile, "w", encoding="utf-8") as f:
163
+ for p in file_paths:
164
+ f.write(f"{p[0]}|{p[1]}\n")
165
+ print(f" >> Metafile created: {metafile}")