Column schema (as shown in the dataset viewer):

| column | type |
|---|---|
| identifier | string (1–155 chars) |
| parameters | string (2–6.09k chars) |
| docstring | string (11–63.4k chars) |
| docstring_summary | string (0–63.4k chars) |
| function | string (29–99.8k chars) |
| function_tokens | sequence |
| start_point | sequence |
| end_point | sequence |
| language | string (1 distinct value) |
| docstring_language | string (2–7 chars) |
| docstring_language_predictions | string (18–23 chars) |
| is_langid_reliable | string (2 distinct values) |

identifier | parameters | docstring | docstring_summary | function | function_tokens | start_point | end_point | language | docstring_language | docstring_language_predictions | is_langid_reliable |
---|---|---|---|---|---|---|---|---|---|---|---|
config_entry | () | Create a mock GeoNet NZ Volcano config entry. | Create a mock GeoNet NZ Volcano config entry. | def config_entry():
"""Create a mock GeoNet NZ Volcano config entry."""
return MockConfigEntry(
domain=DOMAIN,
data={
CONF_LATITUDE: -41.2,
CONF_LONGITUDE: 174.7,
CONF_RADIUS: 25,
CONF_UNIT_SYSTEM: "metric",
CONF_SCAN_INTERVAL: 300.0,
},
title="-41.2, 174.7",
) | [
"def",
"config_entry",
"(",
")",
":",
"return",
"MockConfigEntry",
"(",
"domain",
"=",
"DOMAIN",
",",
"data",
"=",
"{",
"CONF_LATITUDE",
":",
"-",
"41.2",
",",
"CONF_LONGITUDE",
":",
"174.7",
",",
"CONF_RADIUS",
":",
"25",
",",
"CONF_UNIT_SYSTEM",
":",
"\"metric\"",
",",
"CONF_SCAN_INTERVAL",
":",
"300.0",
",",
"}",
",",
"title",
"=",
"\"-41.2, 174.7\"",
",",
")"
] | [
16,
0
] | [
28,
5
] | python | en | ['it', 'en', 'en'] | True |
Preprocessor.__init__ | (self, corpus, target=None, **kwargs) |
The corpus is the `HTMLCorpusReader` to preprocess and pickle.
The target is the directory on disk to output the pickled corpus to.
|
The corpus is the `HTMLCorpusReader` to preprocess and pickle.
The target is the directory on disk to output the pickled corpus to.
| def __init__(self, corpus, target=None, **kwargs):
"""
The corpus is the `HTMLCorpusReader` to preprocess and pickle.
The target is the directory on disk to output the pickled corpus to.
"""
self.corpus = corpus
self.target = target
self.titles = list(self.corpus.titles()) | [
"def",
"__init__",
"(",
"self",
",",
"corpus",
",",
"target",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"corpus",
"=",
"corpus",
"self",
".",
"target",
"=",
"target",
"self",
".",
"titles",
"=",
"list",
"(",
"self",
".",
"corpus",
".",
"titles",
"(",
")",
")"
] | [
20,
4
] | [
27,
48
] | python | en | ['en', 'error', 'th'] | False |
Preprocessor.fileids | (self, fileids=None, categories=None) |
Helper function to access the fileids of the corpus
|
Helper function to access the fileids of the corpus
| def fileids(self, fileids=None, categories=None):
"""
Helper function to access the fileids of the corpus
"""
fileids = self.corpus.resolve(fileids, categories)
if fileids:
return fileids
return self.corpus.fileids() | [
"def",
"fileids",
"(",
"self",
",",
"fileids",
"=",
"None",
",",
"categories",
"=",
"None",
")",
":",
"fileids",
"=",
"self",
".",
"corpus",
".",
"resolve",
"(",
"fileids",
",",
"categories",
")",
"if",
"fileids",
":",
"return",
"fileids",
"return",
"self",
".",
"corpus",
".",
"fileids",
"(",
")"
] | [
49,
4
] | [
56,
36
] | python | en | ['en', 'error', 'th'] | False |
Preprocessor.abspath | (self, fileid) |
Returns the absolute path to the target fileid from the corpus fileid.
|
Returns the absolute path to the target fileid from the corpus fileid.
| def abspath(self, fileid):
"""
Returns the absolute path to the target fileid from the corpus fileid.
"""
# Find the directory, relative from the corpus root.
parent = os.path.relpath(
os.path.dirname(self.corpus.abspath(fileid)), self.corpus.root
)
# Compute the name parts to reconstruct
basename = os.path.basename(fileid)
name, ext = os.path.splitext(basename)
# Create the pickle file extension
basename = name + '.pickle'
# Return the path to the file relative to the target.
return os.path.normpath(os.path.join(self.target, parent, basename)) | [
"def",
"abspath",
"(",
"self",
",",
"fileid",
")",
":",
"# Find the directory, relative from the corpus root.",
"parent",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"corpus",
".",
"abspath",
"(",
"fileid",
")",
")",
",",
"self",
".",
"corpus",
".",
"root",
")",
"# Compute the name parts to reconstruct",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fileid",
")",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"basename",
")",
"# Create the pickle file extension",
"basename",
"=",
"name",
"+",
"'.pickle'",
"# Return the path to the file relative to the target.",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"target",
",",
"parent",
",",
"basename",
")",
")"
] | [
58,
4
] | [
75,
76
] | python | en | ['en', 'error', 'th'] | False |
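The fileid-to-pickle path mapping in `abspath` can be checked in isolation. A minimal standalone sketch follows; the corpus object is replaced by plain root/fileid strings, which is an assumption for illustration, not the real reader API:

```python
import os

def pickle_path(fileid, corpus_root, target):
    # Mirrors Preprocessor.abspath: keep the subdirectory layout relative
    # to the corpus root, swap the extension for '.pickle', and re-root
    # the result under `target`.
    parent = os.path.relpath(
        os.path.dirname(os.path.join(corpus_root, fileid)), corpus_root
    )
    name, _ext = os.path.splitext(os.path.basename(fileid))
    return os.path.normpath(os.path.join(target, parent, name + '.pickle'))

# 'news/2020/article.html' -> 'out/news/2020/article.pickle'
print(pickle_path('news/2020/article.html', 'corpus', 'out'))
```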
Preprocessor.replicate | (self, source) |
Directly copies all files in the source directory to the root of the
target directory (does not maintain subdirectory structures). Used to
copy over metadata files from the root of the corpus to the target.
|
Directly copies all files in the source directory to the root of the
target directory (does not maintain subdirectory structures). Used to
copy over metadata files from the root of the corpus to the target.
| def replicate(self, source):
"""
Directly copies all files in the source directory to the root of the
target directory (does not maintain subdirectory structures). Used to
copy over metadata files from the root of the corpus to the target.
"""
names = [
name for name in os.listdir(source)
if not name.startswith('.')
]
# Filter out directories and copy files
for name in names:
src = os.path.abspath(os.path.join(source, name))
dst = os.path.abspath(os.path.join(self.target, name))
if os.path.isfile(src):
shutil.copy(src, dst) | [
"def",
"replicate",
"(",
"self",
",",
"source",
")",
":",
"names",
"=",
"[",
"name",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"source",
")",
"if",
"not",
"name",
".",
"startswith",
"(",
"'.'",
")",
"]",
"# Filter out directories and copy files",
"for",
"name",
"in",
"names",
":",
"src",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"source",
",",
"name",
")",
")",
"dst",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"target",
",",
"name",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"src",
")",
":",
"shutil",
".",
"copy",
"(",
"src",
",",
"dst",
")"
] | [
77,
4
] | [
94,
37
] | python | en | ['en', 'error', 'th'] | False |
Preprocessor.tokenize | (self, fileid) |
Segments, tokenizes, and tags a document in the corpus. Returns a
generator of paragraphs, which are lists of sentences, which in turn
are lists of part of speech tagged words.
|
Segments, tokenizes, and tags a document in the corpus. Returns a
generator of paragraphs, which are lists of sentences, which in turn
are lists of part of speech tagged words.
| def tokenize(self, fileid):
"""
Segments, tokenizes, and tags a document in the corpus. Returns a
generator of paragraphs, which are lists of sentences, which in turn
are lists of part of speech tagged words.
"""
for paragraph in self.corpus.paras(fileids=fileid):
yield [
nltk.pos_tag(nltk.wordpunct_tokenize(sent))
for sent in nltk.sent_tokenize(paragraph)
] | [
"def",
"tokenize",
"(",
"self",
",",
"fileid",
")",
":",
"for",
"paragraph",
"in",
"self",
".",
"corpus",
".",
"paras",
"(",
"fileids",
"=",
"fileid",
")",
":",
"yield",
"[",
"nltk",
".",
"pos_tag",
"(",
"nltk",
".",
"wordpunct_tokenize",
"(",
"sent",
")",
")",
"for",
"sent",
"in",
"nltk",
".",
"sent_tokenize",
"(",
"paragraph",
")",
"]"
] | [
96,
4
] | [
106,
13
] | python | en | ['en', 'error', 'th'] | False |
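For reference, running the same NLTK pipeline on a plain string shows the shape of what `tokenize` yields per paragraph. The sketch below is self-contained; the sample text is made up, and the data-package names are the classic NLTK ones (newer NLTK releases also ship `punkt_tab` variants):

```python
import nltk

# One-time downloads of the sentence segmenter and POS tagger models.
nltk.download('punkt', quiet=True)
nltk.download('averaged_perceptron_tagger', quiet=True)

paragraph = "The corpus is small. It tokenizes quickly."
tagged = [
    nltk.pos_tag(nltk.wordpunct_tokenize(sent))
    for sent in nltk.sent_tokenize(paragraph)
]
# One paragraph -> a list of sentences, each a list of (token, tag) pairs:
# [[('The', 'DT'), ('corpus', 'NN'), ...], [('It', 'PRP'), ...]]
print(tagged)
```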
Preprocessor.process | (self, idx, fileid) |
For a single file does the following preprocessing work:
1. Checks the location on disk to make sure no errors occur.
2. Gets all paragraphs for the given text.
3. Segments the paragraphs with the sent_tokenizer
4. Tokenizes the sentences with the wordpunct_tokenizer
5. Tags the sentences using the default pos_tagger
6. Writes the document as a pickle to the target location.
This method is called multiple times from the transform runner.
|
For a single file does the following preprocessing work:
1. Checks the location on disk to make sure no errors occur.
2. Gets all paragraphs for the given text.
3. Segments the paragraphs with the sent_tokenizer
4. Tokenizes the sentences with the wordpunct_tokenizer
5. Tags the sentences using the default pos_tagger
6. Writes the document as a pickle to the target location.
This method is called multiple times from the transform runner.
| def process(self, idx, fileid):
"""
For a single file does the following preprocessing work:
1. Checks the location on disk to make sure no errors occur.
2. Gets all paragraphs for the given text.
3. Segments the paragraphs with the sent_tokenizer
4. Tokenizes the sentences with the wordpunct_tokenizer
5. Tags the sentences using the default pos_tagger
6. Writes the document as a pickle to the target location.
This method is called multiple times from the transform runner.
"""
# Compute the outpath to write the file to.
target = self.abspath(fileid)
parent = os.path.dirname(target)
# Make sure the directory exists
if not os.path.exists(parent):
os.makedirs(parent)
# Make sure that the parent is a directory and not a file
if not os.path.isdir(parent):
raise ValueError(
"Please supply a directory to write preprocessed data to."
)
# Create a data structure for the pickle
document = {'title': self.titles[idx],
'document': list(self.tokenize(fileid))
}
# Open and serialize the pickle to disk
with open(target, 'wb') as f:
pickle.dump(document, f, pickle.HIGHEST_PROTOCOL)
# Clean up the document
del document
# Return the target fileid
return target | [
"def",
"process",
"(",
"self",
",",
"idx",
",",
"fileid",
")",
":",
"# Compute the outpath to write the file to.",
"target",
"=",
"self",
".",
"abspath",
"(",
"fileid",
")",
"parent",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"target",
")",
"# Make sure the directory exists",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"parent",
")",
":",
"os",
".",
"makedirs",
"(",
"parent",
")",
"# Make sure that the parent is a directory and not a file",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"parent",
")",
":",
"raise",
"ValueError",
"(",
"\"Please supply a directory to write preprocessed data to.\"",
")",
"# Create a data structure for the pickle",
"document",
"=",
"{",
"'title'",
":",
"self",
".",
"titles",
"[",
"idx",
"]",
",",
"'document'",
":",
"list",
"(",
"self",
".",
"tokenize",
"(",
"fileid",
")",
")",
"}",
"# Open and serialize the pickle to disk",
"with",
"open",
"(",
"target",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"document",
",",
"f",
",",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"# Clean up the document",
"del",
"document",
"# Return the target fileid",
"return",
"target"
] | [
108,
4
] | [
146,
21
] | python | en | ['en', 'error', 'th'] | False |
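Reading one of the documents serialized by `process` back is symmetric; a small sketch, with a hypothetical path:

```python
import pickle

# Load a document written by Preprocessor.process (path is hypothetical).
with open('data/preprocessed/news/2020/article.pickle', 'rb') as f:
    document = pickle.load(f)

print(document['title'])
# document['document'] is paragraphs -> sentences -> (token, POS tag) pairs.
first_sentence = document['document'][0][0]
print(first_sentence[:5])  # first five tagged tokens
```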
Preprocessor.transform | (self, fileids=None, categories=None) |
Transform the wrapped corpus, writing out the segmented, tokenized,
and part of speech tagged corpus as a pickle to the target directory.
This method will also directly copy files that are in the corpus.root
directory that are not matched by the corpus.fileids().
|
Transform the wrapped corpus, writing out the segmented, tokenized,
and part of speech tagged corpus as a pickle to the target directory.
This method will also directly copy files that are in the corpus.root
directory that are not matched by the corpus.fileids().
| def transform(self, fileids=None, categories=None):
"""
Transform the wrapped corpus, writing out the segmented, tokenized,
and part of speech tagged corpus as a pickle to the target directory.
This method will also directly copy files that are in the corpus.root
directory that are not matched by the corpus.fileids().
"""
# Make the target directory if it doesn't already exist
if not os.path.exists(self.target):
os.makedirs(self.target)
# First shutil.copy anything in the root directory.
self.replicate(self.corpus.root)
# Resolve the fileids to start processing
for idx, fileid in enumerate(self.fileids(fileids, categories)):
yield self.process(idx,fileid) | [
"def",
"transform",
"(",
"self",
",",
"fileids",
"=",
"None",
",",
"categories",
"=",
"None",
")",
":",
"# Make the target directory if it doesn't already exist",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"target",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"target",
")",
"# First shutil.copy anything in the root directory.",
"self",
".",
"replicate",
"(",
"self",
".",
"corpus",
".",
"root",
")",
"# Resolve the fileids to start processing",
"for",
"idx",
",",
"fileid",
"in",
"enumerate",
"(",
"self",
".",
"fileids",
"(",
"fileids",
",",
"categories",
")",
")",
":",
"yield",
"self",
".",
"process",
"(",
"idx",
",",
"fileid",
")"
] | [
148,
4
] | [
164,
42
] | python | en | ['en', 'error', 'th'] | False |
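Putting the pieces together, usage presumably looks like the sketch below; the `HTMLCorpusReader` constructor signature, module paths, and directory names are assumptions inferred from the docstrings, not a verified API:

```python
# Module paths are assumptions; import from wherever these classes live.
# from reader import HTMLCorpusReader
# from preprocess import Preprocessor

corpus = HTMLCorpusReader('data/raw_corpus')    # assumed constructor
preprocessor = Preprocessor(corpus, target='data/preprocessed')

# transform() is a generator, so iterate to drive the work.
for target_path in preprocessor.transform():
    print('wrote', target_path)
```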
ProgressPreprocessor.transform | (self, fileids=None, categories=None) |
At the moment, we simply have to replace the entire transform method
to get progress bar functionality. Kind of a bummer, but it's a small
method (purposefully so).
|
At the moment, we simply have to replace the entire transform method
to get progress bar functionality. Kind of a bummer, but it's a small
method (purposefully so).
| def transform(self, fileids=None, categories=None):
"""
At the moment, we simply have to replace the entire transform method
to get progress bar functionality. Kind of a bummer, but it's a small
method (purposefully so).
"""
# Make the target directory if it doesn't already exist
if not os.path.exists(self.target):
os.makedirs(self.target)
# First shutil.copy anything in the root directory.
self.replicate(self.corpus.root)
# Get the total corpus size for per byte counting
corpus_size = sum(self.corpus.sizes(fileids, categories))
# Start processing with a progress bar.
with tqdm(total=corpus_size, unit='B', unit_scale=True) as pbar:
for idx, fileid in enumerate(self.fileids(fileids, categories)):
yield self.process(idx, fileid)
pbar.update(sum(self.corpus.sizes(fileids=fileid))) | [
"def",
"transform",
"(",
"self",
",",
"fileids",
"=",
"None",
",",
"categories",
"=",
"None",
")",
":",
"# Make the target directory if it doesn't already exist",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"target",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"target",
")",
"# First shutil.copy anything in the root directory.",
"self",
".",
"replicate",
"(",
"self",
".",
"corpus",
".",
"root",
")",
"# Get the total corpus size for per byte counting",
"corpus_size",
"=",
"sum",
"(",
"self",
".",
"corpus",
".",
"sizes",
"(",
"fileids",
",",
"categories",
")",
")",
"# Start processing with a progress bar.",
"with",
"tqdm",
"(",
"total",
"=",
"corpus_size",
",",
"unit",
"=",
"'B'",
",",
"unit_scale",
"=",
"True",
")",
"as",
"pbar",
":",
"for",
"idx",
",",
"fileid",
"in",
"enumerate",
"(",
"self",
".",
"fileids",
"(",
"fileids",
",",
"categories",
")",
")",
":",
"yield",
"self",
".",
"process",
"(",
"idx",
",",
"fileid",
")",
"pbar",
".",
"update",
"(",
"sum",
"(",
"self",
".",
"corpus",
".",
"sizes",
"(",
"fileids",
"=",
"fileid",
")",
")",
")"
] | [
173,
4
] | [
193,
67
] | python | en | ['en', 'error', 'th'] | False |
ParallelPreprocessor.__init__ | (self, *args, **kwargs) |
Get parallel-specific arguments and then call super.
|
Get parallel-specific arguments and then call super.
| def __init__(self, *args, **kwargs):
"""
Get parallel-specific arguments and then call super.
"""
self.tasks = mp.cpu_count()
super(ParallelPreprocessor, self).__init__(*args, **kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"tasks",
"=",
"mp",
".",
"cpu_count",
"(",
")",
"super",
"(",
"ParallelPreprocessor",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | [
201,
4
] | [
206,
67
] | python | en | ['en', 'error', 'th'] | False |
ParallelPreprocessor.on_result | (self, result) |
Appends the results to the master results list.
|
Appends the results to the master results list.
| def on_result(self, result):
"""
Appends the results to the master results list.
"""
self.results.append(result) | [
"def",
"on_result",
"(",
"self",
",",
"result",
")",
":",
"self",
".",
"results",
".",
"append",
"(",
"result",
")"
] | [
208,
4
] | [
212,
35
] | python | en | ['en', 'error', 'th'] | False |
ParallelPreprocessor.transform | (self, fileids=None, categories=None) |
Create a pool using the multiprocessing library, passing in
the number of cores available to set the desired number of
processes.
|
Create a pool using the multiprocessing library, passing in
the number of cores available to set the desired number of
processes.
| def transform(self, fileids=None, categories=None):
"""
Create a pool using the multiprocessing library, passing in
the number of cores available to set the desired number of
processes.
"""
# Make the target directory if it doesn't already exist
if not os.path.exists(self.target):
os.makedirs(self.target)
# First shutil.copy anything in the root directory.
self.replicate(self.corpus.root)
# Reset the results
self.results = []
# Create a multiprocessing pool
pool = mp.Pool(processes=self.tasks)
tasks = [
pool.apply_async(self.process, (idx, fileid,), callback=self.on_result)
for idx, fileid in enumerate(self.fileids(fileids, categories))
]
# Close the pool and join
pool.close()
pool.join()
return self.results | [
"def",
"transform",
"(",
"self",
",",
"fileids",
"=",
"None",
",",
"categories",
"=",
"None",
")",
":",
"# Make the target directory if it doesn't already exist",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"target",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"target",
")",
"# First shutil.copy anything in the root directory.",
"self",
".",
"replicate",
"(",
"self",
".",
"corpus",
".",
"root",
")",
"# Reset the results",
"self",
".",
"results",
"=",
"[",
"]",
"# Create a multiprocessing pool",
"pool",
"=",
"mp",
".",
"Pool",
"(",
"processes",
"=",
"self",
".",
"tasks",
")",
"tasks",
"=",
"[",
"pool",
".",
"apply_async",
"(",
"self",
".",
"process",
",",
"(",
"idx",
",",
"fileid",
",",
")",
",",
"callback",
"=",
"self",
".",
"on_result",
")",
"for",
"idx",
",",
"fileid",
"in",
"enumerate",
"(",
"self",
".",
"fileids",
"(",
"fileids",
",",
"categories",
")",
")",
"]",
"# Close the pool and join",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"return",
"self",
".",
"results"
] | [
214,
4
] | [
241,
27
] | python | en | ['en', 'error', 'th'] | False |
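The `apply_async` plus `callback` pattern used in `ParallelPreprocessor.transform` is standard `multiprocessing`; here is a self-contained sketch of the same shape with a toy worker in place of `process`:

```python
import multiprocessing as mp

def work(idx, fileid):
    # Stand-in for Preprocessor.process(idx, fileid).
    return fileid.upper()

if __name__ == '__main__':
    results = []
    pool = mp.Pool(processes=mp.cpu_count())
    tasks = [
        # Each callback runs in the parent process as its task completes.
        pool.apply_async(work, (idx, fileid), callback=results.append)
        for idx, fileid in enumerate(['a.html', 'b.html', 'c.html'])
    ]
    pool.close()  # no more tasks will be submitted
    pool.join()   # block until every worker is done
    print(sorted(results))  # ['A.HTML', 'B.HTML', 'C.HTML']
```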
ProgressParallelPreprocessor.on_result | (self, pbar) |
Indicates progress on result.
|
Indicates progress on result.
| def on_result(self, pbar):
"""
Indicates progress on result.
"""
def inner(result):
pbar.update(1)
self.results.append(result)
return inner | [
"def",
"on_result",
"(",
"self",
",",
"pbar",
")",
":",
"def",
"inner",
"(",
"result",
")",
":",
"pbar",
".",
"update",
"(",
"1",
")",
"self",
".",
"results",
".",
"append",
"(",
"result",
")",
"return",
"inner"
] | [
251,
4
] | [
259,
20
] | python | en | ['en', 'error', 'th'] | False |
ProgressParallelPreprocessor.transform | (self, fileids=None, categories=None) |
Setup the progress bar before conducting multiprocess transform.
|
Setup the progress bar before conducting multiprocess transform.
| def transform(self, fileids=None, categories=None):
"""
Setup the progress bar before conducting multiprocess transform.
"""
# Make the target directory if it doesn't already exist
if not os.path.exists(self.target):
os.makedirs(self.target)
# First shutil.copy anything in the root directory.
self.replicate(self.corpus.root)
# Reset the results
self.results = []
fileids = self.fileids(fileids, categories)
# Get the total corpus size for per byte counting and create pbar
with tqdm(total=len(fileids), unit='Docs') as pbar:
# Create a multiprocessing pool
pool = mp.Pool(processes=self.tasks)
tasks = [
pool.apply_async(self.process, (idx, fileid,), callback=self.on_result)
for idx, fileid in enumerate(self.fileids(fileids, categories))
]
# Close the pool and join
pool.close()
pool.join()
return self.results | [
"def",
"transform",
"(",
"self",
",",
"fileids",
"=",
"None",
",",
"categories",
"=",
"None",
")",
":",
"# Make the target directory if it doesn't already exist",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"target",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"target",
")",
"# First shutil.copy anything in the root directory.",
"self",
".",
"replicate",
"(",
"self",
".",
"corpus",
".",
"root",
")",
"# Reset the results",
"self",
".",
"results",
"=",
"[",
"]",
"fileids",
"=",
"self",
".",
"fileids",
"(",
"fileids",
",",
"categories",
")",
"# Get the total corpus size for per byte counting and create pbar",
"with",
"tqdm",
"(",
"total",
"=",
"len",
"(",
"fileids",
")",
",",
"unit",
"=",
"'Docs'",
")",
"as",
"pbar",
":",
"# Create a multiprocessing pool",
"pool",
"=",
"mp",
".",
"Pool",
"(",
"processes",
"=",
"self",
".",
"tasks",
")",
"tasks",
"=",
"[",
"pool",
".",
"apply_async",
"(",
"self",
".",
"process",
",",
"(",
"idx",
",",
"fileid",
",",
")",
",",
"callback",
"=",
"self",
".",
"on_result",
")",
"for",
"idx",
",",
"fileid",
"in",
"enumerate",
"(",
"self",
".",
"fileids",
"(",
"fileids",
",",
"categories",
")",
")",
"]",
"# Close the pool and join",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"return",
"self",
".",
"results"
] | [
261,
4
] | [
291,
27
] | python | en | ['en', 'error', 'th'] | False |
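One thing worth flagging in the row above: `on_result` here is a factory that takes `pbar` and returns the real callback, yet `transform` still passes `callback=self.on_result`, which would bind each worker's result to the `pbar` parameter so neither the bar nor `self.results` ever updates; presumably `callback=self.on_result(pbar)` was intended. A self-contained sketch of the intended pattern, with made-up names and a toy worker:

```python
import multiprocessing as mp
from tqdm import tqdm

def work(x):
    return x * x

def make_callback(pbar, results):
    # Factory mirroring ProgressParallelPreprocessor.on_result: bind the
    # progress bar once, return the closure the pool should actually call.
    def inner(result):
        pbar.update(1)
        results.append(result)
    return inner

if __name__ == '__main__':
    items = list(range(8))
    results = []
    with tqdm(total=len(items), unit='Docs') as pbar:
        pool = mp.Pool(processes=2)
        for x in items:
            # Note the call: the *returned closure* is passed, not the factory.
            pool.apply_async(work, (x,), callback=make_callback(pbar, results))
        pool.close()
        pool.join()
    print(sorted(results))
```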
test_init | (hass) | Test initial config. | Test initial config. | async def test_init(hass):
"""Test initial config."""
channels = MagicMock()
channels.get_by_id.return_value = CHANNEL_OBJECT
streams = MagicMock()
streams.get_stream_by_user.return_value = None
twitch_mock = MagicMock()
twitch_mock.users.translate_usernames_to_ids.return_value = [USER_ID]
twitch_mock.channels = channels
twitch_mock.streams = streams
with patch(
"homeassistant.components.twitch.sensor.TwitchClient", return_value=twitch_mock
):
assert await async_setup_component(hass, sensor.DOMAIN, CONFIG) is True
await hass.async_block_till_done()
sensor_state = hass.states.get(ENTITY_ID)
assert sensor_state.state == "offline"
assert sensor_state.name == "channel123"
assert sensor_state.attributes["icon"] == "mdi:twitch"
assert sensor_state.attributes["friendly_name"] == "channel123"
assert sensor_state.attributes["views"] == 24
assert sensor_state.attributes["followers"] == 42 | [
"async",
"def",
"test_init",
"(",
"hass",
")",
":",
"channels",
"=",
"MagicMock",
"(",
")",
"channels",
".",
"get_by_id",
".",
"return_value",
"=",
"CHANNEL_OBJECT",
"streams",
"=",
"MagicMock",
"(",
")",
"streams",
".",
"get_stream_by_user",
".",
"return_value",
"=",
"None",
"twitch_mock",
"=",
"MagicMock",
"(",
")",
"twitch_mock",
".",
"users",
".",
"translate_usernames_to_ids",
".",
"return_value",
"=",
"[",
"USER_ID",
"]",
"twitch_mock",
".",
"channels",
"=",
"channels",
"twitch_mock",
".",
"streams",
"=",
"streams",
"with",
"patch",
"(",
"\"homeassistant.components.twitch.sensor.TwitchClient\"",
",",
"return_value",
"=",
"twitch_mock",
")",
":",
"assert",
"await",
"async_setup_component",
"(",
"hass",
",",
"sensor",
".",
"DOMAIN",
",",
"CONFIG",
")",
"is",
"True",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"sensor_state",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"ENTITY_ID",
")",
"assert",
"sensor_state",
".",
"state",
"==",
"\"offline\"",
"assert",
"sensor_state",
".",
"name",
"==",
"\"channel123\"",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"icon\"",
"]",
"==",
"\"mdi:twitch\"",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"friendly_name\"",
"]",
"==",
"\"channel123\"",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"views\"",
"]",
"==",
"24",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"followers\"",
"]",
"==",
"42"
] | [
40,
0
] | [
65,
53
] | python | en | ['en', 'en', 'en'] | True |
test_offline | (hass) | Test offline state. | Test offline state. | async def test_offline(hass):
"""Test offline state."""
twitch_mock = MagicMock()
twitch_mock.users.translate_usernames_to_ids.return_value = [USER_ID]
twitch_mock.channels.get_by_id.return_value = CHANNEL_OBJECT
twitch_mock.streams.get_stream_by_user.return_value = None
with patch(
"homeassistant.components.twitch.sensor.TwitchClient",
return_value=twitch_mock,
):
assert await async_setup_component(hass, sensor.DOMAIN, CONFIG) is True
await hass.async_block_till_done()
sensor_state = hass.states.get(ENTITY_ID)
assert sensor_state.state == "offline"
assert sensor_state.attributes["entity_picture"] == "logo.png" | [
"async",
"def",
"test_offline",
"(",
"hass",
")",
":",
"twitch_mock",
"=",
"MagicMock",
"(",
")",
"twitch_mock",
".",
"users",
".",
"translate_usernames_to_ids",
".",
"return_value",
"=",
"[",
"USER_ID",
"]",
"twitch_mock",
".",
"channels",
".",
"get_by_id",
".",
"return_value",
"=",
"CHANNEL_OBJECT",
"twitch_mock",
".",
"streams",
".",
"get_stream_by_user",
".",
"return_value",
"=",
"None",
"with",
"patch",
"(",
"\"homeassistant.components.twitch.sensor.TwitchClient\"",
",",
"return_value",
"=",
"twitch_mock",
",",
")",
":",
"assert",
"await",
"async_setup_component",
"(",
"hass",
",",
"sensor",
".",
"DOMAIN",
",",
"CONFIG",
")",
"is",
"True",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"sensor_state",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"ENTITY_ID",
")",
"assert",
"sensor_state",
".",
"state",
"==",
"\"offline\"",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"entity_picture\"",
"]",
"==",
"\"logo.png\""
] | [
68,
0
] | [
85,
66
] | python | en | ['en', 'de', 'en'] | True |
test_streaming | (hass) | Test streaming state. | Test streaming state. | async def test_streaming(hass):
"""Test streaming state."""
twitch_mock = MagicMock()
twitch_mock.users.translate_usernames_to_ids.return_value = [USER_ID]
twitch_mock.channels.get_by_id.return_value = CHANNEL_OBJECT
twitch_mock.streams.get_stream_by_user.return_value = STREAM_OBJECT_ONLINE
with patch(
"homeassistant.components.twitch.sensor.TwitchClient",
return_value=twitch_mock,
):
assert await async_setup_component(hass, sensor.DOMAIN, CONFIG) is True
await hass.async_block_till_done()
sensor_state = hass.states.get(ENTITY_ID)
assert sensor_state.state == "streaming"
assert sensor_state.attributes["entity_picture"] == "stream-medium.png"
assert sensor_state.attributes["game"] == "Good Game"
assert sensor_state.attributes["title"] == "Title" | [
"async",
"def",
"test_streaming",
"(",
"hass",
")",
":",
"twitch_mock",
"=",
"MagicMock",
"(",
")",
"twitch_mock",
".",
"users",
".",
"translate_usernames_to_ids",
".",
"return_value",
"=",
"[",
"USER_ID",
"]",
"twitch_mock",
".",
"channels",
".",
"get_by_id",
".",
"return_value",
"=",
"CHANNEL_OBJECT",
"twitch_mock",
".",
"streams",
".",
"get_stream_by_user",
".",
"return_value",
"=",
"STREAM_OBJECT_ONLINE",
"with",
"patch",
"(",
"\"homeassistant.components.twitch.sensor.TwitchClient\"",
",",
"return_value",
"=",
"twitch_mock",
",",
")",
":",
"assert",
"await",
"async_setup_component",
"(",
"hass",
",",
"sensor",
".",
"DOMAIN",
",",
"CONFIG",
")",
"is",
"True",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"sensor_state",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"ENTITY_ID",
")",
"assert",
"sensor_state",
".",
"state",
"==",
"\"streaming\"",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"entity_picture\"",
"]",
"==",
"\"stream-medium.png\"",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"game\"",
"]",
"==",
"\"Good Game\"",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"title\"",
"]",
"==",
"\"Title\""
] | [
88,
0
] | [
107,
54
] | python | en | ['en', 'en', 'en'] | True |
test_oauth_without_sub_and_follow | (hass) | Test state with oauth. | Test state with oauth. | async def test_oauth_without_sub_and_follow(hass):
"""Test state with oauth."""
twitch_mock = MagicMock()
twitch_mock.users.translate_usernames_to_ids.return_value = [USER_ID]
twitch_mock.channels.get_by_id.return_value = CHANNEL_OBJECT
twitch_mock._oauth_token = True # A replacement for the token
twitch_mock.users.get.return_value = OAUTH_USER_ID
twitch_mock.users.check_subscribed_to_channel.side_effect = HTTPError()
twitch_mock.users.check_follows_channel.side_effect = HTTPError()
with patch(
"homeassistant.components.twitch.sensor.TwitchClient",
return_value=twitch_mock,
):
assert await async_setup_component(hass, sensor.DOMAIN, CONFIG_WITH_OAUTH)
await hass.async_block_till_done()
sensor_state = hass.states.get(ENTITY_ID)
assert sensor_state.attributes["subscribed"] is False
assert sensor_state.attributes["following"] is False | [
"async",
"def",
"test_oauth_without_sub_and_follow",
"(",
"hass",
")",
":",
"twitch_mock",
"=",
"MagicMock",
"(",
")",
"twitch_mock",
".",
"users",
".",
"translate_usernames_to_ids",
".",
"return_value",
"=",
"[",
"USER_ID",
"]",
"twitch_mock",
".",
"channels",
".",
"get_by_id",
".",
"return_value",
"=",
"CHANNEL_OBJECT",
"twitch_mock",
".",
"_oauth_token",
"=",
"True",
"# A replacement for the token",
"twitch_mock",
".",
"users",
".",
"get",
".",
"return_value",
"=",
"OAUTH_USER_ID",
"twitch_mock",
".",
"users",
".",
"check_subscribed_to_channel",
".",
"side_effect",
"=",
"HTTPError",
"(",
")",
"twitch_mock",
".",
"users",
".",
"check_follows_channel",
".",
"side_effect",
"=",
"HTTPError",
"(",
")",
"with",
"patch",
"(",
"\"homeassistant.components.twitch.sensor.TwitchClient\"",
",",
"return_value",
"=",
"twitch_mock",
",",
")",
":",
"assert",
"await",
"async_setup_component",
"(",
"hass",
",",
"sensor",
".",
"DOMAIN",
",",
"CONFIG_WITH_OAUTH",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"sensor_state",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"ENTITY_ID",
")",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"subscribed\"",
"]",
"is",
"False",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"following\"",
"]",
"is",
"False"
] | [
110,
0
] | [
130,
56
] | python | en | ['en', 'en', 'en'] | True |
test_oauth_with_sub | (hass) | Test state with oauth and sub. | Test state with oauth and sub. | async def test_oauth_with_sub(hass):
"""Test state with oauth and sub."""
twitch_mock = MagicMock()
twitch_mock.users.translate_usernames_to_ids.return_value = [USER_ID]
twitch_mock.channels.get_by_id.return_value = CHANNEL_OBJECT
twitch_mock._oauth_token = True # A replacement for the token
twitch_mock.users.get.return_value = OAUTH_USER_ID
twitch_mock.users.check_subscribed_to_channel.return_value = SUB_ACTIVE
twitch_mock.users.check_follows_channel.side_effect = HTTPError()
with patch(
"homeassistant.components.twitch.sensor.TwitchClient",
return_value=twitch_mock,
):
assert await async_setup_component(hass, sensor.DOMAIN, CONFIG_WITH_OAUTH)
await hass.async_block_till_done()
sensor_state = hass.states.get(ENTITY_ID)
assert sensor_state.attributes["subscribed"] is True
assert sensor_state.attributes["subscribed_since"] == "2020-01-20T21:22:42"
assert sensor_state.attributes["subscription_is_gifted"] is False
assert sensor_state.attributes["following"] is False | [
"async",
"def",
"test_oauth_with_sub",
"(",
"hass",
")",
":",
"twitch_mock",
"=",
"MagicMock",
"(",
")",
"twitch_mock",
".",
"users",
".",
"translate_usernames_to_ids",
".",
"return_value",
"=",
"[",
"USER_ID",
"]",
"twitch_mock",
".",
"channels",
".",
"get_by_id",
".",
"return_value",
"=",
"CHANNEL_OBJECT",
"twitch_mock",
".",
"_oauth_token",
"=",
"True",
"# A replacement for the token",
"twitch_mock",
".",
"users",
".",
"get",
".",
"return_value",
"=",
"OAUTH_USER_ID",
"twitch_mock",
".",
"users",
".",
"check_subscribed_to_channel",
".",
"return_value",
"=",
"SUB_ACTIVE",
"twitch_mock",
".",
"users",
".",
"check_follows_channel",
".",
"side_effect",
"=",
"HTTPError",
"(",
")",
"with",
"patch",
"(",
"\"homeassistant.components.twitch.sensor.TwitchClient\"",
",",
"return_value",
"=",
"twitch_mock",
",",
")",
":",
"assert",
"await",
"async_setup_component",
"(",
"hass",
",",
"sensor",
".",
"DOMAIN",
",",
"CONFIG_WITH_OAUTH",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"sensor_state",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"ENTITY_ID",
")",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"subscribed\"",
"]",
"is",
"True",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"subscribed_since\"",
"]",
"==",
"\"2020-01-20T21:22:42\"",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"subscription_is_gifted\"",
"]",
"is",
"False",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"following\"",
"]",
"is",
"False"
] | [
133,
0
] | [
155,
56
] | python | en | ['en', 'en', 'en'] | True |
test_oauth_with_follow | (hass) | Test state with oauth and follow. | Test state with oauth and follow. | async def test_oauth_with_follow(hass):
"""Test state with oauth and follow."""
twitch_mock = MagicMock()
twitch_mock.users.translate_usernames_to_ids.return_value = [USER_ID]
twitch_mock.channels.get_by_id.return_value = CHANNEL_OBJECT
twitch_mock._oauth_token = True # A replacement for the token
twitch_mock.users.get.return_value = OAUTH_USER_ID
twitch_mock.users.check_subscribed_to_channel.side_effect = HTTPError()
twitch_mock.users.check_follows_channel.return_value = FOLLOW_ACTIVE
with patch(
"homeassistant.components.twitch.sensor.TwitchClient",
return_value=twitch_mock,
):
assert await async_setup_component(hass, sensor.DOMAIN, CONFIG_WITH_OAUTH)
await hass.async_block_till_done()
sensor_state = hass.states.get(ENTITY_ID)
assert sensor_state.attributes["subscribed"] is False
assert sensor_state.attributes["following"] is True
assert sensor_state.attributes["following_since"] == "2020-01-20T21:22:42" | [
"async",
"def",
"test_oauth_with_follow",
"(",
"hass",
")",
":",
"twitch_mock",
"=",
"MagicMock",
"(",
")",
"twitch_mock",
".",
"users",
".",
"translate_usernames_to_ids",
".",
"return_value",
"=",
"[",
"USER_ID",
"]",
"twitch_mock",
".",
"channels",
".",
"get_by_id",
".",
"return_value",
"=",
"CHANNEL_OBJECT",
"twitch_mock",
".",
"_oauth_token",
"=",
"True",
"# A replacement for the token",
"twitch_mock",
".",
"users",
".",
"get",
".",
"return_value",
"=",
"OAUTH_USER_ID",
"twitch_mock",
".",
"users",
".",
"check_subscribed_to_channel",
".",
"side_effect",
"=",
"HTTPError",
"(",
")",
"twitch_mock",
".",
"users",
".",
"check_follows_channel",
".",
"return_value",
"=",
"FOLLOW_ACTIVE",
"with",
"patch",
"(",
"\"homeassistant.components.twitch.sensor.TwitchClient\"",
",",
"return_value",
"=",
"twitch_mock",
",",
")",
":",
"assert",
"await",
"async_setup_component",
"(",
"hass",
",",
"sensor",
".",
"DOMAIN",
",",
"CONFIG_WITH_OAUTH",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"sensor_state",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"ENTITY_ID",
")",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"subscribed\"",
"]",
"is",
"False",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"following\"",
"]",
"is",
"True",
"assert",
"sensor_state",
".",
"attributes",
"[",
"\"following_since\"",
"]",
"==",
"\"2020-01-20T21:22:42\""
] | [
158,
0
] | [
179,
78
] | python | en | ['en', 'en', 'en'] | True |
_constfn | (val) |
Wrap as function
|
Wrap as function
| def _constfn(val):
"""
Wrap as function
"""
def f(_):
return val
return f | [
"def",
"_constfn",
"(",
"val",
")",
":",
"def",
"f",
"(",
"_",
")",
":",
"return",
"val",
"return",
"f"
] | [
26,
0
] | [
32,
12
] | python | en | ['en', 'error', 'th'] | False |
TrialsInfo.get_next | (self) |
Get actions of the next trial
|
Get actions of the next trial
| def get_next(self):
"""
Get actions of the next trial
"""
if self.iter >= self.inf_batch_size:
return None, None
actions = []
for step in self.actions:
actions.append(step[self.iter])
self.iter += 1
return self.iter - 1, actions | [
"def",
"get_next",
"(",
"self",
")",
":",
"if",
"self",
".",
"iter",
">=",
"self",
".",
"inf_batch_size",
":",
"return",
"None",
",",
"None",
"actions",
"=",
"[",
"]",
"for",
"step",
"in",
"self",
".",
"actions",
":",
"actions",
".",
"append",
"(",
"step",
"[",
"self",
".",
"iter",
"]",
")",
"self",
".",
"iter",
"+=",
"1",
"return",
"self",
".",
"iter",
"-",
"1",
",",
"actions"
] | [
78,
4
] | [
88,
37
] | python | en | ['en', 'error', 'th'] | False |
TrialsInfo.update_rewards | (self, rewards, returns) |
After the trial is finished, the reward and return of this trial are updated
|
After the trial is finished, the reward and return of this trial are updated
| def update_rewards(self, rewards, returns):
"""
After the trial is finished, the reward and return of this trial are updated
"""
self.rewards = rewards
self.returns = returns | [
"def",
"update_rewards",
"(",
"self",
",",
"rewards",
",",
"returns",
")",
":",
"self",
".",
"rewards",
"=",
"rewards",
"self",
".",
"returns",
"=",
"returns"
] | [
90,
4
] | [
95,
30
] | python | en | ['en', 'error', 'th'] | False |
TrialsInfo.convert_shape | (self) |
Convert shape
|
Convert shape
| def convert_shape(self):
"""
Convert shape
"""
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
self.obs = sf01(self.obs)
self.returns = sf01(self.returns)
self.dones = sf01(self.dones)
self.actions = sf01(self.actions)
self.values = sf01(self.values)
self.neglogpacs = sf01(self.neglogpacs) | [
"def",
"convert_shape",
"(",
"self",
")",
":",
"def",
"sf01",
"(",
"arr",
")",
":",
"\"\"\"\n swap and then flatten axes 0 and 1\n \"\"\"",
"s",
"=",
"arr",
".",
"shape",
"return",
"arr",
".",
"swapaxes",
"(",
"0",
",",
"1",
")",
".",
"reshape",
"(",
"s",
"[",
"0",
"]",
"*",
"s",
"[",
"1",
"]",
",",
"*",
"s",
"[",
"2",
":",
"]",
")",
"self",
".",
"obs",
"=",
"sf01",
"(",
"self",
".",
"obs",
")",
"self",
".",
"returns",
"=",
"sf01",
"(",
"self",
".",
"returns",
")",
"self",
".",
"dones",
"=",
"sf01",
"(",
"self",
".",
"dones",
")",
"self",
".",
"actions",
"=",
"sf01",
"(",
"self",
".",
"actions",
")",
"self",
".",
"values",
"=",
"sf01",
"(",
"self",
".",
"values",
")",
"self",
".",
"neglogpacs",
"=",
"sf01",
"(",
"self",
".",
"neglogpacs",
")"
] | [
97,
4
] | [
112,
47
] | python | en | ['en', 'error', 'th'] | False |
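The `sf01` helper in `convert_shape` is the usual swap-and-flatten from (nsteps, nenvs, ...) rollout arrays to one flat batch; a quick standalone check:

```python
import numpy as np

def sf01(arr):
    # Swap the (nsteps, nenvs) leading axes, then flatten them into one
    # batch dimension, exactly as in TrialsInfo.convert_shape.
    s = arr.shape
    return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])

# nsteps=3 rollout steps, nenvs=2 parallel trials, feature dim 4:
arr = np.arange(3 * 2 * 4).reshape(3, 2, 4)
flat = sf01(arr)
print(flat.shape)    # (6, 4)
# Rows are now grouped per environment: env 0's steps 0..2, then env 1's.
print(flat[:3, 0])   # [ 0  8 16] -> env 0's feature 0 across the 3 steps
```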
PPOModel.inference | (self, num) |
Generate actions along with related info from policy network.
observation is the action of the last step.
Parameters
----------
num: int
The number of trials to generate
Returns
-------
mb_obs : list
Observation of the ``num`` configurations
mb_actions : list
Actions of the ``num`` configurations
mb_values : list
Values from the value function of the ``num`` configurations
mb_neglogpacs : list
``neglogp`` of the ``num`` configurations
mb_dones : list
To show whether the play is done, always ``True``
last_values : tensorflow tensor
The last values of the ``num`` configurations, got with session run
|
Generate actions along with related info from policy network.
observation is the action of the last step. | def inference(self, num):
"""
Generate actions along with related info from policy network.
observation is the action of the last step.
Parameters
----------
num: int
The number of trials to generate
Returns
-------
mb_obs : list
Observation of the ``num`` configurations
mb_actions : list
Actions of the ``num`` configurations
mb_values : list
Values from the value function of the ``num`` configurations
mb_neglogpacs : list
``neglogp`` of the ``num`` configurations
mb_dones : list
To show whether the play is done, always ``True``
last_values : tensorflow tensor
The last values of the ``num`` configurations, got with session run
"""
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], []
# initial observation
# use the (n+1)th embedding to represent the first step action
first_step_ob = self.model_config.action_space.n
obs = [first_step_ob for _ in range(num)]
dones = [True for _ in range(num)]
states = self.states
# For n in range number of steps
for cur_step in range(self.model_config.nsteps):
# Given observations, get action values and neglogpacs
# We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, states, neglogpacs = self.model.step(cur_step, obs, S=states, M=dones)
mb_obs.append(obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(dones)
# Take actions in env and look at the results
# Infos contain a ton of useful information
obs[:] = actions
if cur_step == self.model_config.nsteps - 1:
dones = [True for _ in range(num)]
else:
dones = [False for _ in range(num)]
#batch of steps to batch of rollouts
np_obs = np.asarray(obs)
mb_obs = np.asarray(mb_obs, dtype=np_obs.dtype)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(np_obs, S=states, M=dones)
return mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values | [
"def",
"inference",
"(",
"self",
",",
"num",
")",
":",
"# Here, we init the lists that will contain the mb of experiences",
"mb_obs",
",",
"mb_actions",
",",
"mb_values",
",",
"mb_dones",
",",
"mb_neglogpacs",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"# initial observation",
"# use the (n+1)th embedding to represent the first step action",
"first_step_ob",
"=",
"self",
".",
"model_config",
".",
"action_space",
".",
"n",
"obs",
"=",
"[",
"first_step_ob",
"for",
"_",
"in",
"range",
"(",
"num",
")",
"]",
"dones",
"=",
"[",
"True",
"for",
"_",
"in",
"range",
"(",
"num",
")",
"]",
"states",
"=",
"self",
".",
"states",
"# For n in range number of steps",
"for",
"cur_step",
"in",
"range",
"(",
"self",
".",
"model_config",
".",
"nsteps",
")",
":",
"# Given observations, get action value and neglopacs",
"# We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init",
"actions",
",",
"values",
",",
"states",
",",
"neglogpacs",
"=",
"self",
".",
"model",
".",
"step",
"(",
"cur_step",
",",
"obs",
",",
"S",
"=",
"states",
",",
"M",
"=",
"dones",
")",
"mb_obs",
".",
"append",
"(",
"obs",
".",
"copy",
"(",
")",
")",
"mb_actions",
".",
"append",
"(",
"actions",
")",
"mb_values",
".",
"append",
"(",
"values",
")",
"mb_neglogpacs",
".",
"append",
"(",
"neglogpacs",
")",
"mb_dones",
".",
"append",
"(",
"dones",
")",
"# Take actions in env and look the results",
"# Infos contains a ton of useful informations",
"obs",
"[",
":",
"]",
"=",
"actions",
"if",
"cur_step",
"==",
"self",
".",
"model_config",
".",
"nsteps",
"-",
"1",
":",
"dones",
"=",
"[",
"True",
"for",
"_",
"in",
"range",
"(",
"num",
")",
"]",
"else",
":",
"dones",
"=",
"[",
"False",
"for",
"_",
"in",
"range",
"(",
"num",
")",
"]",
"#batch of steps to batch of rollouts",
"np_obs",
"=",
"np",
".",
"asarray",
"(",
"obs",
")",
"mb_obs",
"=",
"np",
".",
"asarray",
"(",
"mb_obs",
",",
"dtype",
"=",
"np_obs",
".",
"dtype",
")",
"mb_actions",
"=",
"np",
".",
"asarray",
"(",
"mb_actions",
")",
"mb_values",
"=",
"np",
".",
"asarray",
"(",
"mb_values",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"mb_neglogpacs",
"=",
"np",
".",
"asarray",
"(",
"mb_neglogpacs",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"mb_dones",
"=",
"np",
".",
"asarray",
"(",
"mb_dones",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"last_values",
"=",
"self",
".",
"model",
".",
"value",
"(",
"np_obs",
",",
"S",
"=",
"states",
",",
"M",
"=",
"dones",
")",
"return",
"mb_obs",
",",
"mb_actions",
",",
"mb_values",
",",
"mb_neglogpacs",
",",
"mb_dones",
",",
"last_values"
] | [
153,
4
] | [
214,
82
] | python | en | ['en', 'error', 'th'] | False |
PPOModel.compute_rewards | (self, trials_info, trials_result) |
Compute the rewards of the trials in trials_info based on trials_result,
and update the rewards in trials_info
Parameters
----------
trials_info : TrialsInfo
Info of the generated trials
trials_result : list
Final results (e.g., acc) of the generated trials
|
Compute the rewards of the trials in trials_info based on trials_result,
and update the rewards in trials_info | def compute_rewards(self, trials_info, trials_result):
"""
Compute the rewards of the trials in trials_info based on trials_result,
and update the rewards in trials_info
Parameters
----------
trials_info : TrialsInfo
Info of the generated trials
trials_result : list
Final results (e.g., acc) of the generated trials
"""
mb_rewards = np.asarray([trials_result for _ in trials_info.actions], dtype=np.float32)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
last_dones = np.asarray([True for _ in trials_result], dtype=np.bool) # ugly
for t in reversed(range(self.model_config.nsteps)):
if t == self.model_config.nsteps - 1:
nextnonterminal = 1.0 - last_dones
nextvalues = trials_info.last_value
else:
nextnonterminal = 1.0 - trials_info.dones[t+1]
nextvalues = trials_info.values[t+1]
delta = mb_rewards[t] + self.model_config.gamma * nextvalues * nextnonterminal - trials_info.values[t]
lastgaelam = delta + self.model_config.gamma * self.model_config.lam * nextnonterminal * lastgaelam
mb_advs[t] = lastgaelam # pylint: disable=unsupported-assignment-operation
mb_returns = mb_advs + trials_info.values
trials_info.update_rewards(mb_rewards, mb_returns)
trials_info.convert_shape() | [
"def",
"compute_rewards",
"(",
"self",
",",
"trials_info",
",",
"trials_result",
")",
":",
"mb_rewards",
"=",
"np",
".",
"asarray",
"(",
"[",
"trials_result",
"for",
"_",
"in",
"trials_info",
".",
"actions",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# discount/bootstrap off value fn",
"mb_returns",
"=",
"np",
".",
"zeros_like",
"(",
"mb_rewards",
")",
"mb_advs",
"=",
"np",
".",
"zeros_like",
"(",
"mb_rewards",
")",
"lastgaelam",
"=",
"0",
"last_dones",
"=",
"np",
".",
"asarray",
"(",
"[",
"True",
"for",
"_",
"in",
"trials_result",
"]",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"# ugly",
"for",
"t",
"in",
"reversed",
"(",
"range",
"(",
"self",
".",
"model_config",
".",
"nsteps",
")",
")",
":",
"if",
"t",
"==",
"self",
".",
"model_config",
".",
"nsteps",
"-",
"1",
":",
"nextnonterminal",
"=",
"1.0",
"-",
"last_dones",
"nextvalues",
"=",
"trials_info",
".",
"last_value",
"else",
":",
"nextnonterminal",
"=",
"1.0",
"-",
"trials_info",
".",
"dones",
"[",
"t",
"+",
"1",
"]",
"nextvalues",
"=",
"trials_info",
".",
"values",
"[",
"t",
"+",
"1",
"]",
"delta",
"=",
"mb_rewards",
"[",
"t",
"]",
"+",
"self",
".",
"model_config",
".",
"gamma",
"*",
"nextvalues",
"*",
"nextnonterminal",
"-",
"trials_info",
".",
"values",
"[",
"t",
"]",
"lastgaelam",
"=",
"delta",
"+",
"self",
".",
"model_config",
".",
"gamma",
"*",
"self",
".",
"model_config",
".",
"lam",
"*",
"nextnonterminal",
"*",
"lastgaelam",
"mb_advs",
"[",
"t",
"]",
"=",
"lastgaelam",
"# pylint: disable=unsupported-assignment-operation",
"mb_returns",
"=",
"mb_advs",
"+",
"trials_info",
".",
"values",
"trials_info",
".",
"update_rewards",
"(",
"mb_rewards",
",",
"mb_returns",
")",
"trials_info",
".",
"convert_shape",
"(",
")"
] | [
216,
4
] | [
247,
35
] | python | en | ['en', 'error', 'th'] | False |
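For reference, the backward pass in `compute_rewards` is standard generalized advantage estimation; in the code's notation (reward r_t, value V_t, done flag d_t, horizon T = nsteps, with the advantage at the final step seeded by its delta) the recursion reads:

```latex
\delta_t   = r_t + \gamma\,(1 - d_{t+1})\,V_{t+1} - V_t \\
\hat{A}_t  = \delta_t + \gamma\lambda\,(1 - d_{t+1})\,\hat{A}_{t+1} \\
R_t        = \hat{A}_t + V_t
```

Here V_T comes from `trials_info.last_value` and every trial is treated as done at the final step. One portability note: `np.bool` as used in this row was deprecated in NumPy 1.20 and removed in 1.24; plain `bool` is the modern spelling.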
PPOModel.train | (self, trials_info, nenvs) |
Train the policy/value network using trials_info
Parameters
----------
trials_info : TrialsInfo
Complete info of the generated trials from the previous inference
nenvs : int
The batch size of the (previous) inference
|
Train the policy/value network using trials_info | def train(self, trials_info, nenvs):
"""
Train the policy/value network using trials_info
Parameters
----------
trials_info : TrialsInfo
Complete info of the generated trials from the previous inference
nenvs : int
The batch size of the (previous) inference
"""
# keep frac decay for future optimization
if self.cur_update <= self.nupdates:
frac = 1.0 - (self.cur_update - 1.0) / self.nupdates
else:
logger.warning('current update (self.cur_update) %d has exceeded total updates (self.nupdates) %d',
self.cur_update, self.nupdates)
frac = 1.0 - (self.nupdates - 1.0) / self.nupdates
lrnow = self.lr(frac)
cliprangenow = self.cliprange(frac)
self.cur_update += 1
states = self.states
assert states is not None # recurrent version
assert nenvs % self.model_config.nminibatches == 0
envsperbatch = nenvs // self.model_config.nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * self.model_config.nsteps).reshape(nenvs, self.model_config.nsteps)
for _ in range(self.model_config.noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (trials_info.obs, trials_info.returns, trials_info.dones,
trials_info.actions, trials_info.values, trials_info.neglogpacs))
mbstates = states[mbenvinds]
self.model.train(lrnow, cliprangenow, *slices, mbstates) | [
"def",
"train",
"(",
"self",
",",
"trials_info",
",",
"nenvs",
")",
":",
"# keep frac decay for future optimization",
"if",
"self",
".",
"cur_update",
"<=",
"self",
".",
"nupdates",
":",
"frac",
"=",
"1.0",
"-",
"(",
"self",
".",
"cur_update",
"-",
"1.0",
")",
"/",
"self",
".",
"nupdates",
"else",
":",
"logger",
".",
"warning",
"(",
"'current update (self.cur_update) %d has exceeded total updates (self.nupdates) %d'",
",",
"self",
".",
"cur_update",
",",
"self",
".",
"nupdates",
")",
"frac",
"=",
"1.0",
"-",
"(",
"self",
".",
"nupdates",
"-",
"1.0",
")",
"/",
"self",
".",
"nupdates",
"lrnow",
"=",
"self",
".",
"lr",
"(",
"frac",
")",
"cliprangenow",
"=",
"self",
".",
"cliprange",
"(",
"frac",
")",
"self",
".",
"cur_update",
"+=",
"1",
"states",
"=",
"self",
".",
"states",
"assert",
"states",
"is",
"not",
"None",
"# recurrent version",
"assert",
"nenvs",
"%",
"self",
".",
"model_config",
".",
"nminibatches",
"==",
"0",
"envsperbatch",
"=",
"nenvs",
"//",
"self",
".",
"model_config",
".",
"nminibatches",
"envinds",
"=",
"np",
".",
"arange",
"(",
"nenvs",
")",
"flatinds",
"=",
"np",
".",
"arange",
"(",
"nenvs",
"*",
"self",
".",
"model_config",
".",
"nsteps",
")",
".",
"reshape",
"(",
"nenvs",
",",
"self",
".",
"model_config",
".",
"nsteps",
")",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"model_config",
".",
"noptepochs",
")",
":",
"np",
".",
"random",
".",
"shuffle",
"(",
"envinds",
")",
"for",
"start",
"in",
"range",
"(",
"0",
",",
"nenvs",
",",
"envsperbatch",
")",
":",
"end",
"=",
"start",
"+",
"envsperbatch",
"mbenvinds",
"=",
"envinds",
"[",
"start",
":",
"end",
"]",
"mbflatinds",
"=",
"flatinds",
"[",
"mbenvinds",
"]",
".",
"ravel",
"(",
")",
"slices",
"=",
"(",
"arr",
"[",
"mbflatinds",
"]",
"for",
"arr",
"in",
"(",
"trials_info",
".",
"obs",
",",
"trials_info",
".",
"returns",
",",
"trials_info",
".",
"dones",
",",
"trials_info",
".",
"actions",
",",
"trials_info",
".",
"values",
",",
"trials_info",
".",
"neglogpacs",
")",
")",
"mbstates",
"=",
"states",
"[",
"mbenvinds",
"]",
"self",
".",
"model",
".",
"train",
"(",
"lrnow",
",",
"cliprangenow",
",",
"*",
"slices",
",",
"mbstates",
")"
] | [
249,
4
] | [
287,
72
] | python | en | ['en', 'error', 'th'] | False |
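The index bookkeeping in `train` keeps whole per-environment trajectories inside each minibatch, which the recurrent (LSTM) state handling requires; a standalone sketch of just that slicing:

```python
import numpy as np

nenvs, nsteps, nminibatches = 4, 3, 2
envsperbatch = nenvs // nminibatches

envinds = np.arange(nenvs)
# Row i of flatinds holds the flat-batch indices of environment i's steps.
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)

np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
    mbenvinds = envinds[start:start + envsperbatch]
    # Ravel gathers *all* steps of the chosen envs, so each minibatch keeps
    # whole trajectories together rather than mixing steps across envs.
    mbflatinds = flatinds[mbenvinds].ravel()
    print(mbenvinds, '->', mbflatinds)
```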
PPOTuner.__init__ | (self, optimize_mode, trials_per_update=20, epochs_per_update=4, minibatch_size=4,
ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, cliprange=0.2) |
Initialization, PPO model is not initialized here as search space is not received yet.
Parameters
----------
optimize_mode : str
maximize or minimize
trials_per_update : int
Number of trials to have for each model update
epochs_per_update : int
Number of epochs to run for each model update
minibatch_size : int
Minibatch size (number of trials) for the update
ent_coef : float
Policy entropy coefficient in the optimization objective
lr : float
Learning rate of the model (lstm network), constant
vf_coef : float
Value function loss coefficient in the optimization objective
max_grad_norm : float
Gradient norm clipping coefficient
gamma : float
Discounting factor
lam : float
Advantage estimation discounting factor (lambda in the paper)
cliprange : float
Cliprange in the PPO algorithm, constant
|
Initialization, PPO model is not initialized here as search space is not received yet. | def __init__(self, optimize_mode, trials_per_update=20, epochs_per_update=4, minibatch_size=4,
ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, cliprange=0.2):
"""
Initialization, PPO model is not initialized here as search space is not received yet.
Parameters
----------
optimize_mode : str
maximize or minimize
trials_per_update : int
Number of trials to have for each model update
epochs_per_update : int
Number of epochs to run for each model update
minibatch_size : int
Minibatch size (number of trials) for the update
ent_coef : float
Policy entropy coefficient in the optimization objective
lr : float
Learning rate of the model (lstm network), constant
vf_coef : float
Value function loss coefficient in the optimization objective
max_grad_norm : float
Gradient norm clipping coefficient
gamma : float
Discounting factor
lam : float
Advantage estimation discounting factor (lambda in the paper)
cliprange : float
Cliprange in the PPO algorithm, constant
"""
self.optimize_mode = OptimizeMode(optimize_mode)
self.model_config = ModelConfig()
self.model = None
self.search_space = None
self.running_trials = {} # key: parameter_id, value: actions/states/etc.
self.inf_batch_size = trials_per_update # number of trials to generate in one inference
self.first_inf = True # indicate whether it is the first time to inference new trials
self.trials_result = [None for _ in range(self.inf_batch_size)] # results of finished trials
self.credit = 0 # record the unsatisfied trial requests
self.param_ids = []
self.finished_trials = 0
self.chosen_arch_template = {}
self.actions_spaces = None
self.actions_to_config = None
self.full_act_space = None
self.trials_info = None
self.all_trials = {} # used to dedup the same trial, key: config, value: final result
self.model_config.num_envs = self.inf_batch_size
self.model_config.noptepochs = epochs_per_update
self.model_config.nminibatches = minibatch_size
self.send_trial_callback = None
logger.info('Finished PPOTuner initialization') | [
"def",
"__init__",
"(",
"self",
",",
"optimize_mode",
",",
"trials_per_update",
"=",
"20",
",",
"epochs_per_update",
"=",
"4",
",",
"minibatch_size",
"=",
"4",
",",
"ent_coef",
"=",
"0.0",
",",
"lr",
"=",
"3e-4",
",",
"vf_coef",
"=",
"0.5",
",",
"max_grad_norm",
"=",
"0.5",
",",
"gamma",
"=",
"0.99",
",",
"lam",
"=",
"0.95",
",",
"cliprange",
"=",
"0.2",
")",
":",
"self",
".",
"optimize_mode",
"=",
"OptimizeMode",
"(",
"optimize_mode",
")",
"self",
".",
"model_config",
"=",
"ModelConfig",
"(",
")",
"self",
".",
"model",
"=",
"None",
"self",
".",
"search_space",
"=",
"None",
"self",
".",
"running_trials",
"=",
"{",
"}",
"# key: parameter_id, value: actions/states/etc.",
"self",
".",
"inf_batch_size",
"=",
"trials_per_update",
"# number of trials to generate in one inference",
"self",
".",
"first_inf",
"=",
"True",
"# indicate whether it is the first time to inference new trials",
"self",
".",
"trials_result",
"=",
"[",
"None",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"inf_batch_size",
")",
"]",
"# results of finished trials",
"self",
".",
"credit",
"=",
"0",
"# record the unsatisfied trial requests",
"self",
".",
"param_ids",
"=",
"[",
"]",
"self",
".",
"finished_trials",
"=",
"0",
"self",
".",
"chosen_arch_template",
"=",
"{",
"}",
"self",
".",
"actions_spaces",
"=",
"None",
"self",
".",
"actions_to_config",
"=",
"None",
"self",
".",
"full_act_space",
"=",
"None",
"self",
".",
"trials_info",
"=",
"None",
"self",
".",
"all_trials",
"=",
"{",
"}",
"# used to dedup the same trial, key: config, value: final result",
"self",
".",
"model_config",
".",
"num_envs",
"=",
"self",
".",
"inf_batch_size",
"self",
".",
"model_config",
".",
"noptepochs",
"=",
"epochs_per_update",
"self",
".",
"model_config",
".",
"nminibatches",
"=",
"minibatch_size",
"self",
".",
"send_trial_callback",
"=",
"None",
"logger",
".",
"info",
"(",
"'Finished PPOTuner initialization'",
")"
] | [
312,
4
] | [
368,
55
] | python | en | ['en', 'error', 'th'] | False |
PPOTuner._generate_action_mask | (self) |
Different steps can have different action spaces. To deal with this case, we merge all the
possible actions into one action space and use a mask to indicate the available actions at each step
|
Different steps can have different action spaces. To deal with this case, we merge all the
possible actions into one action space and use a mask to indicate the available actions at each step
| def _generate_action_mask(self):
"""
Different steps can have different action spaces. To deal with this case, we merge all the
possible actions into one action space and use a mask to indicate the available actions at each step
"""
two_masks = []
mask = []
for acts in self.actions_spaces:
one_mask = [0 for _ in range(len(self.full_act_space))]
for act in acts:
idx = self.full_act_space.index(act)
one_mask[idx] = 1
mask.append(one_mask)
two_masks.append(mask)
mask = []
for acts in self.actions_spaces:
one_mask = [-np.inf for _ in range(len(self.full_act_space))]
for act in acts:
idx = self.full_act_space.index(act)
one_mask[idx] = 0
mask.append(one_mask)
two_masks.append(mask)
return np.asarray(two_masks, dtype=np.float32) | [
"def",
"_generate_action_mask",
"(",
"self",
")",
":",
"two_masks",
"=",
"[",
"]",
"mask",
"=",
"[",
"]",
"for",
"acts",
"in",
"self",
".",
"actions_spaces",
":",
"one_mask",
"=",
"[",
"0",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"full_act_space",
")",
")",
"]",
"for",
"act",
"in",
"acts",
":",
"idx",
"=",
"self",
".",
"full_act_space",
".",
"index",
"(",
"act",
")",
"one_mask",
"[",
"idx",
"]",
"=",
"1",
"mask",
".",
"append",
"(",
"one_mask",
")",
"two_masks",
".",
"append",
"(",
"mask",
")",
"mask",
"=",
"[",
"]",
"for",
"acts",
"in",
"self",
".",
"actions_spaces",
":",
"one_mask",
"=",
"[",
"-",
"np",
".",
"inf",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"full_act_space",
")",
")",
"]",
"for",
"act",
"in",
"acts",
":",
"idx",
"=",
"self",
".",
"full_act_space",
".",
"index",
"(",
"act",
")",
"one_mask",
"[",
"idx",
"]",
"=",
"0",
"mask",
".",
"append",
"(",
"one_mask",
")",
"two_masks",
".",
"append",
"(",
"mask",
")",
"return",
"np",
".",
"asarray",
"(",
"two_masks",
",",
"dtype",
"=",
"np",
".",
"float32",
")"
] | [
410,
4
] | [
435,
54
] | python | en | ['en', 'error', 'th'] | False |
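A minimal sketch of the two-mask scheme above, using a toy merged action space (the names are hypothetical, not taken from the tuner): the 0/1 mask marks which entries of the merged space are legal at each step, while the -inf/0 mask can be added to the policy logits so that illegal actions receive probability zero after the softmax.

import numpy as np

full_act_space = ['conv3x3', 'conv5x5', 'maxpool', 'none']      # merged space (hypothetical)
actions_spaces = [['conv3x3', 'conv5x5'], ['maxpool', 'none']]  # legal actions per step

step_mask, logit_mask = [], []
for acts in actions_spaces:
    ones = [0] * len(full_act_space)
    infs = [-np.inf] * len(full_act_space)
    for act in acts:
        idx = full_act_space.index(act)
        ones[idx] = 1       # legal in the 0/1 mask
        infs[idx] = 0.0     # pass-through in the logit mask
    step_mask.append(ones)
    logit_mask.append(infs)

logits = np.zeros(len(full_act_space))
masked = logits + np.asarray(logit_mask[0])
probs = np.exp(masked) / np.exp(masked).sum()
print(step_mask[0])  # [1, 1, 0, 0]
print(probs)         # [0.5, 0.5, 0.0, 0.0] -- illegal actions get zero probability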
PPOTuner.update_search_space | (self, search_space) |
Get the search space; currently the space only includes that for NAS
Parameters
----------
search_space : dict
Search space for NAS
the format is described in the search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).
|
Get the search space; currently the space only includes that for NAS | def update_search_space(self, search_space):
"""
Get the search space; currently the space only includes that for NAS
Parameters
----------
search_space : dict
Search space for NAS
the format is described in the search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).
"""
logger.info('update search space %s', search_space)
assert self.search_space is None
self.search_space = search_space
assert self.model_config.observation_space is None
assert self.model_config.action_space is None
self.actions_spaces, self.actions_to_config, self.full_act_space, obs_space, nsteps = self._process_nas_space(search_space)
self.model_config.observation_space = spaces.Discrete(obs_space)
self.model_config.action_space = spaces.Discrete(obs_space)
self.model_config.nsteps = nsteps
# generate mask in numpy
mask = self._generate_action_mask()
assert self.model is None
self.model = PPOModel(self.model_config, mask) | [
"def",
"update_search_space",
"(",
"self",
",",
"search_space",
")",
":",
"logger",
".",
"info",
"(",
"'update search space %s'",
",",
"search_space",
")",
"assert",
"self",
".",
"search_space",
"is",
"None",
"self",
".",
"search_space",
"=",
"search_space",
"assert",
"self",
".",
"model_config",
".",
"observation_space",
"is",
"None",
"assert",
"self",
".",
"model_config",
".",
"action_space",
"is",
"None",
"self",
".",
"actions_spaces",
",",
"self",
".",
"actions_to_config",
",",
"self",
".",
"full_act_space",
",",
"obs_space",
",",
"nsteps",
"=",
"self",
".",
"_process_nas_space",
"(",
"search_space",
")",
"self",
".",
"model_config",
".",
"observation_space",
"=",
"spaces",
".",
"Discrete",
"(",
"obs_space",
")",
"self",
".",
"model_config",
".",
"action_space",
"=",
"spaces",
".",
"Discrete",
"(",
"obs_space",
")",
"self",
".",
"model_config",
".",
"nsteps",
"=",
"nsteps",
"# generate mask in numpy",
"mask",
"=",
"self",
".",
"_generate_action_mask",
"(",
")",
"assert",
"self",
".",
"model",
"is",
"None",
"self",
".",
"model",
"=",
"PPOModel",
"(",
"self",
".",
"model_config",
",",
"mask",
")"
] | [
437,
4
] | [
464,
54
] | python | en | ['en', 'error', 'th'] | False |
PPOTuner._actions_to_config | (self, actions) |
Given actions, generate the corresponding trial configuration
|
Given actions, generate the corresponding trial configuration
| def _actions_to_config(self, actions):
"""
Given actions, generate the corresponding trial configuration
"""
chosen_arch = copy.deepcopy(self.chosen_arch_template)
for cnt, act in enumerate(actions):
act_name = self.full_act_space[act]
(_key, _type) = self.actions_to_config[cnt]
if _type == 'input_choice':
if act_name == 'None':
chosen_arch[_key] = {'_value': [], '_idx': []}
else:
candidates = self.search_space[_key]['_value']['candidates']
idx = candidates.index(act_name)
chosen_arch[_key] = {'_value': [act_name], '_idx': [idx]}
elif _type == 'layer_choice':
idx = self.search_space[_key]['_value'].index(act_name)
chosen_arch[_key] = {'_value': act_name, '_idx': idx}
else:
raise ValueError('unrecognized key: {0}'.format(_type))
return chosen_arch | [
"def",
"_actions_to_config",
"(",
"self",
",",
"actions",
")",
":",
"chosen_arch",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"chosen_arch_template",
")",
"for",
"cnt",
",",
"act",
"in",
"enumerate",
"(",
"actions",
")",
":",
"act_name",
"=",
"self",
".",
"full_act_space",
"[",
"act",
"]",
"(",
"_key",
",",
"_type",
")",
"=",
"self",
".",
"actions_to_config",
"[",
"cnt",
"]",
"if",
"_type",
"==",
"'input_choice'",
":",
"if",
"act_name",
"==",
"'None'",
":",
"chosen_arch",
"[",
"_key",
"]",
"=",
"{",
"'_value'",
":",
"[",
"]",
",",
"'_idx'",
":",
"[",
"]",
"}",
"else",
":",
"candidates",
"=",
"self",
".",
"search_space",
"[",
"_key",
"]",
"[",
"'_value'",
"]",
"[",
"'candidates'",
"]",
"idx",
"=",
"candidates",
".",
"index",
"(",
"act_name",
")",
"chosen_arch",
"[",
"_key",
"]",
"=",
"{",
"'_value'",
":",
"[",
"act_name",
"]",
",",
"'_idx'",
":",
"[",
"idx",
"]",
"}",
"elif",
"_type",
"==",
"'layer_choice'",
":",
"idx",
"=",
"self",
".",
"search_space",
"[",
"_key",
"]",
"[",
"'_value'",
"]",
".",
"index",
"(",
"act_name",
")",
"chosen_arch",
"[",
"_key",
"]",
"=",
"{",
"'_value'",
":",
"act_name",
",",
"'_idx'",
":",
"idx",
"}",
"else",
":",
"raise",
"ValueError",
"(",
"'unrecognized key: {0}'",
".",
"format",
"(",
"_type",
")",
")",
"return",
"chosen_arch"
] | [
466,
4
] | [
486,
26
] | python | en | ['en', 'error', 'th'] | False |
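A worked sketch of the mapping above under a hypothetical one-key search space (all names are illustrative): the sampled action index is looked up in the merged action space and translated into the {'_value': ..., '_idx': ...} structure the trial receives.

search_space = {'layer0': {'_type': 'layer_choice', '_value': ['conv3x3', 'conv5x5']}}
full_act_space = ['conv3x3', 'conv5x5']
actions_to_config = [('layer0', 'layer_choice')]

actions = [1]                                  # the policy picked index 1
act_name = full_act_space[actions[0]]          # -> 'conv5x5'
key, _type = actions_to_config[0]
idx = search_space[key]['_value'].index(act_name)
chosen_arch = {key: {'_value': act_name, '_idx': idx}}
print(chosen_arch)                             # {'layer0': {'_value': 'conv5x5', '_idx': 1}}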
PPOTuner.generate_multiple_parameters | (self, parameter_id_list, **kwargs) |
Returns multiple sets of trial (hyper-)parameters, as an iterable of serializable objects.
Parameters
----------
parameter_id_list : list of int
Unique identifiers for each set of requested hyper-parameters.
These will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
list
A list of newly generated configurations
|
Returns multiple sets of trial (hyper-)parameters, as an iterable of serializable objects. | def generate_multiple_parameters(self, parameter_id_list, **kwargs):
"""
Returns multiple sets of trial (hyper-)parameters, as an iterable of serializable objects.
Parameters
----------
parameter_id_list : list of int
Unique identifiers for each set of requested hyper-parameters.
These will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
list
A list of newly generated configurations
"""
result = []
self.send_trial_callback = kwargs['st_callback']
for parameter_id in parameter_id_list:
had_exception = False
try:
logger.debug("generating param for %s", parameter_id)
res = self.generate_parameters(parameter_id, **kwargs)
except nni.NoMoreTrialError:
had_exception = True
if not had_exception:
result.append(res)
return result | [
"def",
"generate_multiple_parameters",
"(",
"self",
",",
"parameter_id_list",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"[",
"]",
"self",
".",
"send_trial_callback",
"=",
"kwargs",
"[",
"'st_callback'",
"]",
"for",
"parameter_id",
"in",
"parameter_id_list",
":",
"had_exception",
"=",
"False",
"try",
":",
"logger",
".",
"debug",
"(",
"\"generating param for %s\"",
",",
"parameter_id",
")",
"res",
"=",
"self",
".",
"generate_parameters",
"(",
"parameter_id",
",",
"*",
"*",
"kwargs",
")",
"except",
"nni",
".",
"NoMoreTrialError",
":",
"had_exception",
"=",
"True",
"if",
"not",
"had_exception",
":",
"result",
".",
"append",
"(",
"res",
")",
"return",
"result"
] | [
488,
4
] | [
516,
21
] | python | en | ['en', 'error', 'th'] | False |
PPOTuner.generate_parameters | (self, parameter_id, **kwargs) |
Generate parameters. If there is no trial configuration for now, self.credit is increased by 1 so the config can be sent later
Parameters
----------
parameter_id : int
Unique identifier for requested hyper-parameters.
This will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
dict
One newly generated configuration
|
Generate parameters. If there is no trial configuration for now, self.credit is increased by 1 so the config can be sent later | def generate_parameters(self, parameter_id, **kwargs):
"""
Generate parameters. If there is no trial configuration for now, self.credit is increased by 1 so the config can be sent later
Parameters
----------
parameter_id : int
Unique identifier for requested hyper-parameters.
This will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
dict
One newly generated configuration
"""
if self.first_inf:
self.trials_result = [None for _ in range(self.inf_batch_size)]
mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values = self.model.inference(self.inf_batch_size)
self.trials_info = TrialsInfo(mb_obs, mb_actions, mb_values, mb_neglogpacs,
mb_dones, last_values, self.inf_batch_size)
self.first_inf = False
trial_info_idx, actions = self.trials_info.get_next()
if trial_info_idx is None:
logger.debug('Credit added by one in parameters request')
self.credit += 1
self.param_ids.append(parameter_id)
raise nni.NoMoreTrialError('no more parameters now.')
self.running_trials[parameter_id] = trial_info_idx
new_config = self._actions_to_config(actions)
return new_config | [
"def",
"generate_parameters",
"(",
"self",
",",
"parameter_id",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"first_inf",
":",
"self",
".",
"trials_result",
"=",
"[",
"None",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"inf_batch_size",
")",
"]",
"mb_obs",
",",
"mb_actions",
",",
"mb_values",
",",
"mb_neglogpacs",
",",
"mb_dones",
",",
"last_values",
"=",
"self",
".",
"model",
".",
"inference",
"(",
"self",
".",
"inf_batch_size",
")",
"self",
".",
"trials_info",
"=",
"TrialsInfo",
"(",
"mb_obs",
",",
"mb_actions",
",",
"mb_values",
",",
"mb_neglogpacs",
",",
"mb_dones",
",",
"last_values",
",",
"self",
".",
"inf_batch_size",
")",
"self",
".",
"first_inf",
"=",
"False",
"trial_info_idx",
",",
"actions",
"=",
"self",
".",
"trials_info",
".",
"get_next",
"(",
")",
"if",
"trial_info_idx",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'Credit added by one in parameters request'",
")",
"self",
".",
"credit",
"+=",
"1",
"self",
".",
"param_ids",
".",
"append",
"(",
"parameter_id",
")",
"raise",
"nni",
".",
"NoMoreTrialError",
"(",
"'no more parameters now.'",
")",
"self",
".",
"running_trials",
"[",
"parameter_id",
"]",
"=",
"trial_info_idx",
"new_config",
"=",
"self",
".",
"_actions_to_config",
"(",
"actions",
")",
"return",
"new_config"
] | [
518,
4
] | [
552,
25
] | python | en | ['en', 'error', 'th'] | False |
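The credit mechanism can be illustrated with stand-in objects (no PPO model; this only sketches the bookkeeping): when the current inference batch is exhausted, the request is remembered and NoMoreTrialError is raised, to be repaid once the next batch is ready.

class NoMoreTrialError(Exception):
    """Stand-in for nni.NoMoreTrialError."""

credit, param_ids = 0, []
batch = iter([])                       # current inference batch, already consumed

def request(parameter_id):
    global credit
    trial = next(batch, None)
    if trial is None:                  # no config left in this batch
        credit += 1                    # remember one unsatisfied request
        param_ids.append(parameter_id)
        raise NoMoreTrialError('no more parameters now.')
    return trial

try:
    request(7)
except NoMoreTrialError:
    print(credit, param_ids)           # 1 [7]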
PPOTuner._next_round_inference | (self) |
Run an inference to generate the next batch of configurations
|
Run an inference to generate the next batch of configurations
| def _next_round_inference(self):
"""
Run an inference to generate the next batch of configurations
"""
logger.debug('Start next round inference...')
self.finished_trials = 0
self.model.compute_rewards(self.trials_info, self.trials_result)
self.model.train(self.trials_info, self.inf_batch_size)
self.running_trials = {}
# generate new trials
self.trials_result = [None for _ in range(self.inf_batch_size)]
mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values = self.model.inference(self.inf_batch_size)
self.trials_info = TrialsInfo(mb_obs, mb_actions,
mb_values, mb_neglogpacs,
mb_dones, last_values,
self.inf_batch_size)
logger.debug('Next round inference complete.')
# check credit and submit new trials
for _ in range(self.credit):
trial_info_idx, actions = self.trials_info.get_next()
if trial_info_idx is None:
logger.warning('Not enough trial configs; trials_per_update is suggested to be larger than trialConcurrency')
break
assert self.param_ids
param_id = self.param_ids.pop()
self.running_trials[param_id] = trial_info_idx
new_config = self._actions_to_config(actions)
self.send_trial_callback(param_id, new_config)
self.credit -= 1
logger.debug('Send new trial (%d, %s) for reducing credit', param_id, new_config) | [
"def",
"_next_round_inference",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Start next round inference...'",
")",
"self",
".",
"finished_trials",
"=",
"0",
"self",
".",
"model",
".",
"compute_rewards",
"(",
"self",
".",
"trials_info",
",",
"self",
".",
"trials_result",
")",
"self",
".",
"model",
".",
"train",
"(",
"self",
".",
"trials_info",
",",
"self",
".",
"inf_batch_size",
")",
"self",
".",
"running_trials",
"=",
"{",
"}",
"# generate new trials",
"self",
".",
"trials_result",
"=",
"[",
"None",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"inf_batch_size",
")",
"]",
"mb_obs",
",",
"mb_actions",
",",
"mb_values",
",",
"mb_neglogpacs",
",",
"mb_dones",
",",
"last_values",
"=",
"self",
".",
"model",
".",
"inference",
"(",
"self",
".",
"inf_batch_size",
")",
"self",
".",
"trials_info",
"=",
"TrialsInfo",
"(",
"mb_obs",
",",
"mb_actions",
",",
"mb_values",
",",
"mb_neglogpacs",
",",
"mb_dones",
",",
"last_values",
",",
"self",
".",
"inf_batch_size",
")",
"logger",
".",
"debug",
"(",
"'Next round inference complete.'",
")",
"# check credit and submit new trials",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"credit",
")",
":",
"trial_info_idx",
",",
"actions",
"=",
"self",
".",
"trials_info",
".",
"get_next",
"(",
")",
"if",
"trial_info_idx",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"'No enough trial config, trials_per_update is suggested to be larger than trialConcurrency'",
")",
"break",
"assert",
"self",
".",
"param_ids",
"param_id",
"=",
"self",
".",
"param_ids",
".",
"pop",
"(",
")",
"self",
".",
"running_trials",
"[",
"param_id",
"]",
"=",
"trial_info_idx",
"new_config",
"=",
"self",
".",
"_actions_to_config",
"(",
"actions",
")",
"self",
".",
"send_trial_callback",
"(",
"param_id",
",",
"new_config",
")",
"self",
".",
"credit",
"-=",
"1",
"logger",
".",
"debug",
"(",
"'Send new trial (%d, %s) for reducing credit'",
",",
"param_id",
",",
"new_config",
")"
] | [
554,
4
] | [
583,
93
] | python | en | ['en', 'error', 'th'] | False |
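The repayment side of the same bookkeeping, again with stand-ins: once a fresh batch has been inferred, the queued parameter ids are served first and the credit counter is decremented, exactly as the loop at the end of _next_round_inference does.

credit, param_ids = 2, [7, 8]                 # two requests arrived while exhausted
fresh_batch = iter(['cfgA', 'cfgB', 'cfgC'])  # hypothetical new configurations
running_trials = {}

while credit > 0 and param_ids:
    config = next(fresh_batch, None)
    if config is None:
        break                                 # fewer fresh configs than credit
    pid = param_ids.pop()
    running_trials[pid] = config
    credit -= 1

print(running_trials, credit)                 # {8: 'cfgA', 7: 'cfgB'} 0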
PPOTuner.receive_trial_result | (self, parameter_id, parameters, value, **kwargs) |
Receive a trial's result. If the number of finished trials equals self.inf_batch_size, start the next update to
train the model.
Parameters
----------
parameter_id : int
Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`.
parameters : dict
Hyper-parameters generated by :meth:`generate_parameters`.
value : dict
Result from trial (the return value of :func:`nni.report_final_result`).
|
Receive a trial's result. If the number of finished trials equals self.inf_batch_size, start the next update to
train the model. | def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
"""
Receive a trial's result. If the number of finished trials equals self.inf_batch_size, start the next update to
train the model.
Parameters
----------
parameter_id : int
Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`.
parameters : dict
Hyper-parameters generated by :meth:`generate_parameters`.
value : dict
Result from trial (the return value of :func:`nni.report_final_result`).
"""
trial_info_idx = self.running_trials.pop(parameter_id, None)
assert trial_info_idx is not None
value = extract_scalar_reward(value)
if self.optimize_mode == OptimizeMode.Minimize:
value = -value
self.trials_result[trial_info_idx] = value
self.finished_trials += 1
logger.debug('receive_trial_result, parameter_id %d, trial_info_idx %d, finished_trials %d, inf_batch_size %d',
parameter_id, trial_info_idx, self.finished_trials, self.inf_batch_size)
if self.finished_trials == self.inf_batch_size:
logger.debug('Start next round inference in receive_trial_result')
self._next_round_inference() | [
"def",
"receive_trial_result",
"(",
"self",
",",
"parameter_id",
",",
"parameters",
",",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"trial_info_idx",
"=",
"self",
".",
"running_trials",
".",
"pop",
"(",
"parameter_id",
",",
"None",
")",
"assert",
"trial_info_idx",
"is",
"not",
"None",
"value",
"=",
"extract_scalar_reward",
"(",
"value",
")",
"if",
"self",
".",
"optimize_mode",
"==",
"OptimizeMode",
".",
"Minimize",
":",
"value",
"=",
"-",
"value",
"self",
".",
"trials_result",
"[",
"trial_info_idx",
"]",
"=",
"value",
"self",
".",
"finished_trials",
"+=",
"1",
"logger",
".",
"debug",
"(",
"'receive_trial_result, parameter_id %d, trial_info_idx %d, finished_trials %d, inf_batch_size %d'",
",",
"parameter_id",
",",
"trial_info_idx",
",",
"self",
".",
"finished_trials",
",",
"self",
".",
"inf_batch_size",
")",
"if",
"self",
".",
"finished_trials",
"==",
"self",
".",
"inf_batch_size",
":",
"logger",
".",
"debug",
"(",
"'Start next round inference in receive_trial_result'",
")",
"self",
".",
"_next_round_inference",
"(",
")"
] | [
585,
4
] | [
613,
40
] | python | en | ['en', 'error', 'th'] | False |
PPOTuner.trial_end | (self, parameter_id, success, **kwargs) |
Deal with trial failure. If a trial fails, it is popped out of ``self.running_trials``,
and its final result is assigned the average of the finished trials.
Parameters
----------
parameter_id : int
Unique identifier for hyper-parameters used by this trial.
success : bool
True if the trial successfully completed; False if failed or terminated.
**kwargs
Not used
|
Deal with trial failure. If a trial fails, it is popped out of ``self.running_trials``,
and its final result is assigned the average of the finished trials. | def trial_end(self, parameter_id, success, **kwargs):
"""
Deal with trial failure. If a trial fails, it is popped out of ``self.running_trials``,
and its final result is assigned the average of the finished trials.
Parameters
----------
parameter_id : int
Unique identifier for hyper-parameters used by this trial.
success : bool
True if the trial successfully completed; False if failed or terminated.
**kwargs
Not used
"""
if not success:
if parameter_id not in self.running_trials:
logger.warning('The trial failed, but self.running_trials does not have this trial')
return
trial_info_idx = self.running_trials.pop(parameter_id, None)
assert trial_info_idx is not None
# use mean of finished trials as the result of this failed trial
values = [val for val in self.trials_result if val is not None]
logger.warning('In trial_end, values: %s', values)
self.trials_result[trial_info_idx] = (sum(values) / len(values)) if values else 0
self.finished_trials += 1
if self.finished_trials == self.inf_batch_size:
logger.debug('Start next round inference in trial_end')
self._next_round_inference() | [
"def",
"trial_end",
"(",
"self",
",",
"parameter_id",
",",
"success",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"success",
":",
"if",
"parameter_id",
"not",
"in",
"self",
".",
"running_trials",
":",
"logger",
".",
"warning",
"(",
"'The trial is failed, but self.running_trial does not have this trial'",
")",
"return",
"trial_info_idx",
"=",
"self",
".",
"running_trials",
".",
"pop",
"(",
"parameter_id",
",",
"None",
")",
"assert",
"trial_info_idx",
"is",
"not",
"None",
"# use mean of finished trials as the result of this failed trial",
"values",
"=",
"[",
"val",
"for",
"val",
"in",
"self",
".",
"trials_result",
"if",
"val",
"is",
"not",
"None",
"]",
"logger",
".",
"warning",
"(",
"'In trial_end, values: %s'",
",",
"values",
")",
"self",
".",
"trials_result",
"[",
"trial_info_idx",
"]",
"=",
"(",
"sum",
"(",
"values",
")",
"/",
"len",
"(",
"values",
")",
")",
"if",
"values",
"else",
"0",
"self",
".",
"finished_trials",
"+=",
"1",
"if",
"self",
".",
"finished_trials",
"==",
"self",
".",
"inf_batch_size",
":",
"logger",
".",
"debug",
"(",
"'Start next round inference in trial_end'",
")",
"self",
".",
"_next_round_inference",
"(",
")"
] | [
615,
4
] | [
642,
44
] | python | en | ['en', 'error', 'th'] | False |
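A quick worked example of the fallback above: the failed trial's slot is filled with the mean of the results that did come back, so the subsequent PPO reward computation still sees a full batch.

trials_result = [0.8, None, 0.6, None]    # index 1 failed; index 3 still pending
failed_idx = 1
values = [v for v in trials_result if v is not None]
trials_result[failed_idx] = sum(values) / len(values) if values else 0
print(trials_result)                      # [0.8, 0.7, 0.6, None]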
PPOTuner.import_data | (self, data) |
Import additional data for tuning, not supported yet.
Parameters
----------
data : list
A list of dictionaries, each of which has at least two keys, ``parameter`` and ``value``
|
Import additional data for tuning, not supported yet. | def import_data(self, data):
"""
Import additional data for tuning, not supported yet.
Parameters
----------
data : list
A list of dictionaries, each of which has at least two keys, ``parameter`` and ``value``
"""
logger.warning('PPOTuner cannot leverage imported data.') | [
"def",
"import_data",
"(",
"self",
",",
"data",
")",
":",
"logger",
".",
"warning",
"(",
"'PPOTuner cannot leverage imported data.'",
")"
] | [
644,
4
] | [
653,
65
] | python | en | ['en', 'error', 'th'] | False |
save_summaries | (summaries, path, original_document_name) | Write the summaries to files whose names are the original
files' names with `_summary` appended.
Attributes:
original_document_name: List[string]
Name of the document that was summarized.
path: string
Path where the summaries will be written
summaries: List[string]
The summaries that we produced.
| Write the summaries to files whose names are the original
files' names with `_summary` appended. | def save_summaries(summaries, path, original_document_name):
"""Write the summaries in fies that are prefixed by the original
files' name with the `_summary` appended.
Attributes:
original_document_name: List[string]
Name of the document that was summarized.
path: string
Path where the summaries will be written
summaries: List[string]
The summaries that we produced.
"""
for summary, document_name in zip(summaries, original_document_name):
# Prepare the summary file's name
if "." in document_name:
bare_document_name = ".".join(document_name.split(".")[:-1])
extension = document_name.split(".")[-1]
name = bare_document_name + "_summary." + extension
else:
name = document_name + "_summary"
file_path = os.path.join(path, name)
with open(file_path, "w") as output:
output.write(summary) | [
"def",
"save_summaries",
"(",
"summaries",
",",
"path",
",",
"original_document_name",
")",
":",
"for",
"summary",
",",
"document_name",
"in",
"zip",
"(",
"summaries",
",",
"original_document_name",
")",
":",
"# Prepare the summary file's name",
"if",
"\".\"",
"in",
"document_name",
":",
"bare_document_name",
"=",
"\".\"",
".",
"join",
"(",
"document_name",
".",
"split",
"(",
"\".\"",
")",
"[",
":",
"-",
"1",
"]",
")",
"extension",
"=",
"document_name",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
"name",
"=",
"bare_document_name",
"+",
"\"_summary.\"",
"+",
"extension",
"else",
":",
"name",
"=",
"document_name",
"+",
"\"_summary\"",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"name",
")",
"with",
"open",
"(",
"file_path",
",",
"\"w\"",
")",
"as",
"output",
":",
"output",
".",
"write",
"(",
"summary",
")"
] | [
100,
0
] | [
123,
33
] | python | en | ['en', 'en', 'en'] | True |
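The filename construction is a small amount of string surgery; a quick check of both branches:

for document_name in ('report.txt', 'README'):
    if '.' in document_name:
        bare = '.'.join(document_name.split('.')[:-1])
        ext = document_name.split('.')[-1]
        name = bare + '_summary.' + ext
    else:
        name = document_name + '_summary'
    print(name)   # report_summary.txt, then README_summary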
format_summary | (translation) | Transforms the output of the `from_batch` function
into nicely formatted summaries.
| Transforms the output of the `from_batch` function
into nicely formatted summaries.
| def format_summary(translation):
"""Transforms the output of the `from_batch` function
into nicely formatted summaries.
"""
raw_summary, _, _ = translation
summary = (
raw_summary.replace("[unused0]", "")
.replace("[unused3]", "")
.replace("[PAD]", "")
.replace("[unused1]", "")
.replace(r" +", " ")
.replace(" [unused2] ", ". ")
.replace("[unused2]", "")
.strip()
)
return summary | [
"def",
"format_summary",
"(",
"translation",
")",
":",
"raw_summary",
",",
"_",
",",
"_",
"=",
"translation",
"summary",
"=",
"(",
"raw_summary",
".",
"replace",
"(",
"\"[unused0]\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"[unused3]\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"[PAD]\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"[unused1]\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"r\" +\"",
",",
"\" \"",
")",
".",
"replace",
"(",
"\" [unused2] \"",
",",
"\". \"",
")",
".",
"replace",
"(",
"\"[unused2]\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
")",
"return",
"summary"
] | [
126,
0
] | [
142,
18
] | python | en | ['en', 'en', 'en'] | True |
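A usage sketch of the cleanup chain above. One caveat worth flagging: str.replace takes no regex, so .replace(r" +", " ") matches only the literal two-character string " +" and runs of spaces are not actually collapsed.

raw_summary = '[unused0] the cat sat [unused2] it slept [unused1] [PAD]'
summary = (raw_summary.replace('[unused0]', '').replace('[unused3]', '')
           .replace('[PAD]', '').replace('[unused1]', '')
           .replace(r' +', ' ')              # literal match only -- see note above
           .replace(' [unused2] ', '. ').replace('[unused2]', '')
           .strip())
print(summary)   # the cat sat. it slept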
collate | (data, tokenizer, block_size, device) | Collate formats the data passed to the data loader.
In particular, we tokenize the data batch by batch to avoid keeping it
all in memory. We output the data as a namedtuple to fit the original BertAbs's
API.
| Collate formats the data passed to the data loader. | def collate(data, tokenizer, block_size, device):
"""Collate formats the data passed to the data loader.
In particular, we tokenize the data batch by batch to avoid keeping it
all in memory. We output the data as a namedtuple to fit the original BertAbs's
API.
"""
data = [x for x in data if not len(x[1]) == 0] # remove empty_files
names = [name for name, _, _ in data]
summaries = [" ".join(summary_list) for _, _, summary_list in data]
encoded_text = [encode_for_summarization(story, summary, tokenizer) for _, story, summary in data]
encoded_stories = torch.tensor(
[truncate_or_pad(story, block_size, tokenizer.pad_token_id) for story, _ in encoded_text]
)
encoder_token_type_ids = compute_token_type_ids(encoded_stories, tokenizer.cls_token_id)
encoder_mask = build_mask(encoded_stories, tokenizer.pad_token_id)
batch = Batch(
document_names=names,
batch_size=len(encoded_stories),
src=encoded_stories.to(device),
segs=encoder_token_type_ids.to(device),
mask_src=encoder_mask.to(device),
tgt_str=summaries,
)
return batch | [
"def",
"collate",
"(",
"data",
",",
"tokenizer",
",",
"block_size",
",",
"device",
")",
":",
"data",
"=",
"[",
"x",
"for",
"x",
"in",
"data",
"if",
"not",
"len",
"(",
"x",
"[",
"1",
"]",
")",
"==",
"0",
"]",
"# remove empty_files",
"names",
"=",
"[",
"name",
"for",
"name",
",",
"_",
",",
"_",
"in",
"data",
"]",
"summaries",
"=",
"[",
"\" \"",
".",
"join",
"(",
"summary_list",
")",
"for",
"_",
",",
"_",
",",
"summary_list",
"in",
"data",
"]",
"encoded_text",
"=",
"[",
"encode_for_summarization",
"(",
"story",
",",
"summary",
",",
"tokenizer",
")",
"for",
"_",
",",
"story",
",",
"summary",
"in",
"data",
"]",
"encoded_stories",
"=",
"torch",
".",
"tensor",
"(",
"[",
"truncate_or_pad",
"(",
"story",
",",
"block_size",
",",
"tokenizer",
".",
"pad_token_id",
")",
"for",
"story",
",",
"_",
"in",
"encoded_text",
"]",
")",
"encoder_token_type_ids",
"=",
"compute_token_type_ids",
"(",
"encoded_stories",
",",
"tokenizer",
".",
"cls_token_id",
")",
"encoder_mask",
"=",
"build_mask",
"(",
"encoded_stories",
",",
"tokenizer",
".",
"pad_token_id",
")",
"batch",
"=",
"Batch",
"(",
"document_names",
"=",
"names",
",",
"batch_size",
"=",
"len",
"(",
"encoded_stories",
")",
",",
"src",
"=",
"encoded_stories",
".",
"to",
"(",
"device",
")",
",",
"segs",
"=",
"encoder_token_type_ids",
".",
"to",
"(",
"device",
")",
",",
"mask_src",
"=",
"encoder_mask",
".",
"to",
"(",
"device",
")",
",",
"tgt_str",
"=",
"summaries",
",",
")",
"return",
"batch"
] | [
207,
0
] | [
234,
16
] | python | en | ['en', 'en', 'en'] | True |
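A minimal stand-in for the truncate_or_pad helper used above (an assumption based on the name and the call site, not the actual implementation): sequences longer than block_size are cut, shorter ones are right-padded with pad_token_id.

def truncate_or_pad(sequence, block_size, pad_token_id):
    # cut to block_size, or right-pad up to it
    return sequence[:block_size] + [pad_token_id] * max(0, block_size - len(sequence))

print(truncate_or_pad([1, 2, 3], 5, 0))           # [1, 2, 3, 0, 0]
print(truncate_or_pad([1, 2, 3, 4, 5, 6], 5, 0))  # [1, 2, 3, 4, 5]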
decode_summary | (summary_tokens, tokenizer) | Decode the summary and return it in a format
suitable for evaluation.
| Decode the summary and return it in a format
suitable for evaluation.
| def decode_summary(summary_tokens, tokenizer):
"""Decode the summary and return it in a format
suitable for evaluation.
"""
summary_tokens = summary_tokens.to("cpu").numpy()
summary = tokenizer.decode(summary_tokens)
sentences = summary.split(".")
sentences = [s + "." for s in sentences]
return sentences | [
"def",
"decode_summary",
"(",
"summary_tokens",
",",
"tokenizer",
")",
":",
"summary_tokens",
"=",
"summary_tokens",
".",
"to",
"(",
"\"cpu\"",
")",
".",
"numpy",
"(",
")",
"summary",
"=",
"tokenizer",
".",
"decode",
"(",
"summary_tokens",
")",
"sentences",
"=",
"summary",
".",
"split",
"(",
"\".\"",
")",
"sentences",
"=",
"[",
"s",
"+",
"\".\"",
"for",
"s",
"in",
"sentences",
"]",
"return",
"sentences"
] | [
237,
0
] | [
245,
20
] | python | en | ['en', 'en', 'en'] | True |
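Note one quirk of the splitting: split('.') leaves an empty trailing element when the text ends with a period, so the sentence list gains a stray '.' entry.

summary = 'the cat sat. it slept.'
sentences = [s + '.' for s in summary.split('.')]
print(sentences)   # ['the cat sat.', ' it slept.', '.']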
main | () | The main function defines the interface with the users. | The main function defines the interface with the users. | def main():
"""The main function defines the interface with the users."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--documents_dir",
default=None,
type=str,
required=True,
help="The folder where the documents to summarize are located.",
)
parser.add_argument(
"--summaries_output_dir",
default=None,
type=str,
required=False,
help="The folder in wich the summaries should be written. Defaults to the folder where the documents are",
)
parser.add_argument(
"--compute_rouge",
default=False,
type=bool,
required=False,
help="Compute the ROUGE metrics during evaluation. Only available for the CNN/DailyMail dataset.",
)
# EVALUATION options
parser.add_argument(
"--no_cuda",
default=False,
type=bool,
help="Whether to force the execution on CPU.",
)
parser.add_argument(
"--batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for training.",
)
# BEAM SEARCH arguments
parser.add_argument(
"--min_length",
default=50,
type=int,
help="Minimum number of tokens for the summaries.",
)
parser.add_argument(
"--max_length",
default=200,
type=int,
help="Maixmum number of tokens for the summaries.",
)
parser.add_argument(
"--beam_size",
default=5,
type=int,
help="The number of beams to start with for each example.",
)
parser.add_argument(
"--alpha",
default=0.95,
type=float,
help="The value of alpha for the length penalty in the beam search.",
)
parser.add_argument(
"--block_trigram",
default=True,
type=bool,
help="Whether to block the existence of repeating trigrams in the text generated by beam search.",
)
args = parser.parse_args()
# Select device (distributed not available)
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
# Check the existence of directories
if not args.summaries_output_dir:
args.summaries_output_dir = args.documents_dir
if not documents_dir_is_valid(args.documents_dir):
raise FileNotFoundError(
"We could not find the directory you specified for the documents to summarize, or it was empty. Please specify a valid path."
)
os.makedirs(args.summaries_output_dir, exist_ok=True)
evaluate(args) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"--documents_dir\"",
",",
"default",
"=",
"None",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"The folder where the documents to summarize are located.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--summaries_output_dir\"",
",",
"default",
"=",
"None",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
",",
"help",
"=",
"\"The folder in wich the summaries should be written. Defaults to the folder where the documents are\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--compute_rouge\"",
",",
"default",
"=",
"False",
",",
"type",
"=",
"bool",
",",
"required",
"=",
"False",
",",
"help",
"=",
"\"Compute the ROUGE metrics during evaluation. Only available for the CNN/DailyMail dataset.\"",
",",
")",
"# EVALUATION options",
"parser",
".",
"add_argument",
"(",
"\"--no_cuda\"",
",",
"default",
"=",
"False",
",",
"type",
"=",
"bool",
",",
"help",
"=",
"\"Whether to force the execution on CPU.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--batch_size\"",
",",
"default",
"=",
"4",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Batch size per GPU/CPU for training.\"",
",",
")",
"# BEAM SEARCH arguments",
"parser",
".",
"add_argument",
"(",
"\"--min_length\"",
",",
"default",
"=",
"50",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Minimum number of tokens for the summaries.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--max_length\"",
",",
"default",
"=",
"200",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Maixmum number of tokens for the summaries.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--beam_size\"",
",",
"default",
"=",
"5",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"The number of beams to start with for each example.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--alpha\"",
",",
"default",
"=",
"0.95",
",",
"type",
"=",
"float",
",",
"help",
"=",
"\"The value of alpha for the length penalty in the beam search.\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--block_trigram\"",
",",
"default",
"=",
"True",
",",
"type",
"=",
"bool",
",",
"help",
"=",
"\"Whether to block the existence of repeating trigrams in the text generated by beam search.\"",
",",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# Select device (distibuted not available)",
"args",
".",
"device",
"=",
"torch",
".",
"device",
"(",
"\"cuda\"",
"if",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
"and",
"not",
"args",
".",
"no_cuda",
"else",
"\"cpu\"",
")",
"# Check the existence of directories",
"if",
"not",
"args",
".",
"summaries_output_dir",
":",
"args",
".",
"summaries_output_dir",
"=",
"args",
".",
"documents_dir",
"if",
"not",
"documents_dir_is_valid",
"(",
"args",
".",
"documents_dir",
")",
":",
"raise",
"FileNotFoundError",
"(",
"\"We could not find the directory you specified for the documents to summarize, or it was empty. Please specify a valid path.\"",
")",
"os",
".",
"makedirs",
"(",
"args",
".",
"summaries_output_dir",
",",
"exist_ok",
"=",
"True",
")",
"evaluate",
"(",
"args",
")"
] | [
248,
0
] | [
331,
18
] | python | en | ['en', 'en', 'en'] | True |
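One pitfall in the argument definitions above: type=bool makes argparse call bool() on the raw string, and any non-empty string is truthy, so --no_cuda False still yields True. A short demonstration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--no_cuda', default=False, type=bool)
print(parser.parse_args(['--no_cuda', 'False']).no_cuda)   # True -- bool('False') is truthy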
setup_platform | (hass, config, add_entities, discovery_info=None) | Set up Hive climate devices. | Set up Hive climate devices. | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Hive climate devices."""
if discovery_info is None:
return
session = hass.data.get(DATA_HIVE)
devs = []
for dev in discovery_info:
devs.append(HiveClimateEntity(session, dev))
add_entities(devs) | [
"def",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
",",
"discovery_info",
"=",
"None",
")",
":",
"if",
"discovery_info",
"is",
"None",
":",
"return",
"session",
"=",
"hass",
".",
"data",
".",
"get",
"(",
"DATA_HIVE",
")",
"devs",
"=",
"[",
"]",
"for",
"dev",
"in",
"discovery_info",
":",
"devs",
".",
"append",
"(",
"HiveClimateEntity",
"(",
"session",
",",
"dev",
")",
")",
"add_entities",
"(",
"devs",
")"
] | [
41,
0
] | [
50,
22
] | python | en | ['fr', 'en', 'en'] | True |
HiveClimateEntity.__init__ | (self, hive_session, hive_device) | Initialize the Climate device. | Initialize the Climate device. | def __init__(self, hive_session, hive_device):
"""Initialize the Climate device."""
super().__init__(hive_session, hive_device)
self.thermostat_node_id = hive_device["Thermostat_NodeID"] | [
"def",
"__init__",
"(",
"self",
",",
"hive_session",
",",
"hive_device",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"hive_session",
",",
"hive_device",
")",
"self",
".",
"thermostat_node_id",
"=",
"hive_device",
"[",
"\"Thermostat_NodeID\"",
"]"
] | [
56,
4
] | [
59,
66
] | python | en | ['en', 'en', 'en'] | True |
HiveClimateEntity.unique_id | (self) | Return unique ID of entity. | Return unique ID of entity. | def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id | [
"def",
"unique_id",
"(",
"self",
")",
":",
"return",
"self",
".",
"_unique_id"
] | [
62,
4
] | [
64,
30
] | python | en | ['en', 'cy', 'en'] | True |
HiveClimateEntity.device_info | (self) | Return device information. | Return device information. | def device_info(self):
"""Return device information."""
return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name} | [
"def",
"device_info",
"(",
"self",
")",
":",
"return",
"{",
"\"identifiers\"",
":",
"{",
"(",
"DOMAIN",
",",
"self",
".",
"unique_id",
")",
"}",
",",
"\"name\"",
":",
"self",
".",
"name",
"}"
] | [
67,
4
] | [
69,
77
] | python | da | ['es', 'da', 'en'] | False |
HiveClimateEntity.supported_features | (self) | Return the list of supported features. | Return the list of supported features. | def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS | [
"def",
"supported_features",
"(",
"self",
")",
":",
"return",
"SUPPORT_FLAGS"
] | [
72,
4
] | [
74,
28
] | python | en | ['en', 'en', 'en'] | True |
HiveClimateEntity.name | (self) | Return the name of the Climate device. | Return the name of the Climate device. | def name(self):
"""Return the name of the Climate device."""
friendly_name = "Heating"
if self.node_name is not None:
if self.device_type == "TRV":
friendly_name = self.node_name
else:
friendly_name = f"{self.node_name} {friendly_name}"
return friendly_name | [
"def",
"name",
"(",
"self",
")",
":",
"friendly_name",
"=",
"\"Heating\"",
"if",
"self",
".",
"node_name",
"is",
"not",
"None",
":",
"if",
"self",
".",
"device_type",
"==",
"\"TRV\"",
":",
"friendly_name",
"=",
"self",
".",
"node_name",
"else",
":",
"friendly_name",
"=",
"f\"{self.node_name} {friendly_name}\"",
"return",
"friendly_name"
] | [
77,
4
] | [
86,
28
] | python | en | ['en', 'en', 'en'] | True |
HiveClimateEntity.device_state_attributes | (self) | Show Device Attributes. | Show Device Attributes. | def device_state_attributes(self):
"""Show Device Attributes."""
return self.attributes | [
"def",
"device_state_attributes",
"(",
"self",
")",
":",
"return",
"self",
".",
"attributes"
] | [
89,
4
] | [
91,
30
] | python | en | ['en', 'en', 'en'] | True |
HiveClimateEntity.hvac_modes | (self) | Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
| Return the list of available hvac operation modes. | def hvac_modes(self):
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return SUPPORT_HVAC | [
"def",
"hvac_modes",
"(",
"self",
")",
":",
"return",
"SUPPORT_HVAC"
] | [
94,
4
] | [
99,
27
] | python | en | ['en', 'en', 'en'] | True |
HiveClimateEntity.hvac_mode | (self) | Return hvac operation, i.e. heat, cool mode.
Need to be one of HVAC_MODE_*.
| Return hvac operation, i.e. heat, cool mode. | def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
return HIVE_TO_HASS_STATE[self.session.heating.get_mode(self.node_id)] | [
"def",
"hvac_mode",
"(",
"self",
")",
":",
"return",
"HIVE_TO_HASS_STATE",
"[",
"self",
".",
"session",
".",
"heating",
".",
"get_mode",
"(",
"self",
".",
"node_id",
")",
"]"
] | [
102,
4
] | [
107,
78
] | python | bg | ['en', 'bg', 'bg'] | True |
HiveClimateEntity.hvac_action | (self) | Return current HVAC action. | Return current HVAC action. | def hvac_action(self):
"""Return current HVAC action."""
return HIVE_TO_HASS_HVAC_ACTION[
self.session.heating.operational_status(self.node_id, self.device_type)
] | [
"def",
"hvac_action",
"(",
"self",
")",
":",
"return",
"HIVE_TO_HASS_HVAC_ACTION",
"[",
"self",
".",
"session",
".",
"heating",
".",
"operational_status",
"(",
"self",
".",
"node_id",
",",
"self",
".",
"device_type",
")",
"]"
] | [
110,
4
] | [
114,
9
] | python | en | ['en', 'da', 'en'] | True |
HiveClimateEntity.temperature_unit | (self) | Return the unit of measurement. | Return the unit of measurement. | def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS | [
"def",
"temperature_unit",
"(",
"self",
")",
":",
"return",
"TEMP_CELSIUS"
] | [
117,
4
] | [
119,
27
] | python | en | ['en', 'la', 'en'] | True |
HiveClimateEntity.current_temperature | (self) | Return the current temperature. | Return the current temperature. | def current_temperature(self):
"""Return the current temperature."""
return self.session.heating.current_temperature(self.node_id) | [
"def",
"current_temperature",
"(",
"self",
")",
":",
"return",
"self",
".",
"session",
".",
"heating",
".",
"current_temperature",
"(",
"self",
".",
"node_id",
")"
] | [
122,
4
] | [
124,
69
] | python | en | ['en', 'la', 'en'] | True |
HiveClimateEntity.target_temperature | (self) | Return the target temperature. | Return the target temperature. | def target_temperature(self):
"""Return the target temperature."""
return self.session.heating.get_target_temperature(self.node_id) | [
"def",
"target_temperature",
"(",
"self",
")",
":",
"return",
"self",
".",
"session",
".",
"heating",
".",
"get_target_temperature",
"(",
"self",
".",
"node_id",
")"
] | [
127,
4
] | [
129,
72
] | python | en | ['en', 'la', 'en'] | True |
HiveClimateEntity.min_temp | (self) | Return minimum temperature. | Return minimum temperature. | def min_temp(self):
"""Return minimum temperature."""
return self.session.heating.min_temperature(self.node_id) | [
"def",
"min_temp",
"(",
"self",
")",
":",
"return",
"self",
".",
"session",
".",
"heating",
".",
"min_temperature",
"(",
"self",
".",
"node_id",
")"
] | [
132,
4
] | [
134,
65
] | python | de | ['de', 'la', 'en'] | False |
HiveClimateEntity.max_temp | (self) | Return the maximum temperature. | Return the maximum temperature. | def max_temp(self):
"""Return the maximum temperature."""
return self.session.heating.max_temperature(self.node_id) | [
"def",
"max_temp",
"(",
"self",
")",
":",
"return",
"self",
".",
"session",
".",
"heating",
".",
"max_temperature",
"(",
"self",
".",
"node_id",
")"
] | [
137,
4
] | [
139,
65
] | python | en | ['en', 'la', 'en'] | True |
HiveClimateEntity.preset_mode | (self) | Return the current preset mode, e.g., home, away, temp. | Return the current preset mode, e.g., home, away, temp. | def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp."""
if (
self.device_type == "Heating"
and self.session.heating.get_boost(self.node_id) == "ON"
):
return PRESET_BOOST
return None | [
"def",
"preset_mode",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"device_type",
"==",
"\"Heating\"",
"and",
"self",
".",
"session",
".",
"heating",
".",
"get_boost",
"(",
"self",
".",
"node_id",
")",
"==",
"\"ON\"",
")",
":",
"return",
"PRESET_BOOST",
"return",
"None"
] | [
142,
4
] | [
149,
19
] | python | en | ['en', 'pt', 'en'] | True |
HiveClimateEntity.preset_modes | (self) | Return a list of available preset modes. | Return a list of available preset modes. | def preset_modes(self):
"""Return a list of available preset modes."""
return SUPPORT_PRESET | [
"def",
"preset_modes",
"(",
"self",
")",
":",
"return",
"SUPPORT_PRESET"
] | [
152,
4
] | [
154,
29
] | python | en | ['en', 'en', 'en'] | True |
HiveClimateEntity.set_hvac_mode | (self, hvac_mode) | Set new target hvac mode. | Set new target hvac mode. | def set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
new_mode = HASS_TO_HIVE_STATE[hvac_mode]
self.session.heating.set_mode(self.node_id, new_mode) | [
"def",
"set_hvac_mode",
"(",
"self",
",",
"hvac_mode",
")",
":",
"new_mode",
"=",
"HASS_TO_HIVE_STATE",
"[",
"hvac_mode",
"]",
"self",
".",
"session",
".",
"heating",
".",
"set_mode",
"(",
"self",
".",
"node_id",
",",
"new_mode",
")"
] | [
157,
4
] | [
160,
61
] | python | da | ['da', 'su', 'en'] | False |
HiveClimateEntity.set_temperature | (self, **kwargs) | Set new target temperature. | Set new target temperature. | def set_temperature(self, **kwargs):
"""Set new target temperature."""
new_temperature = kwargs.get(ATTR_TEMPERATURE)
if new_temperature is not None:
self.session.heating.set_target_temperature(self.node_id, new_temperature) | [
"def",
"set_temperature",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"new_temperature",
"=",
"kwargs",
".",
"get",
"(",
"ATTR_TEMPERATURE",
")",
"if",
"new_temperature",
"is",
"not",
"None",
":",
"self",
".",
"session",
".",
"heating",
".",
"set_target_temperature",
"(",
"self",
".",
"node_id",
",",
"new_temperature",
")"
] | [
163,
4
] | [
167,
86
] | python | en | ['en', 'ca', 'en'] | True |
HiveClimateEntity.set_preset_mode | (self, preset_mode) | Set new preset mode. | Set new preset mode. | def set_preset_mode(self, preset_mode):
"""Set new preset mode."""
if preset_mode == PRESET_NONE and self.preset_mode == PRESET_BOOST:
self.session.heating.turn_boost_off(self.node_id)
elif preset_mode == PRESET_BOOST:
curtemp = round(self.current_temperature * 2) / 2
temperature = curtemp + 0.5
self.session.heating.turn_boost_on(self.node_id, 30, temperature) | [
"def",
"set_preset_mode",
"(",
"self",
",",
"preset_mode",
")",
":",
"if",
"preset_mode",
"==",
"PRESET_NONE",
"and",
"self",
".",
"preset_mode",
"==",
"PRESET_BOOST",
":",
"self",
".",
"session",
".",
"heating",
".",
"turn_boost_off",
"(",
"self",
".",
"node_id",
")",
"elif",
"preset_mode",
"==",
"PRESET_BOOST",
":",
"curtemp",
"=",
"round",
"(",
"self",
".",
"current_temperature",
"*",
"2",
")",
"/",
"2",
"temperature",
"=",
"curtemp",
"+",
"0.5",
"self",
".",
"session",
".",
"heating",
".",
"turn_boost_on",
"(",
"self",
".",
"node_id",
",",
"30",
",",
"temperature",
")"
] | [
170,
4
] | [
177,
77
] | python | en | ['en', 'sr', 'en'] | True |
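The boost target works out as: snap the current temperature to the nearest 0.5 degree, then add half a degree on top. For example:

current_temperature = 19.7
curtemp = round(current_temperature * 2) / 2   # 39.4 -> 39 -> 19.5
temperature = curtemp + 0.5                    # boost target: 20.0
print(curtemp, temperature)                    # 19.5 20.0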
HiveClimateEntity.update | (self) | Update all Node data from Hive. | Update all Node data from Hive. | def update(self):
"""Update all Node data from Hive."""
self.session.core.update_data(self.node_id)
self.attributes = self.session.attributes.state_attributes(
self.thermostat_node_id
) | [
"def",
"update",
"(",
"self",
")",
":",
"self",
".",
"session",
".",
"core",
".",
"update_data",
"(",
"self",
".",
"node_id",
")",
"self",
".",
"attributes",
"=",
"self",
".",
"session",
".",
"attributes",
".",
"state_attributes",
"(",
"self",
".",
"thermostat_node_id",
")"
] | [
179,
4
] | [
184,
9
] | python | en | ['en', 'en', 'en'] | True |
ScheduledOptim.step_and_update_lr | (self) | Step with the inner optimizer | Step with the inner optimizer | def step_and_update_lr(self):
"Step with the inner optimizer"
self._update_learning_rate()
self._optimizer.step() | [
"def",
"step_and_update_lr",
"(",
"self",
")",
":",
"self",
".",
"_update_learning_rate",
"(",
")",
"self",
".",
"_optimizer",
".",
"step",
"(",
")"
] | [
14,
4
] | [
17,
30
] | python | en | ['en', 'en', 'en'] | True |
ScheduledOptim.zero_grad | (self) | Zero out the gradients with the inner optimizer | Zero out the gradients with the inner optimizer | def zero_grad(self):
"Zero out the gradients with the inner optimizer"
self._optimizer.zero_grad() | [
"def",
"zero_grad",
"(",
"self",
")",
":",
"self",
".",
"_optimizer",
".",
"zero_grad",
"(",
")"
] | [
20,
4
] | [
22,
35
] | python | en | ['en', 'en', 'en'] | True |
ScheduledOptim._update_learning_rate | (self) | Learning rate scheduling per step | Learning rate scheduling per step | def _update_learning_rate(self):
''' Learning rate scheduling per step '''
self.n_steps += 1
lr = self.lr_mul * self._get_lr_scale()
for param_group in self._optimizer.param_groups:
param_group['lr'] = lr | [
"def",
"_update_learning_rate",
"(",
"self",
")",
":",
"self",
".",
"n_steps",
"+=",
"1",
"lr",
"=",
"self",
".",
"lr_mul",
"*",
"self",
".",
"_get_lr_scale",
"(",
")",
"for",
"param_group",
"in",
"self",
".",
"_optimizer",
".",
"param_groups",
":",
"param_group",
"[",
"'lr'",
"]",
"=",
"lr"
] | [
31,
4
] | [
38,
34
] | python | de | ['de', 'en', 'it'] | False |
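_get_lr_scale is not included in this excerpt; in the original Transformer ("Noam") schedule it is typically (d_model ** -0.5) * min(step ** -0.5, step * warmup ** -1.5), i.e. linear warmup followed by inverse-square-root decay. A sketch under that assumption:

def get_lr_scale(n_steps, d_model=512, n_warmup_steps=4000):
    # assumed form of _get_lr_scale; not taken from this excerpt
    return (d_model ** -0.5) * min(n_steps ** -0.5, n_steps * n_warmup_steps ** -1.5)

for step in (1, 2000, 4000, 16000):
    print(step, get_lr_scale(step))
# the scale rises linearly during warmup and decays as step ** -0.5 afterwards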
assert_tensors_close | (a, b, atol=1e-12, prefix="") | If the tensors have different shapes or different values, or a and b are not both tensors, raise a nice AssertionError. | If the tensors have different shapes or different values, or a and b are not both tensors, raise a nice AssertionError. | def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg) | [
"def",
"assert_tensors_close",
"(",
"a",
",",
"b",
",",
"atol",
"=",
"1e-12",
",",
"prefix",
"=",
"\"\"",
")",
":",
"if",
"a",
"is",
"None",
"and",
"b",
"is",
"None",
":",
"return",
"True",
"try",
":",
"if",
"torch",
".",
"allclose",
"(",
"a",
",",
"b",
",",
"atol",
"=",
"atol",
")",
":",
"return",
"True",
"raise",
"except",
"Exception",
":",
"pct_different",
"=",
"(",
"torch",
".",
"gt",
"(",
"(",
"a",
"-",
"b",
")",
".",
"abs",
"(",
")",
",",
"atol",
")",
")",
".",
"float",
"(",
")",
".",
"mean",
"(",
")",
".",
"item",
"(",
")",
"if",
"a",
".",
"numel",
"(",
")",
">",
"100",
":",
"msg",
"=",
"f\"tensor values are {pct_different:.1%} percent different.\"",
"else",
":",
"msg",
"=",
"f\"{a} != {b}\"",
"if",
"prefix",
":",
"msg",
"=",
"prefix",
"+",
"\": \"",
"+",
"msg",
"raise",
"AssertionError",
"(",
"msg",
")"
] | [
472,
0
] | [
488,
33
] | python | en | ['en', 'en', 'en'] | True |
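A usage sketch, assuming the function above is in scope. The bare raise inside the try is deliberate: with no active exception it raises a RuntimeError, which the except branch catches in order to format the message (percent-different once the tensors exceed 100 elements, the raw values otherwise).

import torch

a = torch.zeros(3)
b = torch.tensor([0.0, 0.0, 1e-3])
try:
    assert_tensors_close(a, b, atol=1e-6, prefix='logits')
except AssertionError as err:
    print(err)   # logits: tensor([0., 0., 0.]) != tensor([0.0000, 0.0000, 0.0010])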
setup_platform | (hass, config, add_entities, discovery_info=None) | Set up the Pandora media player platform. | Set up the Pandora media player platform. | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Pandora media player platform."""
if not _pianobar_exists():
return False
pandora = PandoraMediaPlayer("Pandora")
# Make sure we end the pandora subprocess on exit in case user doesn't
# power it down.
def _stop_pianobar(_event):
pandora.turn_off()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_pianobar)
add_entities([pandora]) | [
"def",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
",",
"discovery_info",
"=",
"None",
")",
":",
"if",
"not",
"_pianobar_exists",
"(",
")",
":",
"return",
"False",
"pandora",
"=",
"PandoraMediaPlayer",
"(",
"\"Pandora\"",
")",
"# Make sure we end the pandora subprocess on exit in case user doesn't",
"# power it down.",
"def",
"_stop_pianobar",
"(",
"_event",
")",
":",
"pandora",
".",
"turn_off",
"(",
")",
"hass",
".",
"bus",
".",
"listen_once",
"(",
"EVENT_HOMEASSISTANT_STOP",
",",
"_stop_pianobar",
")",
"add_entities",
"(",
"[",
"pandora",
"]",
")"
] | [
59,
0
] | [
71,
27
] | python | en | ['en', 'lv', 'en'] | True |
_pianobar_exists | () | Verify that Pianobar is properly installed. | Verify that Pianobar is properly installed. | def _pianobar_exists():
"""Verify that Pianobar is properly installed."""
pianobar_exe = shutil.which("pianobar")
if pianobar_exe:
return True
_LOGGER.warning(
"The Pandora integration depends on the Pianobar client, which "
"cannot be found. Please install using instructions at "
"https://www.home-assistant.io/integrations/media_player.pandora/"
)
return False | [
"def",
"_pianobar_exists",
"(",
")",
":",
"pianobar_exe",
"=",
"shutil",
".",
"which",
"(",
"\"pianobar\"",
")",
"if",
"pianobar_exe",
":",
"return",
"True",
"_LOGGER",
".",
"warning",
"(",
"\"The Pandora integration depends on the Pianobar client, which \"",
"\"cannot be found. Please install using instructions at \"",
"\"https://www.home-assistant.io/integrations/media_player.pandora/\"",
")",
"return",
"False"
] | [
371,
0
] | [
382,
16
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.__init__ | (self, name) | Initialize the Pandora device. | Initialize the Pandora device. | def __init__(self, name):
"""Initialize the Pandora device."""
self._name = name
self._player_state = STATE_OFF
self._station = ""
self._media_title = ""
self._media_artist = ""
self._media_album = ""
self._stations = []
self._time_remaining = 0
self._media_duration = 0
self._pianobar = None | [
"def",
"__init__",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"_name",
"=",
"name",
"self",
".",
"_player_state",
"=",
"STATE_OFF",
"self",
".",
"_station",
"=",
"\"\"",
"self",
".",
"_media_title",
"=",
"\"\"",
"self",
".",
"_media_artist",
"=",
"\"\"",
"self",
".",
"_media_album",
"=",
"\"\"",
"self",
".",
"_stations",
"=",
"[",
"]",
"self",
".",
"_time_remaining",
"=",
"0",
"self",
".",
"_media_duration",
"=",
"0",
"self",
".",
"_pianobar",
"=",
"None"
] | [
77,
4
] | [
88,
29
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.name | (self) | Return the name of the media player. | Return the name of the media player. | def name(self):
"""Return the name of the media player."""
return self._name | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_name"
] | [
91,
4
] | [
93,
25
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.state | (self) | Return the state of the player. | Return the state of the player. | def state(self):
"""Return the state of the player."""
return self._player_state | [
"def",
"state",
"(",
"self",
")",
":",
"return",
"self",
".",
"_player_state"
] | [
96,
4
] | [
98,
33
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.turn_on | (self) | Turn the media player on. | Turn the media player on. | def turn_on(self):
"""Turn the media player on."""
if self._player_state != STATE_OFF:
return
self._pianobar = pexpect.spawn("pianobar")
_LOGGER.info("Started pianobar subprocess")
mode = self._pianobar.expect(
["Receiving new playlist", "Select station:", "Email:"]
)
if mode == 1:
# station list was presented. dismiss it.
self._pianobar.sendcontrol("m")
elif mode == 2:
_LOGGER.warning(
"The pianobar client is not configured to log in. "
"Please create a configuration file for it as described at "
"https://www.home-assistant.io/integrations/pandora/"
)
# pass through the email/password prompts to quit cleanly
self._pianobar.sendcontrol("m")
self._pianobar.sendcontrol("m")
self._pianobar.terminate()
self._pianobar = None
return
self._update_stations()
self.update_playing_status()
self._player_state = STATE_IDLE
self.schedule_update_ha_state() | [
"def",
"turn_on",
"(",
"self",
")",
":",
"if",
"self",
".",
"_player_state",
"!=",
"STATE_OFF",
":",
"return",
"self",
".",
"_pianobar",
"=",
"pexpect",
".",
"spawn",
"(",
"\"pianobar\"",
")",
"_LOGGER",
".",
"info",
"(",
"\"Started pianobar subprocess\"",
")",
"mode",
"=",
"self",
".",
"_pianobar",
".",
"expect",
"(",
"[",
"\"Receiving new playlist\"",
",",
"\"Select station:\"",
",",
"\"Email:\"",
"]",
")",
"if",
"mode",
"==",
"1",
":",
"# station list was presented. dismiss it.",
"self",
".",
"_pianobar",
".",
"sendcontrol",
"(",
"\"m\"",
")",
"elif",
"mode",
"==",
"2",
":",
"_LOGGER",
".",
"warning",
"(",
"\"The pianobar client is not configured to log in. \"",
"\"Please create a configuration file for it as described at \"",
"\"https://www.home-assistant.io/integrations/pandora/\"",
")",
"# pass through the email/password prompts to quit cleanly",
"self",
".",
"_pianobar",
".",
"sendcontrol",
"(",
"\"m\"",
")",
"self",
".",
"_pianobar",
".",
"sendcontrol",
"(",
"\"m\"",
")",
"self",
".",
"_pianobar",
".",
"terminate",
"(",
")",
"self",
".",
"_pianobar",
"=",
"None",
"return",
"self",
".",
"_update_stations",
"(",
")",
"self",
".",
"update_playing_status",
"(",
")",
"self",
".",
"_player_state",
"=",
"STATE_IDLE",
"self",
".",
"schedule_update_ha_state",
"(",
")"
] | [
100,
4
] | [
128,
39
] | python | en | ['en', 'en', 'en'] | True |
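The key pexpect idiom in turn_on is that expect() returns the index of the first pattern that matched, which is how the three startup outcomes are told apart; sendcontrol('m') sends a carriage return to dismiss prompts. A sketch (it only runs where pianobar is installed; otherwise spawn raises):

import pexpect

child = pexpect.spawn('pianobar')
mode = child.expect(['Receiving new playlist', 'Select station:', 'Email:'])
# mode == 0: playback is starting normally
# mode == 1: a station menu needs dismissing with an Enter keypress
# mode == 2: pianobar is unconfigured and is prompting for credentials
print(mode)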
PandoraMediaPlayer.turn_off | (self) | Turn the media player off. | Turn the media player off. | def turn_off(self):
"""Turn the media player off."""
if self._pianobar is None:
_LOGGER.info("Pianobar subprocess already stopped")
return
self._pianobar.send("q")
try:
_LOGGER.debug("Stopped Pianobar subprocess")
self._pianobar.terminate()
except pexpect.exceptions.TIMEOUT:
# kill the process group
os.killpg(os.getpgid(self._pianobar.pid), signal.SIGTERM)
_LOGGER.debug("Killed Pianobar subprocess")
self._pianobar = None
self._player_state = STATE_OFF
self.schedule_update_ha_state() | [
"def",
"turn_off",
"(",
"self",
")",
":",
"if",
"self",
".",
"_pianobar",
"is",
"None",
":",
"_LOGGER",
".",
"info",
"(",
"\"Pianobar subprocess already stopped\"",
")",
"return",
"self",
".",
"_pianobar",
".",
"send",
"(",
"\"q\"",
")",
"try",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Stopped Pianobar subprocess\"",
")",
"self",
".",
"_pianobar",
".",
"terminate",
"(",
")",
"except",
"pexpect",
".",
"exceptions",
".",
"TIMEOUT",
":",
"# kill the process group",
"os",
".",
"killpg",
"(",
"os",
".",
"getpgid",
"(",
"self",
".",
"_pianobar",
".",
"pid",
")",
",",
"signal",
".",
"SIGTERM",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Killed Pianobar subprocess\"",
")",
"self",
".",
"_pianobar",
"=",
"None",
"self",
".",
"_player_state",
"=",
"STATE_OFF",
"self",
".",
"schedule_update_ha_state",
"(",
")"
] | [
130,
4
] | [
145,
39
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.media_play | (self) | Send play command. | Send play command. | def media_play(self):
"""Send play command."""
self._send_pianobar_command(SERVICE_MEDIA_PLAY_PAUSE)
self._player_state = STATE_PLAYING
self.schedule_update_ha_state() | [
"def",
"media_play",
"(",
"self",
")",
":",
"self",
".",
"_send_pianobar_command",
"(",
"SERVICE_MEDIA_PLAY_PAUSE",
")",
"self",
".",
"_player_state",
"=",
"STATE_PLAYING",
"self",
".",
"schedule_update_ha_state",
"(",
")"
] | [
147,
4
] | [
151,
39
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.media_pause | (self) | Send pause command. | Send pause command. | def media_pause(self):
"""Send pause command."""
self._send_pianobar_command(SERVICE_MEDIA_PLAY_PAUSE)
self._player_state = STATE_PAUSED
self.schedule_update_ha_state() | [
"def",
"media_pause",
"(",
"self",
")",
":",
"self",
".",
"_send_pianobar_command",
"(",
"SERVICE_MEDIA_PLAY_PAUSE",
")",
"self",
".",
"_player_state",
"=",
"STATE_PAUSED",
"self",
".",
"schedule_update_ha_state",
"(",
")"
] | [
153,
4
] | [
157,
39
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.media_next_track | (self) | Go to next track. | Go to next track. | def media_next_track(self):
"""Go to next track."""
self._send_pianobar_command(SERVICE_MEDIA_NEXT_TRACK)
self.schedule_update_ha_state() | [
"def",
"media_next_track",
"(",
"self",
")",
":",
"self",
".",
"_send_pianobar_command",
"(",
"SERVICE_MEDIA_NEXT_TRACK",
")",
"self",
".",
"schedule_update_ha_state",
"(",
")"
] | [
159,
4
] | [
162,
39
] | python | en | ['en', 'pt', 'en'] | True |
PandoraMediaPlayer.supported_features | (self) | Flag media player features that are supported. | Flag media player features that are supported. | def supported_features(self):
"""Flag media player features that are supported."""
return PANDORA_SUPPORT | [
"def",
"supported_features",
"(",
"self",
")",
":",
"return",
"PANDORA_SUPPORT"
] | [
165,
4
] | [
167,
30
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.source | (self) | Name of the current input source. | Name of the current input source. | def source(self):
"""Name of the current input source."""
return self._station | [
"def",
"source",
"(",
"self",
")",
":",
"return",
"self",
".",
"_station"
] | [
170,
4
] | [
172,
28
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.source_list | (self) | List of available input sources. | List of available input sources. | def source_list(self):
"""List of available input sources."""
return self._stations | [
"def",
"source_list",
"(",
"self",
")",
":",
"return",
"self",
".",
"_stations"
] | [
175,
4
] | [
177,
29
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.media_title | (self) | Title of current playing media. | Title of current playing media. | def media_title(self):
"""Title of current playing media."""
self.update_playing_status()
return self._media_title | [
"def",
"media_title",
"(",
"self",
")",
":",
"self",
".",
"update_playing_status",
"(",
")",
"return",
"self",
".",
"_media_title"
] | [
180,
4
] | [
183,
32
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.media_content_type | (self) | Content type of current playing media. | Content type of current playing media. | def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC | [
"def",
"media_content_type",
"(",
"self",
")",
":",
"return",
"MEDIA_TYPE_MUSIC"
] | [
186,
4
] | [
188,
31
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.media_artist | (self) | Artist of current playing media, music track only. | Artist of current playing media, music track only. | def media_artist(self):
"""Artist of current playing media, music track only."""
return self._media_artist | [
"def",
"media_artist",
"(",
"self",
")",
":",
"return",
"self",
".",
"_media_artist"
] | [
191,
4
] | [
193,
33
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.media_album_name | (self) | Album name of current playing media, music track only. | Album name of current playing media, music track only. | def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._media_album | [
"def",
"media_album_name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_media_album"
] | [
196,
4
] | [
198,
32
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.media_duration | (self) | Duration of current playing media in seconds. | Duration of current playing media in seconds. | def media_duration(self):
"""Duration of current playing media in seconds."""
return self._media_duration | [
"def",
"media_duration",
"(",
"self",
")",
":",
"return",
"self",
".",
"_media_duration"
] | [
201,
4
] | [
203,
35
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer.select_source | (self, source) | Choose a different Pandora station and play it. | Choose a different Pandora station and play it. | def select_source(self, source):
"""Choose a different Pandora station and play it."""
try:
station_index = self._stations.index(source)
except ValueError:
_LOGGER.warning("Station %s is not in list", source)
return
_LOGGER.debug("Setting station %s, %d", source, station_index)
self._send_station_list_command()
self._pianobar.sendline(f"{station_index}")
self._pianobar.expect("\r\n")
self._player_state = STATE_PLAYING | [
"def",
"select_source",
"(",
"self",
",",
"source",
")",
":",
"try",
":",
"station_index",
"=",
"self",
".",
"_stations",
".",
"index",
"(",
"source",
")",
"except",
"ValueError",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Station %s is not in list\"",
",",
"source",
")",
"return",
"_LOGGER",
".",
"debug",
"(",
"\"Setting station %s, %d\"",
",",
"source",
",",
"station_index",
")",
"self",
".",
"_send_station_list_command",
"(",
")",
"self",
".",
"_pianobar",
".",
"sendline",
"(",
"f\"{station_index}\"",
")",
"self",
".",
"_pianobar",
".",
"expect",
"(",
"\"\\r\\n\"",
")",
"self",
".",
"_player_state",
"=",
"STATE_PLAYING"
] | [
205,
4
] | [
216,
42
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer._send_station_list_command | (self) | Send a station list command. | Send a station list command. | def _send_station_list_command(self):
"""Send a station list command."""
self._pianobar.send("s")
try:
self._pianobar.expect("Select station:", timeout=1)
except pexpect.exceptions.TIMEOUT:
# try again. Buffer was contaminated.
self._clear_buffer()
self._pianobar.send("s")
self._pianobar.expect("Select station:") | [
"def",
"_send_station_list_command",
"(",
"self",
")",
":",
"self",
".",
"_pianobar",
".",
"send",
"(",
"\"s\"",
")",
"try",
":",
"self",
".",
"_pianobar",
".",
"expect",
"(",
"\"Select station:\"",
",",
"timeout",
"=",
"1",
")",
"except",
"pexpect",
".",
"exceptions",
".",
"TIMEOUT",
":",
"# try again. Buffer was contaminated.",
"self",
".",
"_clear_buffer",
"(",
")",
"self",
".",
"_pianobar",
".",
"send",
"(",
"\"s\"",
")",
"self",
".",
"_pianobar",
".",
"expect",
"(",
"\"Select station:\"",
")"
] | [
218,
4
] | [
227,
52
] | python | en | ['en', 'en', 'en'] | True |
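_send_station_list_command shows a send/expect retry: a short timeout on the first expect() guards against stale bytes in the buffer, and on TIMEOUT the buffer is cleared and the command re-sent. A hedged generalization of that idiom (the helper name and the drain step are illustrative, not part of the integration):

import pexpect

def send_with_retry(child, command, prompt, timeout=1):
    # Send a command; if the prompt doesn't arrive in time, assume the
    # buffer held leftover output, drop it, and resend once.
    child.send(command)
    try:
        child.expect(prompt, timeout=timeout)
    except pexpect.exceptions.TIMEOUT:
        try:
            child.read_nonblocking(size=4096, timeout=0.1)  # discard stale bytes
        except (pexpect.exceptions.TIMEOUT, pexpect.exceptions.EOF):
            pass
        child.send(command)
        child.expect(prompt)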
PandoraMediaPlayer.update_playing_status | (self) | Query pianobar for info about current media_title, station. | Query pianobar for info about current media_title, station. | def update_playing_status(self):
"""Query pianobar for info about current media_title, station."""
response = self._query_for_playing_status()
if not response:
return
self._update_current_station(response)
self._update_current_song(response)
self._update_song_position() | [
"def",
"update_playing_status",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"_query_for_playing_status",
"(",
")",
"if",
"not",
"response",
":",
"return",
"self",
".",
"_update_current_station",
"(",
"response",
")",
"self",
".",
"_update_current_song",
"(",
"response",
")",
"self",
".",
"_update_song_position",
"(",
")"
] | [
229,
4
] | [
236,
36
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer._query_for_playing_status | (self) | Query system for info about current track. | Query system for info about current track. | def _query_for_playing_status(self):
"""Query system for info about current track."""
self._clear_buffer()
self._pianobar.send("i")
try:
match_idx = self._pianobar.expect(
[
br"(\d\d):(\d\d)/(\d\d):(\d\d)",
"No song playing",
"Select station",
"Receiving new playlist",
]
)
except pexpect.exceptions.EOF:
_LOGGER.info("Pianobar process already exited")
return None
self._log_match()
if match_idx == 1:
# idle.
response = None
elif match_idx == 2:
# stuck on a station selection dialog. Clear it.
_LOGGER.warning("On unexpected station list page")
self._pianobar.sendcontrol("m") # press enter
self._pianobar.sendcontrol("m") # do it again b/c an 'i' got in
# pylint: disable=assignment-from-none
response = self.update_playing_status()
elif match_idx == 3:
_LOGGER.debug("Received new playlist list")
# pylint: disable=assignment-from-none
response = self.update_playing_status()
else:
response = self._pianobar.before.decode("utf-8")
return response | [
"def",
"_query_for_playing_status",
"(",
"self",
")",
":",
"self",
".",
"_clear_buffer",
"(",
")",
"self",
".",
"_pianobar",
".",
"send",
"(",
"\"i\"",
")",
"try",
":",
"match_idx",
"=",
"self",
".",
"_pianobar",
".",
"expect",
"(",
"[",
"br\"(\\d\\d):(\\d\\d)/(\\d\\d):(\\d\\d)\"",
",",
"\"No song playing\"",
",",
"\"Select station\"",
",",
"\"Receiving new playlist\"",
",",
"]",
")",
"except",
"pexpect",
".",
"exceptions",
".",
"EOF",
":",
"_LOGGER",
".",
"info",
"(",
"\"Pianobar process already exited\"",
")",
"return",
"None",
"self",
".",
"_log_match",
"(",
")",
"if",
"match_idx",
"==",
"1",
":",
"# idle.",
"response",
"=",
"None",
"elif",
"match_idx",
"==",
"2",
":",
"# stuck on a station selection dialog. Clear it.",
"_LOGGER",
".",
"warning",
"(",
"\"On unexpected station list page\"",
")",
"self",
".",
"_pianobar",
".",
"sendcontrol",
"(",
"\"m\"",
")",
"# press enter",
"self",
".",
"_pianobar",
".",
"sendcontrol",
"(",
"\"m\"",
")",
"# do it again b/c an 'i' got in",
"# pylint: disable=assignment-from-none",
"response",
"=",
"self",
".",
"update_playing_status",
"(",
")",
"elif",
"match_idx",
"==",
"3",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Received new playlist list\"",
")",
"# pylint: disable=assignment-from-none",
"response",
"=",
"self",
".",
"update_playing_status",
"(",
")",
"else",
":",
"response",
"=",
"self",
".",
"_pianobar",
".",
"before",
".",
"decode",
"(",
"\"utf-8\"",
")",
"return",
"response"
] | [
238,
4
] | [
272,
23
] | python | en | ['en', 'en', 'en'] | True |
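Note the indexing in _query_for_playing_status: expect() returns the position of whichever pattern matched, so index 0 is the MM:SS/MM:SS ticker, and all output preceding the match is left in .before. A hedged sketch of that dispatch (the function name is illustrative):

import pexpect

PATTERNS = [
    rb"(\d\d):(\d\d)/(\d\d):(\d\d)",  # 0: playback ticker; song info is in .before
    "No song playing",                # 1: idle
    "Select station",                 # 2: stuck on the station menu
    "Receiving new playlist",         # 3: station just changed
]

def read_status(child):
    # Returns the raw status text on a ticker match, None otherwise.
    idx = child.expect(PATTERNS)
    if idx == 0:
        return child.before.decode("utf-8")
    return None  # transient states; the record above retries or dismisses them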
PandoraMediaPlayer._update_current_station | (self, response) | Update current station. | Update current station. | def _update_current_station(self, response):
"""Update current station."""
station_match = re.search(STATION_PATTERN, response)
if station_match:
self._station = station_match.group(1)
_LOGGER.debug("Got station as: %s", self._station)
else:
_LOGGER.warning("No station match") | [
"def",
"_update_current_station",
"(",
"self",
",",
"response",
")",
":",
"station_match",
"=",
"re",
".",
"search",
"(",
"STATION_PATTERN",
",",
"response",
")",
"if",
"station_match",
":",
"self",
".",
"_station",
"=",
"station_match",
".",
"group",
"(",
"1",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Got station as: %s\"",
",",
"self",
".",
"_station",
")",
"else",
":",
"_LOGGER",
".",
"warning",
"(",
"\"No station match\"",
")"
] | [
274,
4
] | [
281,
47
] | python | en | ['ro', 'en', 'en'] | True |
PandoraMediaPlayer._update_current_song | (self, response) | Update info about current song. | Update info about current song. | def _update_current_song(self, response):
"""Update info about current song."""
song_match = re.search(CURRENT_SONG_PATTERN, response)
if song_match:
(
self._media_title,
self._media_artist,
self._media_album,
) = song_match.groups()
_LOGGER.debug("Got song as: %s", self._media_title)
else:
_LOGGER.warning("No song match") | [
"def",
"_update_current_song",
"(",
"self",
",",
"response",
")",
":",
"song_match",
"=",
"re",
".",
"search",
"(",
"CURRENT_SONG_PATTERN",
",",
"response",
")",
"if",
"song_match",
":",
"(",
"self",
".",
"_media_title",
",",
"self",
".",
"_media_artist",
",",
"self",
".",
"_media_album",
",",
")",
"=",
"song_match",
".",
"groups",
"(",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Got song as: %s\"",
",",
"self",
".",
"_media_title",
")",
"else",
":",
"_LOGGER",
".",
"warning",
"(",
"\"No song match\"",
")"
] | [
283,
4
] | [
294,
44
] | python | en | ['en', 'en', 'en'] | True |
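CURRENT_SONG_PATTERN itself is defined elsewhere in the integration. For illustration only, here is a pattern of roughly the shape pianobar prints and the three-way unpacking it feeds (the regex below is an assumption, not the integration's actual constant):

import re

# Assumed line shape: |>  "Title" by "Artist" on "Album"
SONG_PATTERN = re.compile(r'"(.*?)" by "(.*?)" on "(.*?)"')

response = '|>  "So What" by "Miles Davis" on "Kind of Blue"'
match = SONG_PATTERN.search(response)
if match:
    title, artist, album = match.groups()  # groups come back in capture order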
PandoraMediaPlayer._update_song_position | (self) |
Get the song position and duration.
It's hard to predict whether the music will start during init,
so we detect the state by checking the ticker.
|
Get the song position and duration. | def _update_song_position(self):
"""
Get the song position and duration.
It's hard to predict whether the music will start during init,
so we detect the state by checking the ticker.
"""
(
cur_minutes,
cur_seconds,
total_minutes,
total_seconds,
) = self._pianobar.match.groups()
time_remaining = int(cur_minutes) * 60 + int(cur_seconds)
self._media_duration = int(total_minutes) * 60 + int(total_seconds)
if time_remaining not in (self._time_remaining, self._media_duration):
self._player_state = STATE_PLAYING
elif self._player_state == STATE_PLAYING:
self._player_state = STATE_PAUSED
self._time_remaining = time_remaining | [
"def",
"_update_song_position",
"(",
"self",
")",
":",
"(",
"cur_minutes",
",",
"cur_seconds",
",",
"total_minutes",
",",
"total_seconds",
",",
")",
"=",
"self",
".",
"_pianobar",
".",
"match",
".",
"groups",
"(",
")",
"time_remaining",
"=",
"int",
"(",
"cur_minutes",
")",
"*",
"60",
"+",
"int",
"(",
"cur_seconds",
")",
"self",
".",
"_media_duration",
"=",
"int",
"(",
"total_minutes",
")",
"*",
"60",
"+",
"int",
"(",
"total_seconds",
")",
"if",
"time_remaining",
"not",
"in",
"(",
"self",
".",
"_time_remaining",
",",
"self",
".",
"_media_duration",
")",
":",
"self",
".",
"_player_state",
"=",
"STATE_PLAYING",
"elif",
"self",
".",
"_player_state",
"==",
"STATE_PLAYING",
":",
"self",
".",
"_player_state",
"=",
"STATE_PAUSED",
"self",
".",
"_time_remaining",
"=",
"time_remaining"
] | [
297,
4
] | [
318,
45
] | python | en | ['en', 'error', 'th'] | False |
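A worked example of the ticker logic above: a countdown that moved since the last poll means playback, while a frozen countdown means paused (the record additionally checks the prior player state, and treats a value equal to the full duration as not-yet-started). The numbers here are illustrative:

# pianobar reports remaining/total time, e.g. 02:15/03:30
cur_minutes, cur_seconds = "02", "15"
total_minutes, total_seconds = "03", "30"

time_remaining = int(cur_minutes) * 60 + int(cur_seconds)      # 135
media_duration = int(total_minutes) * 60 + int(total_seconds)  # 210

previous_remaining = 140  # value captured on the previous poll
if time_remaining not in (previous_remaining, media_duration):
    state = "playing"  # 135 differs from both 140 and 210: the clock is moving
else:
    state = "paused"   # simplified; the record only pauses if it was playing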
PandoraMediaPlayer._log_match | (self) | Log grabbed values from console. | Log grabbed values from console. | def _log_match(self):
"""Log grabbed values from console."""
_LOGGER.debug(
"Before: %s\nMatch: %s\nAfter: %s",
repr(self._pianobar.before),
repr(self._pianobar.match),
repr(self._pianobar.after),
) | [
"def",
"_log_match",
"(",
"self",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Before: %s\\nMatch: %s\\nAfter: %s\"",
",",
"repr",
"(",
"self",
".",
"_pianobar",
".",
"before",
")",
",",
"repr",
"(",
"self",
".",
"_pianobar",
".",
"match",
")",
",",
"repr",
"(",
"self",
".",
"_pianobar",
".",
"after",
")",
",",
")"
] | [
320,
4
] | [
327,
9
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer._send_pianobar_command | (self, service_cmd) | Send a command to Pianobar. | Send a command to Pianobar. | def _send_pianobar_command(self, service_cmd):
"""Send a command to Pianobar."""
command = CMD_MAP.get(service_cmd)
_LOGGER.debug("Sending pinaobar command %s for %s", command, service_cmd)
if command is None:
_LOGGER.info("Command %s not supported yet", service_cmd)
self._clear_buffer()
self._pianobar.sendline(command) | [
"def",
"_send_pianobar_command",
"(",
"self",
",",
"service_cmd",
")",
":",
"command",
"=",
"CMD_MAP",
".",
"get",
"(",
"service_cmd",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Sending pinaobar command %s for %s\"",
",",
"command",
",",
"service_cmd",
")",
"if",
"command",
"is",
"None",
":",
"_LOGGER",
".",
"info",
"(",
"\"Command %s not supported yet\"",
",",
"service_cmd",
")",
"self",
".",
"_clear_buffer",
"(",
")",
"self",
".",
"_pianobar",
".",
"sendline",
"(",
"command",
")"
] | [
329,
4
] | [
336,
40
] | python | en | ['en', 'en', 'en'] | True |
PandoraMediaPlayer._update_stations | (self) | List defined Pandora stations. | List defined Pandora stations. | def _update_stations(self):
"""List defined Pandora stations."""
self._send_station_list_command()
station_lines = self._pianobar.before.decode("utf-8")
_LOGGER.debug("Getting stations: %s", station_lines)
self._stations = []
for line in station_lines.split("\r\n"):
match = re.search(r"\d+\).....(.+)", line)
if match:
station = match.group(1).strip()
_LOGGER.debug("Found station %s", station)
self._stations.append(station)
else:
_LOGGER.debug("No station match on %s", line)
self._pianobar.sendcontrol("m") # press enter with blank line
self._pianobar.sendcontrol("m") | [
"def",
"_update_stations",
"(",
"self",
")",
":",
"self",
".",
"_send_station_list_command",
"(",
")",
"station_lines",
"=",
"self",
".",
"_pianobar",
".",
"before",
".",
"decode",
"(",
"\"utf-8\"",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Getting stations: %s\"",
",",
"station_lines",
")",
"self",
".",
"_stations",
"=",
"[",
"]",
"for",
"line",
"in",
"station_lines",
".",
"split",
"(",
"\"\\r\\n\"",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r\"\\d+\\).....(.+)\"",
",",
"line",
")",
"if",
"match",
":",
"station",
"=",
"match",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Found station %s\"",
",",
"station",
")",
"self",
".",
"_stations",
".",
"append",
"(",
"station",
")",
"else",
":",
"_LOGGER",
".",
"debug",
"(",
"\"No station match on %s\"",
",",
"line",
")",
"self",
".",
"_pianobar",
".",
"sendcontrol",
"(",
"\"m\"",
")",
"# press enter with blank line",
"self",
".",
"_pianobar",
".",
"sendcontrol",
"(",
"\"m\"",
")"
] | [
338,
4
] | [
353,
39
] | python | ca | ['ca', 'et', 'en'] | False |
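The station regex in _update_stations, r"\d+\).....(.+)", skips the menu index, the closing parenthesis, and five columns of flags/padding before capturing the name. A worked example on an assumed pianobar menu line (the exact column layout is an assumption):

import re

line = "     2)  q   Jazz Radio"  # assumed pianobar station-menu line
match = re.search(r"\d+\).....(.+)", line)
if match:
    station = match.group(1).strip()  # "Jazz Radio"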
PandoraMediaPlayer._clear_buffer | (self) |
Clear buffer from pexpect.
This is necessary because repeated 00:00 ticker lines accumulate in the buffer.
|
Clear buffer from pexpect. | def _clear_buffer(self):
"""
Clear buffer from pexpect.
This is necessary because repeated 00:00 ticker lines accumulate in the buffer.
"""
try:
while not self._pianobar.expect(".+", timeout=0.1):
pass
except pexpect.exceptions.TIMEOUT:
pass
except pexpect.exceptions.EOF:
pass | [
"def",
"_clear_buffer",
"(",
"self",
")",
":",
"try",
":",
"while",
"not",
"self",
".",
"_pianobar",
".",
"expect",
"(",
"\".+\"",
",",
"timeout",
"=",
"0.1",
")",
":",
"pass",
"except",
"pexpect",
".",
"exceptions",
".",
"TIMEOUT",
":",
"pass",
"except",
"pexpect",
".",
"exceptions",
".",
"EOF",
":",
"pass"
] | [
355,
4
] | [
368,
16
] | python | en | ['en', 'error', 'th'] | False |
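The drain loop in _clear_buffer works because expect() returns 0 — the index of its single pattern — on every match, so `not 0` keeps the loop spinning while output is still arriving; a short TIMEOUT (nothing left) or EOF (process gone) then ends it via exception. A standalone sketch (the helper name is illustrative):

import pexpect

def drain(child, timeout=0.1):
    # Swallow pending output until the child goes quiet or exits.
    try:
        while not child.expect(".+", timeout=timeout):
            pass
    except (pexpect.exceptions.TIMEOUT, pexpect.exceptions.EOF):
        pass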
test_api_ping | (hassio_handler, aioclient_mock) | Test setup with API ping. | Test setup with API ping. | async def test_api_ping(hassio_handler, aioclient_mock):
"""Test setup with API ping."""
aioclient_mock.get("http://127.0.0.1/supervisor/ping", json={"result": "ok"})
assert await hassio_handler.is_connected()
assert aioclient_mock.call_count == 1 | [
"async",
"def",
"test_api_ping",
"(",
"hassio_handler",
",",
"aioclient_mock",
")",
":",
"aioclient_mock",
".",
"get",
"(",
"\"http://127.0.0.1/supervisor/ping\"",
",",
"json",
"=",
"{",
"\"result\"",
":",
"\"ok\"",
"}",
")",
"assert",
"await",
"hassio_handler",
".",
"is_connected",
"(",
")",
"assert",
"aioclient_mock",
".",
"call_count",
"==",
"1"
] | [
8,
0
] | [
13,
41
] | python | en | ['en', 'ceb', 'en'] | True |
test_api_ping_error | (hassio_handler, aioclient_mock) | Test setup with API ping error. | Test setup with API ping error. | async def test_api_ping_error(hassio_handler, aioclient_mock):
"""Test setup with API ping error."""
aioclient_mock.get("http://127.0.0.1/supervisor/ping", json={"result": "error"})
assert not (await hassio_handler.is_connected())
assert aioclient_mock.call_count == 1 | [
"async",
"def",
"test_api_ping_error",
"(",
"hassio_handler",
",",
"aioclient_mock",
")",
":",
"aioclient_mock",
".",
"get",
"(",
"\"http://127.0.0.1/supervisor/ping\"",
",",
"json",
"=",
"{",
"\"result\"",
":",
"\"error\"",
"}",
")",
"assert",
"not",
"(",
"await",
"hassio_handler",
".",
"is_connected",
"(",
")",
")",
"assert",
"aioclient_mock",
".",
"call_count",
"==",
"1"
] | [
16,
0
] | [
21,
41
] | python | en | ['en', 'pt', 'en'] | True |
test_api_ping_exception | (hassio_handler, aioclient_mock) | Test setup with API ping exception. | Test setup with API ping exception. | async def test_api_ping_exception(hassio_handler, aioclient_mock):
"""Test setup with API ping exception."""
aioclient_mock.get("http://127.0.0.1/supervisor/ping", exc=aiohttp.ClientError())
assert not (await hassio_handler.is_connected())
assert aioclient_mock.call_count == 1 | [
"async",
"def",
"test_api_ping_exeption",
"(",
"hassio_handler",
",",
"aioclient_mock",
")",
":",
"aioclient_mock",
".",
"get",
"(",
"\"http://127.0.0.1/supervisor/ping\"",
",",
"exc",
"=",
"aiohttp",
".",
"ClientError",
"(",
")",
")",
"assert",
"not",
"(",
"await",
"hassio_handler",
".",
"is_connected",
"(",
")",
")",
"assert",
"aioclient_mock",
".",
"call_count",
"==",
"1"
] | [
24,
0
] | [
29,
41
] | python | en | ['en', 'en', 'en'] | True |
test_api_info | (hassio_handler, aioclient_mock) | Test setup with API generic info. | Test setup with API generic info. | async def test_api_info(hassio_handler, aioclient_mock):
"""Test setup with API generic info."""
aioclient_mock.get(
"http://127.0.0.1/info",
json={
"result": "ok",
"data": {"supervisor": "222", "homeassistant": "0.110.0", "hassos": None},
},
)
data = await hassio_handler.get_info()
assert aioclient_mock.call_count == 1
assert data["hassos"] is None
assert data["homeassistant"] == "0.110.0"
assert data["supervisor"] == "222" | [
"async",
"def",
"test_api_info",
"(",
"hassio_handler",
",",
"aioclient_mock",
")",
":",
"aioclient_mock",
".",
"get",
"(",
"\"http://127.0.0.1/info\"",
",",
"json",
"=",
"{",
"\"result\"",
":",
"\"ok\"",
",",
"\"data\"",
":",
"{",
"\"supervisor\"",
":",
"\"222\"",
",",
"\"homeassistant\"",
":",
"\"0.110.0\"",
",",
"\"hassos\"",
":",
"None",
"}",
",",
"}",
",",
")",
"data",
"=",
"await",
"hassio_handler",
".",
"get_info",
"(",
")",
"assert",
"aioclient_mock",
".",
"call_count",
"==",
"1",
"assert",
"data",
"[",
"\"hassos\"",
"]",
"is",
"None",
"assert",
"data",
"[",
"\"homeassistant\"",
"]",
"==",
"\"0.110.0\"",
"assert",
"data",
"[",
"\"supervisor\"",
"]",
"==",
"\"222\""
] | [
32,
0
] | [
46,
38
] | python | en | ['en', 'haw', 'en'] | True |