column                   type            min      max
body_hash                stringlengths   64       64
body                     stringlengths   23       109k
docstring                stringlengths   1        57k
path                     stringlengths   4        198
name                     stringlengths   1        115
repository_name          stringlengths   7        111
repository_stars         float64         0        191k
lang                     stringclasses   1 value
body_without_docstring   stringlengths   14       108k
unified                  stringlengths   45       133k
4b5f6e94f37a4691367eb08ce125ede0439657ff7497c8145b3d840919424085
def get_sequence_class(random_batches, balanced_sampling): '\n Returns the appropriate BatchSequence sub-class given a set of parameters.\n\n Note: balanced_sampling cannot be True with random_batches=False\n\n Args:\n random_batches: (bool) The BatchSequence should sample random\n batches across the SleepStudyDataset\n balanced_sampling: (bool) The BatchSequence should sample randomly\n and uniformly across individual classes.\n\n Returns:\n A BatchSequence typed class (non-initialized)\n ' if random_batches: if balanced_sampling: return BalancedRandomBatchSequence else: return RandomBatchSequence elif balanced_sampling: raise ValueError("Cannot use 'balanced_sampling' with 'random_batches' set to False.") else: return BatchSequence
Returns the appropriate BatchSequence sub-class given a set of parameters. Note: balanced_sampling cannot be True with random_batches=False Args: random_batches: (bool) The BatchSequence should sample random batches across the SleepStudyDataset balanced_sampling: (bool) The BatchSequence should sample randomly and uniformly across individual classes. Returns: A BatchSequence typed class (non-initialized)
utime/sequences/utils.py
get_sequence_class
learning310/U-Time
138
python
def get_sequence_class(random_batches, balanced_sampling): '\n Returns the appropriate BatchSequence sub-class given a set of parameters.\n\n Note: balanced_sampling cannot be True with random_batches=False\n\n Args:\n random_batches: (bool) The BatchSequence should sample random\n batches across the SleepStudyDataset\n balanced_sampling: (bool) The BatchSequence should sample randomly\n and uniformly across individual classes.\n\n Returns:\n A BatchSequence typed class (non-initialized)\n ' if random_batches: if balanced_sampling: return BalancedRandomBatchSequence else: return RandomBatchSequence elif balanced_sampling: raise ValueError("Cannot use 'balanced_sampling' with 'random_batches' set to False.") else: return BatchSequence
def get_sequence_class(random_batches, balanced_sampling): '\n Returns the appropriate BatchSequence sub-class given a set of parameters.\n\n Note: balanced_sampling cannot be True with random_batches=False\n\n Args:\n random_batches: (bool) The BatchSequence should sample random\n batches across the SleepStudyDataset\n balanced_sampling: (bool) The BatchSequence should sample randomly\n and uniformly across individual classes.\n\n Returns:\n A BatchSequence typed class (non-initialized)\n ' if random_batches: if balanced_sampling: return BalancedRandomBatchSequence else: return RandomBatchSequence elif balanced_sampling: raise ValueError("Cannot use 'balanced_sampling' with 'random_batches' set to False.") else: return BatchSequence<|docstring|>Returns the appropriate BatchSequence sub-class given a set of parameters. Note: balanced_sampling cannot be True with random_batches=False Args: random_batches: (bool) The BatchSequence should sample random batches across the SleepStudyDataset balanced_sampling: (bool) The BatchSequence should sample randomly and uniformly across individual classes. Returns: A BatchSequence typed class (non-initialized)<|endoftext|>
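The get_sequence_class record above is a plain two-flag dispatcher. A minimal runnable sketch of the same logic follows; the three classes are stand-ins, since the real U-Time BatchSequence classes are not part of this record:

class BatchSequence: ...                                      # stand-ins for the U-Time classes,
class RandomBatchSequence(BatchSequence): ...                 # which are not included in this record
class BalancedRandomBatchSequence(RandomBatchSequence): ...

def get_sequence_class(random_batches, balanced_sampling):
    # Same dispatch as the record: random + balanced, random only, error, or plain sequential.
    if random_batches:
        return BalancedRandomBatchSequence if balanced_sampling else RandomBatchSequence
    if balanced_sampling:
        raise ValueError("Cannot use 'balanced_sampling' with 'random_batches' set to False.")
    return BatchSequence

assert get_sequence_class(True, True) is BalancedRandomBatchSequence
assert get_sequence_class(True, False) is RandomBatchSequence
assert get_sequence_class(False, False) is BatchSequence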
c1ffdebcd9684caeecbfbec8a1761c842b694b182c669f3902cf7b4874690278
def get_batch_sequence(dataset_queue, batch_size=16, random_batches=True, balanced_sampling=True, n_classes=None, margin=0, augmenters=None, scaler=None, batch_wise_scaling=False, no_log=False, **kwargs): '\n Return a utime.sequences BatchSequence object made from a dataset queue.\n A BatchSequence object is used to extract batches of data from all or\n individual SleepStudy objects represented by this SleepStudyDataset.\n\n All args pass to the BatchSequence object.\n Please refer to its documentation.\n\n Returns:\n A BatchSequence object\n ' (data_per_epoch, n_channels) = infer_dpe_and_chans(dataset_queue) sequence_class = get_sequence_class(random_batches, balanced_sampling) return sequence_class(dataset_queue=dataset_queue, batch_size=batch_size, data_per_period=data_per_epoch, n_classes=n_classes, n_channels=n_channels, margin=margin, augmenters=augmenters, batch_scaler=(scaler if batch_wise_scaling else None), logger=dataset_queue.logger, identifier=dataset_queue.dataset.identifier, no_log=no_log, **kwargs)
Return a utime.sequences BatchSequence object made from a dataset queue. A BatchSequence object is used to extract batches of data from all or individual SleepStudy objects represented by this SleepStudyDataset. All args pass to the BatchSequence object. Please refer to its documentation. Returns: A BatchSequence object
utime/sequences/utils.py
get_batch_sequence
learning310/U-Time
138
python
def get_batch_sequence(dataset_queue, batch_size=16, random_batches=True, balanced_sampling=True, n_classes=None, margin=0, augmenters=None, scaler=None, batch_wise_scaling=False, no_log=False, **kwargs): '\n Return a utime.sequences BatchSequence object made from a dataset queue.\n A BatchSequence object is used to extract batches of data from all or\n individual SleepStudy objects represented by this SleepStudyDataset.\n\n All args pass to the BatchSequence object.\n Please refer to its documentation.\n\n Returns:\n A BatchSequence object\n ' (data_per_epoch, n_channels) = infer_dpe_and_chans(dataset_queue) sequence_class = get_sequence_class(random_batches, balanced_sampling) return sequence_class(dataset_queue=dataset_queue, batch_size=batch_size, data_per_period=data_per_epoch, n_classes=n_classes, n_channels=n_channels, margin=margin, augmenters=augmenters, batch_scaler=(scaler if batch_wise_scaling else None), logger=dataset_queue.logger, identifier=dataset_queue.dataset.identifier, no_log=no_log, **kwargs)
def get_batch_sequence(dataset_queue, batch_size=16, random_batches=True, balanced_sampling=True, n_classes=None, margin=0, augmenters=None, scaler=None, batch_wise_scaling=False, no_log=False, **kwargs): '\n Return a utime.sequences BatchSequence object made from a dataset queue.\n A BatchSequence object is used to extract batches of data from all or\n individual SleepStudy objects represented by this SleepStudyDataset.\n\n All args pass to the BatchSequence object.\n Please refer to its documentation.\n\n Returns:\n A BatchSequence object\n ' (data_per_epoch, n_channels) = infer_dpe_and_chans(dataset_queue) sequence_class = get_sequence_class(random_batches, balanced_sampling) return sequence_class(dataset_queue=dataset_queue, batch_size=batch_size, data_per_period=data_per_epoch, n_classes=n_classes, n_channels=n_channels, margin=margin, augmenters=augmenters, batch_scaler=(scaler if batch_wise_scaling else None), logger=dataset_queue.logger, identifier=dataset_queue.dataset.identifier, no_log=no_log, **kwargs)<|docstring|>Return a utime.sequences BatchSequence object made from a dataset queue. A BatchSequence object is used to extract batches of data from all or individual SleepStudy objects represented by this SleepStudyDataset. All args pass to the BatchSequence object. Please refer to its documentation. Returns: A BatchSequence object<|endoftext|>
0c353a9d87e0b447d8a869bba7209499e7456af8600f307b0756031e81d612b4
def makedirs_touch(path): 'Creates the file and all parent directories in the supplied path' basedir = os.path.dirname(path) if (not os.path.exists(basedir)): os.makedirs(basedir) with open(path, 'a'): os.utime(path, None)
Creates the file and all parent directories in the supplied path
pyjournal/utils.py
makedirs_touch
Lee-Sutton/pyjournal
0
python
def makedirs_touch(path): basedir = os.path.dirname(path) if (not os.path.exists(basedir)): os.makedirs(basedir) with open(path, 'a'): os.utime(path, None)
def makedirs_touch(path): basedir = os.path.dirname(path) if (not os.path.exists(basedir)): os.makedirs(basedir) with open(path, 'a'): os.utime(path, None)<|docstring|>Creates the file and all parent directories in the supplied path<|endoftext|>
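For comparison, the makedirs_touch utility above can be written more compactly with pathlib; this is a sketch, not code from the Lee-Sutton/pyjournal repository:

from pathlib import Path

def makedirs_touch(path):
    """Create the file and any missing parent directories (pathlib sketch of the record above)."""
    p = Path(path)
    p.parent.mkdir(parents=True, exist_ok=True)
    p.touch()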
a1682e809a0ba0d4a74baa235b2d35f7f7ca56fb84443f5fa0bdaebfbf8f092b
def __init__(self, handler, thread_group=None, timeout=None): 'Initializes a new Watcher instance.\n\n :param handler: a `callable` object to be invoked for each observed\n K8s event with the event body as a single argument.\n Calling `handler` should never raise any exceptions\n other than `eventlet.greenlet.GreenletExit` caused by\n `eventlet.greenthread.GreenThread.kill` when the\n `Watcher` is operating in asynchronous mode.\n :param thread_group: an `oslo_service.threadgroup.ThreadGroup`\n object used to run the event processing loops\n asynchronously. If `thread_group` is not\n specified, the `Watcher` will operate in a\n synchronous mode.\n ' super(Watcher, self).__init__() self._client = clients.get_kubernetes_client() self._handler = handler self._thread_group = thread_group self._running = False self._resources = set() self._watching = {} self._idle = {} if (timeout is None): timeout = CONF.kubernetes.watch_retry_timeout self._timeout = timeout
Initializes a new Watcher instance. :param handler: a `callable` object to be invoked for each observed K8s event with the event body as a single argument. Calling `handler` should never raise any exceptions other than `eventlet.greenlet.GreenletExit` caused by `eventlet.greenthread.GreenThread.kill` when the `Watcher` is operating in asynchronous mode. :param thread_group: an `oslo_service.threadgroup.ThreadGroup` object used to run the event processing loops asynchronously. If `thread_group` is not specified, the `Watcher` will operate in a synchronous mode.
kuryr_kubernetes/watcher.py
__init__
BoringWenn/kuryr-kubernetes
0
python
def __init__(self, handler, thread_group=None, timeout=None): 'Initializes a new Watcher instance.\n\n :param handler: a `callable` object to be invoked for each observed\n K8s event with the event body as a single argument.\n Calling `handler` should never raise any exceptions\n other than `eventlet.greenlet.GreenletExit` caused by\n `eventlet.greenthread.GreenThread.kill` when the\n `Watcher` is operating in asynchronous mode.\n :param thread_group: an `oslo_service.threadgroup.ThreadGroup`\n object used to run the event processing loops\n asynchronously. If `thread_group` is not\n specified, the `Watcher` will operate in a\n synchronous mode.\n ' super(Watcher, self).__init__() self._client = clients.get_kubernetes_client() self._handler = handler self._thread_group = thread_group self._running = False self._resources = set() self._watching = {} self._idle = {} if (timeout is None): timeout = CONF.kubernetes.watch_retry_timeout self._timeout = timeout
def __init__(self, handler, thread_group=None, timeout=None): 'Initializes a new Watcher instance.\n\n :param handler: a `callable` object to be invoked for each observed\n K8s event with the event body as a single argument.\n Calling `handler` should never raise any exceptions\n other than `eventlet.greenlet.GreenletExit` caused by\n `eventlet.greenthread.GreenThread.kill` when the\n `Watcher` is operating in asynchronous mode.\n :param thread_group: an `oslo_service.threadgroup.ThreadGroup`\n object used to run the event processing loops\n asynchronously. If `thread_group` is not\n specified, the `Watcher` will operate in a\n synchronous mode.\n ' super(Watcher, self).__init__() self._client = clients.get_kubernetes_client() self._handler = handler self._thread_group = thread_group self._running = False self._resources = set() self._watching = {} self._idle = {} if (timeout is None): timeout = CONF.kubernetes.watch_retry_timeout self._timeout = timeout<|docstring|>Initializes a new Watcher instance. :param handler: a `callable` object to be invoked for each observed K8s event with the event body as a single argument. Calling `handler` should never raise any exceptions other than `eventlet.greenlet.GreenletExit` caused by `eventlet.greenthread.GreenThread.kill` when the `Watcher` is operating in asynchronous mode. :param thread_group: an `oslo_service.threadgroup.ThreadGroup` object used to run the event processing loops asynchronously. If `thread_group` is not specified, the `Watcher` will operate in a synchronous mode.<|endoftext|>
6b389fb3ec34e6c434a9e5212f2cfc551e4593d4f08fd4c25964adea602738ce
def add(self, path): 'Adds ths K8s resource to the Watcher.\n\n Adding a resource to a running `Watcher` also ensures that the event\n processing loop for that resource is running. This method could block\n for `Watcher`s operating in synchronous mode.\n\n :param path: K8s resource URL path\n ' self._resources.add(path) if (self._running and (path not in self._watching)): self._start_watch(path)
Adds ths K8s resource to the Watcher. Adding a resource to a running `Watcher` also ensures that the event processing loop for that resource is running. This method could block for `Watcher`s operating in synchronous mode. :param path: K8s resource URL path
kuryr_kubernetes/watcher.py
add
BoringWenn/kuryr-kubernetes
0
python
def add(self, path): 'Adds ths K8s resource to the Watcher.\n\n Adding a resource to a running `Watcher` also ensures that the event\n processing loop for that resource is running. This method could block\n for `Watcher`s operating in synchronous mode.\n\n :param path: K8s resource URL path\n ' self._resources.add(path) if (self._running and (path not in self._watching)): self._start_watch(path)
def add(self, path): 'Adds ths K8s resource to the Watcher.\n\n Adding a resource to a running `Watcher` also ensures that the event\n processing loop for that resource is running. This method could block\n for `Watcher`s operating in synchronous mode.\n\n :param path: K8s resource URL path\n ' self._resources.add(path) if (self._running and (path not in self._watching)): self._start_watch(path)<|docstring|>Adds ths K8s resource to the Watcher. Adding a resource to a running `Watcher` also ensures that the event processing loop for that resource is running. This method could block for `Watcher`s operating in synchronous mode. :param path: K8s resource URL path<|endoftext|>
8b6af6574f3dd876c1488faddd6e7628d3ad8a4260ce7875d408f394e34a9402
def remove(self, path): 'Removes the K8s resource from the Watcher.\n\n Also requests the corresponding event processing loop to stop if it\n is running.\n\n :param path: K8s resource URL path\n ' self._resources.discard(path) if (path in self._watching): self._stop_watch(path)
Removes the K8s resource from the Watcher. Also requests the corresponding event processing loop to stop if it is running. :param path: K8s resource URL path
kuryr_kubernetes/watcher.py
remove
BoringWenn/kuryr-kubernetes
0
python
def remove(self, path): 'Removes the K8s resource from the Watcher.\n\n Also requests the corresponding event processing loop to stop if it\n is running.\n\n :param path: K8s resource URL path\n ' self._resources.discard(path) if (path in self._watching): self._stop_watch(path)
def remove(self, path): 'Removes the K8s resource from the Watcher.\n\n Also requests the corresponding event processing loop to stop if it\n is running.\n\n :param path: K8s resource URL path\n ' self._resources.discard(path) if (path in self._watching): self._stop_watch(path)<|docstring|>Removes the K8s resource from the Watcher. Also requests the corresponding event processing loop to stop if it is running. :param path: K8s resource URL path<|endoftext|>
d6b38122bab161bd2926cc067a9607e352107ad12a57e0e0f562de8038d9ad8b
def start(self): 'Starts the Watcher.\n\n Also ensures that the event processing loops are running. This method\n could block for `Watcher`s operating in synchronous mode.\n ' self._running = True for path in (self._resources - set(self._watching)): self._start_watch(path)
Starts the Watcher. Also ensures that the event processing loops are running. This method could block for `Watcher`s operating in synchronous mode.
kuryr_kubernetes/watcher.py
start
BoringWenn/kuryr-kubernetes
0
python
def start(self): 'Starts the Watcher.\n\n Also ensures that the event processing loops are running. This method\n could block for `Watcher`s operating in synchronous mode.\n ' self._running = True for path in (self._resources - set(self._watching)): self._start_watch(path)
def start(self): 'Starts the Watcher.\n\n Also ensures that the event processing loops are running. This method\n could block for `Watcher`s operating in synchronous mode.\n ' self._running = True for path in (self._resources - set(self._watching)): self._start_watch(path)<|docstring|>Starts the Watcher. Also ensures that the event processing loops are running. This method could block for `Watcher`s operating in synchronous mode.<|endoftext|>
1d35b613eece6b6138378cce3e390b81095c6481a5cfe0702329f12ae5a153d6
def stop(self): 'Stops the Watcher.\n\n Also requests all running event processing loops to stop.\n ' self._running = False for path in list(self._watching): self._stop_watch(path)
Stops the Watcher. Also requests all running event processing loops to stop.
kuryr_kubernetes/watcher.py
stop
BoringWenn/kuryr-kubernetes
0
python
def stop(self): 'Stops the Watcher.\n\n Also requests all running event processing loops to stop.\n ' self._running = False for path in list(self._watching): self._stop_watch(path)
def stop(self): 'Stops the Watcher.\n\n Also requests all running event processing loops to stop.\n ' self._running = False for path in list(self._watching): self._stop_watch(path)<|docstring|>Stops the Watcher. Also requests all running event processing loops to stop.<|endoftext|>
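Taken together, the five Watcher records above (init, add, remove, start, stop) describe a small add/start/stop API: resources are remembered even while stopped, and watches open or close lazily while running. A self-contained, ray/K8s-free sketch of just that bookkeeping (everything here is illustrative, not kuryr-kubernetes code):

class MiniWatcher:
    # Bookkeeping sketch of the Watcher records above: no K8s client, no threads.
    def __init__(self, handler):
        self._handler = handler
        self._running = False
        self._resources = set()
        self._watching = {}

    def add(self, path):
        self._resources.add(path)
        if self._running and path not in self._watching:
            self._watching[path] = True    # stand-in for _start_watch(path)

    def remove(self, path):
        self._resources.discard(path)
        self._watching.pop(path, None)     # stand-in for _stop_watch(path)

    def start(self):
        self._running = True
        for path in self._resources - set(self._watching):
            self._watching[path] = True

    def stop(self):
        self._running = False
        self._watching.clear()

w = MiniWatcher(handler=print)
w.add('/api/v1/namespaces/default/pods')   # hypothetical resource URL path
w.start(); assert '/api/v1/namespaces/default/pods' in w._watching
w.stop(); assert not w._watching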
18347931f6f758da5e586c02b09a81724a1611d86676fcdf1e38da069a717340
def version() -> str: 'Returns the version number of this library.' return VERSION
Returns the version number of this library.
redpandas/__init__.py
version
RedVoxInc/redpandas
1
python
def version() -> str: return VERSION
def version() -> str: return VERSION<|docstring|>Returns the version number of this library.<|endoftext|>
dc55a383917dd4bd2fcc22b3c3d8c869db14863970454ea88f78344a12103ccf
def print_version() -> None: 'Prints the version number of this library' print(version())
Prints the version number of this library
redpandas/__init__.py
print_version
RedVoxInc/redpandas
1
python
def print_version() -> None: print(version())
def print_version() -> None: print(version())<|docstring|>Prints the version number of this library<|endoftext|>
7aa0edb6861393b0ffc4301d80126fbd4a222df6f8d6cb7b45d0ee5366bacd3e
def get_data_files(): ' Get all data files for the package\n ' data_files = [('etc/jupyter/jupyter_server_config.d', ['etc/jupyter/jupyter_server_config.d/voila-gridstack.json']), ('etc/jupyter/jupyter_notebook_config.d', ['etc/jupyter/jupyter_notebook_config.d/voila-gridstack.json']), ('etc/jupyter/nbconfig/notebook.d', ['etc/jupyter/nbconfig/notebook.d/voila-gridstack.json']), ('share/jupyter/nbextensions/voila-gridstack', ['voila-gridstack/static/extension.js', 'voila-gridstack/static/voila-gridstack.js', 'voila-gridstack/static/voila-gridstack.css', 'voila-gridstack/static/gridstack.js', 'voila-gridstack/static/gridstack.jqueryUI_require.js'])] for (root, dirs, files) in os.walk('share'): root_files = [os.path.join(root, i) for i in files] data_files.append((root, root_files)) return data_files
Get all data files for the package
setup.py
get_data_files
JohanMabille/voila-gridstack
0
python
def get_data_files(): ' \n ' data_files = [('etc/jupyter/jupyter_server_config.d', ['etc/jupyter/jupyter_server_config.d/voila-gridstack.json']), ('etc/jupyter/jupyter_notebook_config.d', ['etc/jupyter/jupyter_notebook_config.d/voila-gridstack.json']), ('etc/jupyter/nbconfig/notebook.d', ['etc/jupyter/nbconfig/notebook.d/voila-gridstack.json']), ('share/jupyter/nbextensions/voila-gridstack', ['voila-gridstack/static/extension.js', 'voila-gridstack/static/voila-gridstack.js', 'voila-gridstack/static/voila-gridstack.css', 'voila-gridstack/static/gridstack.js', 'voila-gridstack/static/gridstack.jqueryUI_require.js'])] for (root, dirs, files) in os.walk('share'): root_files = [os.path.join(root, i) for i in files] data_files.append((root, root_files)) return data_files
def get_data_files(): ' \n ' data_files = [('etc/jupyter/jupyter_server_config.d', ['etc/jupyter/jupyter_server_config.d/voila-gridstack.json']), ('etc/jupyter/jupyter_notebook_config.d', ['etc/jupyter/jupyter_notebook_config.d/voila-gridstack.json']), ('etc/jupyter/nbconfig/notebook.d', ['etc/jupyter/nbconfig/notebook.d/voila-gridstack.json']), ('share/jupyter/nbextensions/voila-gridstack', ['voila-gridstack/static/extension.js', 'voila-gridstack/static/voila-gridstack.js', 'voila-gridstack/static/voila-gridstack.css', 'voila-gridstack/static/gridstack.js', 'voila-gridstack/static/gridstack.jqueryUI_require.js'])] for (root, dirs, files) in os.walk('share'): root_files = [os.path.join(root, i) for i in files] data_files.append((root, root_files)) return data_files<|docstring|>Get all data files for the package<|endoftext|>
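The trailing os.walk loop in get_data_files is the usual "mirror a directory tree into setuptools data_files" idiom. A standalone sketch (the 'share' directory name comes from the record; the function name is illustrative):

import os

def collect_data_files(root='share'):
    # Pair each directory under `root` with the files it contains, as data_files entries.
    data_files = []
    for dirpath, _dirnames, filenames in os.walk(root):
        data_files.append((dirpath, [os.path.join(dirpath, f) for f in filenames]))
    return data_files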
e22b12ea4d06fc92775ab2863de86329138e1fb5237ab31fd48139b57418afa3
def __call__(self, src): 'Augmenter body' if (random.random() < self.p): src = (255 - src) return src
Augmenter body
cnocr/data_utils/aug.py
__call__
breezedeus/cnocr
1,562
python
def __call__(self, src): if (random.random() < self.p): src = (255 - src) return src
def __call__(self, src): if (random.random() < self.p): src = (255 - src) return src<|docstring|>Augmenter body<|endoftext|>
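The one-line __call__ above inverts a 0-255 image with probability p. A self-contained sketch of a wrapping augmenter class; the class name and constructor are assumptions, since cnocr's own class definition is not part of this record:

import random
import numpy as np

class RandomInvert:                      # name is an assumption, not cnocr's class name
    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, src):
        # Invert pixel values of a 0-255 image with probability p, as in the record above.
        if random.random() < self.p:
            src = 255 - src
        return src

aug = RandomInvert(p=1.0)
assert (aug(np.zeros((8, 8), dtype=np.uint8)) == 255).all()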
2f5617429f66a6d0657515f7369c5eaec22f1b62f4ab0860e36a8057abd50b3e
def __call__(self, img: torch.Tensor): '\n\n :param img: [C, H, W]\n :return:\n ' if (random.random() >= self.p): return img pad_len = random.randint(1, self.max_pad_len) pad_shape = list(img.shape) pad_shape[(- 1)] = pad_len padding = torch.zeros(pad_shape, dtype=img.dtype, device=img.device) return torch.cat((img, padding), dim=(- 1))
:param img: [C, H, W] :return:
cnocr/data_utils/aug.py
__call__
breezedeus/cnocr
1,562
python
def __call__(self, img: torch.Tensor): '\n\n :param img: [C, H, W]\n :return:\n ' if (random.random() >= self.p): return img pad_len = random.randint(1, self.max_pad_len) pad_shape = list(img.shape) pad_shape[(- 1)] = pad_len padding = torch.zeros(pad_shape, dtype=img.dtype, device=img.device) return torch.cat((img, padding), dim=(- 1))
def __call__(self, img: torch.Tensor): '\n\n :param img: [C, H, W]\n :return:\n ' if (random.random() >= self.p): return img pad_len = random.randint(1, self.max_pad_len) pad_shape = list(img.shape) pad_shape[(- 1)] = pad_len padding = torch.zeros(pad_shape, dtype=img.dtype, device=img.device) return torch.cat((img, padding), dim=(- 1))<|docstring|>:param img: [C, H, W] :return:<|endoftext|>
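Similarly, the tensor-padding __call__ above appends between 1 and max_pad_len zero-valued columns along the width axis with probability p. A self-contained sketch with an assumed class name and constructor:

import random
import torch

class RandomWidthPadding:                # class name and constructor are assumptions
    def __init__(self, p=0.5, max_pad_len=4):
        self.p = p
        self.max_pad_len = max_pad_len

    def __call__(self, img: torch.Tensor):
        # With probability p, append 1..max_pad_len zero columns along the last (width) dim.
        if random.random() >= self.p:
            return img
        pad_len = random.randint(1, self.max_pad_len)
        pad_shape = list(img.shape)
        pad_shape[-1] = pad_len
        padding = torch.zeros(pad_shape, dtype=img.dtype, device=img.device)
        return torch.cat((img, padding), dim=-1)

x = torch.ones(3, 32, 100)               # [C, H, W], matching the docstring
assert RandomWidthPadding(p=1.0)(x).shape[-1] > 100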
3169287f5d289f9a248008c5e01a59194831ad14b9923a6a57b5e297177b4915
def pickle_write(content, name, append=1): 'function to open file, pickle dump, then close' f = (open(name, 'ab') if append else open(name, 'wb')) pickle.dump(content, f) f.close()
function to open file, pickle dump, then close
forge/blade/systems/visualizer/visualizer.py
pickle_write
LYX0429/neural-mmo
4
python
def pickle_write(content, name, append=1): f = (open(name, 'ab') if append else open(name, 'wb')) pickle.dump(content, f) f.close()
def pickle_write(content, name, append=1): f = (open(name, 'ab') if append else open(name, 'wb')) pickle.dump(content, f) f.close()<|docstring|>function to open file, pickle dump, then close<|endoftext|>
b3ae758ef71ca8697b076523e0697ca09ae3d99c981329ffb79e709b91d3e116
def pickle_read(name): 'function to open file, pickle load, then close' f = open(name, 'rb') ret = pickle.load(f) f.close() return ret
function to open file, pickle load, then close
forge/blade/systems/visualizer/visualizer.py
pickle_read
LYX0429/neural-mmo
4
python
def pickle_read(name): f = open(name, 'rb') ret = pickle.load(f) f.close() return ret
def pickle_read(name): f = open(name, 'rb') ret = pickle.load(f) f.close() return ret<|docstring|>function to open file, pickle load, then close<|endoftext|>
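One subtlety in the pickle_write/pickle_read pair above: with append=1 the file accumulates several concatenated pickles, while pickle_read performs a single load and therefore returns only the first object. A companion sketch (not in the neural-mmo repository) that reads them all:

import pickle

def pickle_read_all(name):
    # Read every object that pickle_write appended to `name`, not just the first.
    items = []
    with open(name, 'rb') as f:
        while True:
            try:
                items.append(pickle.load(f))
            except EOFError:
                break
    return items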
5da824118f17a11ac7ee7c1380b874bbef059adfcd8f9cc477f1c7b546840486
def __init__(self, config): "Visualizes a stream of data with threaded refreshing. To\n add items, initialize using 'keys' kwarg or add to packet in\n stream()\n Args:\n keys : List of object names (str) to be displayed on market\n history_len : How far back to plot data\n scales : Scales for visualization\n title : Title of graph\n x : Name of x axis data\n ylabel : Name of y axis on plot\n " self.colors = [] for color in [Neon.GREEN, Neon.CYAN, Neon.BLUE]: color = bokeh.colors.RGB(*color.rgb) self.colors.append(color) self.history_len = config.HISTORY_LEN self.title = config.TITLE self.ylabel = config.YLABEL self.x = config.XAXIS self.XAXIS = config.XAXIS self.scales = config.SCALES self.scale = config.SCALES[0] self.title = config.TITLE self.log = config.LOG self.load = config.LOAD_EXP self.filename = config.NAME self.data = defaultdict(list) self.dataSource = {} self.keys = 'lifetime'.split() for key in self.keys: self.dataSource[key] = [1, 2] self.dataSource[(key + '_x')] = [1, 2] self.dataSource[(key + '_lower')] = [1, 2] self.dataSource[(key + '_upper')] = [1, 2] self.dataSource[(key + '_smooth')] = [1, 2]
Visualizes a stream of data with threaded refreshing. To add items, initialize using 'keys' kwarg or add to packet in stream() Args: keys : List of object names (str) to be displayed on market history_len : How far back to plot data scales : Scales for visualization title : Title of graph x : Name of x axis data ylabel : Name of y axis on plot
forge/blade/systems/visualizer/visualizer.py
__init__
LYX0429/neural-mmo
4
python
def __init__(self, config): "Visualizes a stream of data with threaded refreshing. To\n add items, initialize using 'keys' kwarg or add to packet in\n stream()\n Args:\n keys : List of object names (str) to be displayed on market\n history_len : How far back to plot data\n scales : Scales for visualization\n title : Title of graph\n x : Name of x axis data\n ylabel : Name of y axis on plot\n " self.colors = [] for color in [Neon.GREEN, Neon.CYAN, Neon.BLUE]: color = bokeh.colors.RGB(*color.rgb) self.colors.append(color) self.history_len = config.HISTORY_LEN self.title = config.TITLE self.ylabel = config.YLABEL self.x = config.XAXIS self.XAXIS = config.XAXIS self.scales = config.SCALES self.scale = config.SCALES[0] self.title = config.TITLE self.log = config.LOG self.load = config.LOAD_EXP self.filename = config.NAME self.data = defaultdict(list) self.dataSource = {} self.keys = 'lifetime'.split() for key in self.keys: self.dataSource[key] = [1, 2] self.dataSource[(key + '_x')] = [1, 2] self.dataSource[(key + '_lower')] = [1, 2] self.dataSource[(key + '_upper')] = [1, 2] self.dataSource[(key + '_smooth')] = [1, 2]
def __init__(self, config): "Visualizes a stream of data with threaded refreshing. To\n add items, initialize using 'keys' kwarg or add to packet in\n stream()\n Args:\n keys : List of object names (str) to be displayed on market\n history_len : How far back to plot data\n scales : Scales for visualization\n title : Title of graph\n x : Name of x axis data\n ylabel : Name of y axis on plot\n " self.colors = [] for color in [Neon.GREEN, Neon.CYAN, Neon.BLUE]: color = bokeh.colors.RGB(*color.rgb) self.colors.append(color) self.history_len = config.HISTORY_LEN self.title = config.TITLE self.ylabel = config.YLABEL self.x = config.XAXIS self.XAXIS = config.XAXIS self.scales = config.SCALES self.scale = config.SCALES[0] self.title = config.TITLE self.log = config.LOG self.load = config.LOAD_EXP self.filename = config.NAME self.data = defaultdict(list) self.dataSource = {} self.keys = 'lifetime'.split() for key in self.keys: self.dataSource[key] = [1, 2] self.dataSource[(key + '_x')] = [1, 2] self.dataSource[(key + '_lower')] = [1, 2] self.dataSource[(key + '_upper')] = [1, 2] self.dataSource[(key + '_smooth')] = [1, 2]<|docstring|>Visualizes a stream of data with threaded refreshing. To add items, initialize using 'keys' kwarg or add to packet in stream() Args: keys : List of object names (str) to be displayed on market history_len : How far back to plot data scales : Scales for visualization title : Title of graph x : Name of x axis data ylabel : Name of y axis on plot<|endoftext|>
3fe6425e54903ad561c9bca1575fb36e60a3898ff325193a696d1e2e26071434
def stream(self): 'Wrapper function for source.stream to enable\n adding new items mid-stream. Overwrite graph\n with new figure if packet has different keys.\n Args:\n packet: dictionary of singleton lists' self.dataSource = dict(self.data.copy()) if self.log: pickle_write(self.dataSource, self.filename) self.source.stream(self.dataSource, self.history_len) self.doc.remove_root(self.structure) self.init(self.doc)
Wrapper function for source.stream to enable adding new items mid-stream. Overwrite graph with new figure if packet has different keys. Args: packet: dictionary of singleton lists
forge/blade/systems/visualizer/visualizer.py
stream
LYX0429/neural-mmo
4
python
def stream(self): 'Wrapper function for source.stream to enable\n adding new items mid-stream. Overwrite graph\n with new figure if packet has different keys.\n Args:\n packet: dictionary of singleton lists' self.dataSource = dict(self.data.copy()) if self.log: pickle_write(self.dataSource, self.filename) self.source.stream(self.dataSource, self.history_len) self.doc.remove_root(self.structure) self.init(self.doc)
def stream(self): 'Wrapper function for source.stream to enable\n adding new items mid-stream. Overwrite graph\n with new figure if packet has different keys.\n Args:\n packet: dictionary of singleton lists' self.dataSource = dict(self.data.copy()) if self.log: pickle_write(self.dataSource, self.filename) self.source.stream(self.dataSource, self.history_len) self.doc.remove_root(self.structure) self.init(self.doc)<|docstring|>Wrapper function for source.stream to enable adding new items mid-stream. Overwrite graph with new figure if packet has different keys. Args: packet: dictionary of singleton lists<|endoftext|>
94b205e0d9afeb91e0dbd1f4e993b7749613f711877c2e0d10a54a5478c36436
def __init__(self, middleman, config): ' Runs an asynchronous Bokeh data streaming server.\n \n Args:\n market : The market to visualize\n args : Additional arguments\n kwargs : Additional keyword arguments\n ' self.analytics = Analytics(config) self.middleman = middleman self.thread = None server = Server({'/': self.init}, io_loop=IOLoop.current(), port=config.PORT, num_procs=1) server.start() self.server = server server.io_loop.add_callback(server.show, '/') server.io_loop.start()
Runs an asynchronous Bokeh data streaming server. Args: market : The market to visualize args : Additional arguments kwargs : Additional keyword arguments
forge/blade/systems/visualizer/visualizer.py
__init__
LYX0429/neural-mmo
4
python
def __init__(self, middleman, config): ' Runs an asynchronous Bokeh data streaming server.\n \n Args:\n market : The market to visualize\n args : Additional arguments\n kwargs : Additional keyword arguments\n ' self.analytics = Analytics(config) self.middleman = middleman self.thread = None server = Server({'/': self.init}, io_loop=IOLoop.current(), port=config.PORT, num_procs=1) server.start() self.server = server server.io_loop.add_callback(server.show, '/') server.io_loop.start()
def __init__(self, middleman, config): ' Runs an asynchronous Bokeh data streaming server.\n \n Args:\n market : The market to visualize\n args : Additional arguments\n kwargs : Additional keyword arguments\n ' self.analytics = Analytics(config) self.middleman = middleman self.thread = None server = Server({'/': self.init}, io_loop=IOLoop.current(), port=config.PORT, num_procs=1) server.start() self.server = server server.io_loop.add_callback(server.show, '/') server.io_loop.start()<|docstring|>Runs an asynchronous Bokeh data streaming server. Args: market : The market to visualize args : Additional arguments kwargs : Additional keyword arguments<|endoftext|>
df44bd0b694e189a156cbfbf6a11c3b376c12329bb44883ead94ea24e820c93d
def init(self, doc): 'Initialize document and threaded update loop\n Args:\n doc: A Bokeh document\n ' self.analytics.init(doc) self.doc = doc self.thread = Thread(target=self.update, args=[]) self.thread.start() self.started = True
Initialize document and threaded update loop Args: doc: A Bokeh document
forge/blade/systems/visualizer/visualizer.py
init
LYX0429/neural-mmo
4
python
def init(self, doc): 'Initialize document and threaded update loop\n Args:\n doc: A Bokeh document\n ' self.analytics.init(doc) self.doc = doc self.thread = Thread(target=self.update, args=[]) self.thread.start() self.started = True
def init(self, doc): 'Initialize document and threaded update loop\n Args:\n doc: A Bokeh document\n ' self.analytics.init(doc) self.doc = doc self.thread = Thread(target=self.update, args=[]) self.thread.start() self.started = True<|docstring|>Initialize document and threaded update loop Args: doc: A Bokeh document<|endoftext|>
fe5c4e2b8fae682bd4eff60b4f659134cf9e1f54c669f2da8e07a69646b30418
def update(self): 'Blocking update call to be run in a separate thread\n Ingests packets from a remote market and streams to Bokeh client' self.n = 0 while True: time.sleep(0.05) if (self.thread is None): continue if ray.get(self.middleman.getShutdown.remote()): self.middleman.setData.remote(self.analytics.data) sys.exit(0) packet = ray.get(self.middleman.getData.remote()) if (packet is None): continue self.analytics.update(packet) self.analytics.resample() self.doc.add_next_tick_callback(partial(self.stream))
Blocking update call to be run in a separate thread Ingests packets from a remote market and streams to Bokeh client
forge/blade/systems/visualizer/visualizer.py
update
LYX0429/neural-mmo
4
python
def update(self): 'Blocking update call to be run in a separate thread\n Ingests packets from a remote market and streams to Bokeh client' self.n = 0 while True: time.sleep(0.05) if (self.thread is None): continue if ray.get(self.middleman.getShutdown.remote()): self.middleman.setData.remote(self.analytics.data) sys.exit(0) packet = ray.get(self.middleman.getData.remote()) if (packet is None): continue self.analytics.update(packet) self.analytics.resample() self.doc.add_next_tick_callback(partial(self.stream))
def update(self): 'Blocking update call to be run in a separate thread\n Ingests packets from a remote market and streams to Bokeh client' self.n = 0 while True: time.sleep(0.05) if (self.thread is None): continue if ray.get(self.middleman.getShutdown.remote()): self.middleman.setData.remote(self.analytics.data) sys.exit(0) packet = ray.get(self.middleman.getData.remote()) if (packet is None): continue self.analytics.update(packet) self.analytics.resample() self.doc.add_next_tick_callback(partial(self.stream))<|docstring|>Blocking update call to be run in a separate thread Ingests packets from a remote market and streams to Bokeh client<|endoftext|>
de616b19a371cc4a7549999e79b5cfab563724a9d7facb38f706eb30505e3c02
@gen.coroutine def stream(self): 'Stream current data buffer to Bokeh client' self.analytics.stream()
Stream current data buffer to Bokeh client
forge/blade/systems/visualizer/visualizer.py
stream
LYX0429/neural-mmo
4
python
@gen.coroutine def stream(self): self.analytics.stream()
@gen.coroutine def stream(self): self.analytics.stream()<|docstring|>Stream current data buffer to Bokeh client<|endoftext|>
67e7680ef5d74a0c0647cb59f373b944ae6c611cdea402c9b02e79d01a18e042
def __init__(self): 'Remote data buffer for two processes to dump and recv data.\n Interacts with Market and BokehServer.\n This is probably not safe' self.data = None self.shutdown = 0
Remote data buffer for two processes to dump and recv data. Interacts with Market and BokehServer. This is probably not safe
forge/blade/systems/visualizer/visualizer.py
__init__
LYX0429/neural-mmo
4
python
def __init__(self): 'Remote data buffer for two processes to dump and recv data.\n Interacts with Market and BokehServer.\n This is probably not safe' self.data = None self.shutdown = 0
def __init__(self): 'Remote data buffer for two processes to dump and recv data.\n Interacts with Market and BokehServer.\n This is probably not safe' self.data = None self.shutdown = 0<|docstring|>Remote data buffer for two processes to dump and recv data. Interacts with Market and BokehServer. This is probably not safe<|endoftext|>
95a5ed26cfb8d487d457e2f41808eb1c8bb1157d1cc2c4caf00f47096ee942f4
def getData(self): 'Get data from buffer\n Returns:\n data: From buffer\n ' data = self.data self.data = None return data
Get data from buffer Returns: data: From buffer
forge/blade/systems/visualizer/visualizer.py
getData
LYX0429/neural-mmo
4
python
def getData(self): 'Get data from buffer\n Returns:\n data: From buffer\n ' data = self.data self.data = None return data
def getData(self): 'Get data from buffer\n Returns:\n data: From buffer\n ' data = self.data self.data = None return data<|docstring|>Get data from buffer Returns: data: From buffer<|endoftext|>
039ec34e5615f601b42a38047baff9831439f7982584785eb5faf228208981e0
def setData(self, data): 'Set buffer data\n Args:\n data: To set buffer\n ' self.data = data.copy()
Set buffer data Args: data: To set buffer
forge/blade/systems/visualizer/visualizer.py
setData
LYX0429/neural-mmo
4
python
def setData(self, data): 'Set buffer data\n Args:\n data: To set buffer\n ' self.data = data.copy()
def setData(self, data): 'Set buffer data\n Args:\n data: To set buffer\n ' self.data = data.copy()<|docstring|>Set buffer data Args: data: To set buffer<|endoftext|>
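The Middleman records above (getData/setData, plus the "probably not safe" note in its __init__) describe a tiny hand-off buffer between the trainer and the Bokeh server: a write replaces the buffer, a read returns it and clears it. A local, ray-free sketch of the same pattern:

class LocalMiddleman:
    # Ray-free sketch of the hand-off buffer described in the records above.
    def __init__(self):
        self.data = None

    def setData(self, data):
        self.data = data.copy()

    def getData(self):
        data, self.data = self.data, None   # return the buffer and clear it, as in getData above
        return data

m = LocalMiddleman()
m.setData({'lifetime': [1, 2]})
assert m.getData() == {'lifetime': [1, 2]}
assert m.getData() is None                  # a second read finds the buffer empty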
da64a3ae64f2074c3a3f341385fce968d3832a05a4045c727c1ac6e68b0a9eda
def switch_scale(attr, old, new): "Callback for RadioButtonGroup to switch tick scale\n and refresh document\n Args:\n attr: variable to be changed, in this case 'active'\n old: old index of active button\n new: new index of active button\n " self.scale = self.scales[new] self.source.data = self.data[self.scale]
Callback for RadioButtonGroup to switch tick scale and refresh document Args: attr: variable to be changed, in this case 'active' old: old index of active button new: new index of active button
forge/blade/systems/visualizer/visualizer.py
switch_scale
LYX0429/neural-mmo
4
python
def switch_scale(attr, old, new): "Callback for RadioButtonGroup to switch tick scale\n and refresh document\n Args:\n attr: variable to be changed, in this case 'active'\n old: old index of active button\n new: new index of active button\n " self.scale = self.scales[new] self.source.data = self.data[self.scale]
def switch_scale(attr, old, new): "Callback for RadioButtonGroup to switch tick scale\n and refresh document\n Args:\n attr: variable to be changed, in this case 'active'\n old: old index of active button\n new: new index of active button\n " self.scale = self.scales[new] self.source.data = self.data[self.scale]<|docstring|>Callback for RadioButtonGroup to switch tick scale and refresh document Args: attr: variable to be changed, in this case 'active' old: old index of active button new: new index of active button<|endoftext|>
3206ea63ff836ad9d6f43a93f1b2c9db592c61050dde1bddbd269c06582f85ff
def learn(self): '\n Performs numIters iterations with numEps episodes of self-play in each\n iteration. After every iteration, it retrains neural network with\n examples in trainExamples (which has a maximium length of maxlenofQueue).\n It then pits the new neural network against the old one and accepts it\n only if it wins >= updateThreshold fraction of games.\n ' import time gamesNum = (self.args.numSelfPlayProcess * self.args.numPerProcessSelfPlay) MyLogger.info('============== New Run ==============') MyLogger.info('sims: {} cpuct: {} gamesNum: {} coeff: {} evalDepth: {} alpha: {} eps: {}'.format(self.args.numMCTSSims, self.args.cpuct, gamesNum, self.args.coeff, self.args.evaluationDepth, self.args.alpha, self.args.epsilon)) for i in range(1, (self.args.numIters + 1)): start = time.time() print((('------ITER ' + str(i)) + '------')) iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue) temp = self.parallel_self_play() iterationTrainExamples += temp self.trainExamplesHistory.append(iterationTrainExamples) self.parallel_train_network(i) self.trainExamplesHistory.clear() self.parallel_self_test_play(i) if self.args.multiCPU: resultRand = self.parallel_check_against(i, 'rp') resultHeur = self.parallel_check_against(i, 'heuristic') resultMCTS = self.parallel_check_against(i, 'n1p') MyLogger.info('Iter:{} Heuristic: {} Random: {} MCTS: {}'.format(i, resultHeur, resultRand, resultMCTS)) else: logCurrentCapabilities(self.game, i, self.args) end = time.time() diff = (end - start) print(diff)
Performs numIters iterations with numEps episodes of self-play in each iteration. After every iteration, it retrains neural network with examples in trainExamples (which has a maximium length of maxlenofQueue). It then pits the new neural network against the old one and accepts it only if it wins >= updateThreshold fraction of games.
Coach.py
learn
danielvarga/alpha-zero-general
0
python
def learn(self): '\n Performs numIters iterations with numEps episodes of self-play in each\n iteration. After every iteration, it retrains neural network with\n examples in trainExamples (which has a maximium length of maxlenofQueue).\n It then pits the new neural network against the old one and accepts it\n only if it wins >= updateThreshold fraction of games.\n ' import time gamesNum = (self.args.numSelfPlayProcess * self.args.numPerProcessSelfPlay) MyLogger.info('============== New Run ==============') MyLogger.info('sims: {} cpuct: {} gamesNum: {} coeff: {} evalDepth: {} alpha: {} eps: {}'.format(self.args.numMCTSSims, self.args.cpuct, gamesNum, self.args.coeff, self.args.evaluationDepth, self.args.alpha, self.args.epsilon)) for i in range(1, (self.args.numIters + 1)): start = time.time() print((('------ITER ' + str(i)) + '------')) iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue) temp = self.parallel_self_play() iterationTrainExamples += temp self.trainExamplesHistory.append(iterationTrainExamples) self.parallel_train_network(i) self.trainExamplesHistory.clear() self.parallel_self_test_play(i) if self.args.multiCPU: resultRand = self.parallel_check_against(i, 'rp') resultHeur = self.parallel_check_against(i, 'heuristic') resultMCTS = self.parallel_check_against(i, 'n1p') MyLogger.info('Iter:{} Heuristic: {} Random: {} MCTS: {}'.format(i, resultHeur, resultRand, resultMCTS)) else: logCurrentCapabilities(self.game, i, self.args) end = time.time() diff = (end - start) print(diff)
def learn(self): '\n Performs numIters iterations with numEps episodes of self-play in each\n iteration. After every iteration, it retrains neural network with\n examples in trainExamples (which has a maximium length of maxlenofQueue).\n It then pits the new neural network against the old one and accepts it\n only if it wins >= updateThreshold fraction of games.\n ' import time gamesNum = (self.args.numSelfPlayProcess * self.args.numPerProcessSelfPlay) MyLogger.info('============== New Run ==============') MyLogger.info('sims: {} cpuct: {} gamesNum: {} coeff: {} evalDepth: {} alpha: {} eps: {}'.format(self.args.numMCTSSims, self.args.cpuct, gamesNum, self.args.coeff, self.args.evaluationDepth, self.args.alpha, self.args.epsilon)) for i in range(1, (self.args.numIters + 1)): start = time.time() print((('------ITER ' + str(i)) + '------')) iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue) temp = self.parallel_self_play() iterationTrainExamples += temp self.trainExamplesHistory.append(iterationTrainExamples) self.parallel_train_network(i) self.trainExamplesHistory.clear() self.parallel_self_test_play(i) if self.args.multiCPU: resultRand = self.parallel_check_against(i, 'rp') resultHeur = self.parallel_check_against(i, 'heuristic') resultMCTS = self.parallel_check_against(i, 'n1p') MyLogger.info('Iter:{} Heuristic: {} Random: {} MCTS: {}'.format(i, resultHeur, resultRand, resultMCTS)) else: logCurrentCapabilities(self.game, i, self.args) end = time.time() diff = (end - start) print(diff)<|docstring|>Performs numIters iterations with numEps episodes of self-play in each iteration. After every iteration, it retrains neural network with examples in trainExamples (which has a maximium length of maxlenofQueue). It then pits the new neural network against the old one and accepts it only if it wins >= updateThreshold fraction of games.<|endoftext|>
f486fcc4a5e645644d38881afe05b66e1e0070987156154806b39456baa1b027
def check_key(): '\n check TINY_KEY\n Look it up in the code; then in the key file; if not found there, the click decorator will look in the environment variables; if still not found, prompt the user to enter it manually\n :return: bool False: no user input required; True: the user enters the key\n ' _tiny_key = settings.TINY_KEY if (not _tiny_key): _tiny_key = os.environ.get('TINY_KEY') if (_tiny_key is None): if os.path.exists(settings.TINY_KEY_FILE): with open(settings.TINY_KEY_FILE, 'r') as f: _tiny_key = f.read() ret = (True if (not _tiny_key) else False) return (ret, _tiny_key)
check TINY_KEY Look it up in the code; then in the key file; if not found there, the click decorator will look in the environment variables; if still not found, prompt the user to enter it manually :return: bool False: no user input required; True: the user enters the key
scripts/yst.py
check_key
imoyao/PyTinyImg
0
python
def check_key(): '\n check TINY_KEY\n Look it up in the code; then in the key file; if not found there, the click decorator will look in the environment variables; if still not found, prompt the user to enter it manually\n :return: bool False: no user input required; True: the user enters the key\n ' _tiny_key = settings.TINY_KEY if (not _tiny_key): _tiny_key = os.environ.get('TINY_KEY') if (_tiny_key is None): if os.path.exists(settings.TINY_KEY_FILE): with open(settings.TINY_KEY_FILE, 'r') as f: _tiny_key = f.read() ret = (True if (not _tiny_key) else False) return (ret, _tiny_key)
def check_key(): '\n check TINY_KEY\n Look it up in the code; then in the key file; if not found there, the click decorator will look in the environment variables; if still not found, prompt the user to enter it manually\n :return: bool False: no user input required; True: the user enters the key\n ' _tiny_key = settings.TINY_KEY if (not _tiny_key): _tiny_key = os.environ.get('TINY_KEY') if (_tiny_key is None): if os.path.exists(settings.TINY_KEY_FILE): with open(settings.TINY_KEY_FILE, 'r') as f: _tiny_key = f.read() ret = (True if (not _tiny_key) else False) return (ret, _tiny_key)<|docstring|>check TINY_KEY Look it up in the code; then in the key file; if not found there, the click decorator will look in the environment variables; if still not found, prompt the user to enter it manually :return: bool False: no user input required; True: the user enters the key<|endoftext|>
9dbc367204717250f5d56db0a83e32171244c2e6080530c3681086a27638ddd8
def show_version(ctx, param, value): '\n show the version\n :param ctx:\n :param param: del this will get: Warning: Invoked legacy parameter callback……\n :param value:\n :return:\n ' if ((not value) or ctx.resilient_parsing): return click.echo(settings.VERSION) ctx.exit()
show the version :param ctx: :param param: del this will get: Warning: Invoked legacy parameter callback…… :param value: :return:
scripts/yst.py
show_version
imoyao/PyTinyImg
0
python
def show_version(ctx, param, value): '\n show the version\n :param ctx:\n :param param: del this will get: Warning: Invoked legacy parameter callback……\n :param value:\n :return:\n ' if ((not value) or ctx.resilient_parsing): return click.echo(settings.VERSION) ctx.exit()
def show_version(ctx, param, value): '\n show the version\n :param ctx:\n :param param: del this will get: Warning: Invoked legacy parameter callback……\n :param value:\n :return:\n ' if ((not value) or ctx.resilient_parsing): return click.echo(settings.VERSION) ctx.exit()<|docstring|>show the version :param ctx: :param param: del this will get: Warning: Invoked legacy parameter callback…… :param value: :return:<|endoftext|>
1b37d03b4259bd0e0b1e16cf622f595d512f39ff13e32b1edeea8b918e70eb54
def run(self): "\n Build image inside current environment using imagebuilder;\n It's expected this may run within (privileged) docker container.\n\n Returns:\n BuildResult\n " builder = self.workflow.builder image = builder.image.to_str() allow_repo_dir_in_dockerignore(builder.df_dir) process_args = ['imagebuilder', '-t', image] for (buildarg, buildargval) in builder.buildargs.items(): process_args.append('--build-arg') process_args.append(('%s=%s' % (buildarg, buildargval))) process_args.append(builder.df_dir) ib_process = subprocess.Popen(process_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, encoding='utf-8', errors='replace') self.log.debug('imagebuilder build has begun; waiting for it to finish') self.log.debug(process_args) output = [] while True: poll = ib_process.poll() out = ib_process.stdout.readline() if out: self.log.info('%s', out.rstrip()) output.append(out) elif (poll is not None): break if (ib_process.returncode != 0): err = (output[(- 1)] if output else '<imagebuilder had bad exit code but no output>') return BuildResult(logs=output, fail_reason='image build failed (rc={}): {}'.format(ib_process.returncode, err)) image_id = builder.get_built_image_info()['Id'] if (':' not in image_id): image_id = 'sha256:{}'.format(image_id) self.log.info('fetching image %s from docker', image) output_path = os.path.join(self.workflow.source.workdir, EXPORTED_SQUASHED_IMAGE_NAME) try: with open(output_path, 'w') as image_file: image_file.write(self.tasker.get_image(image).data) except AttributeError: with open(output_path, 'wb') as image_file: for chunk in self.tasker.get_image(image): image_file.write(chunk) img_metadata = get_exported_image_metadata(output_path, IMAGE_TYPE_DOCKER_ARCHIVE) self.workflow.exported_image_sequence.append(img_metadata) return BuildResult(logs=output, image_id=image_id, skip_layer_squash=True)
Build image inside current environment using imagebuilder; It's expected this may run within (privileged) docker container. Returns: BuildResult
atomic_reactor/plugins/build_imagebuilder.py
run
mkosiarc/atomic-reactor
0
python
def run(self): "\n Build image inside current environment using imagebuilder;\n It's expected this may run within (privileged) docker container.\n\n Returns:\n BuildResult\n " builder = self.workflow.builder image = builder.image.to_str() allow_repo_dir_in_dockerignore(builder.df_dir) process_args = ['imagebuilder', '-t', image] for (buildarg, buildargval) in builder.buildargs.items(): process_args.append('--build-arg') process_args.append(('%s=%s' % (buildarg, buildargval))) process_args.append(builder.df_dir) ib_process = subprocess.Popen(process_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, encoding='utf-8', errors='replace') self.log.debug('imagebuilder build has begun; waiting for it to finish') self.log.debug(process_args) output = [] while True: poll = ib_process.poll() out = ib_process.stdout.readline() if out: self.log.info('%s', out.rstrip()) output.append(out) elif (poll is not None): break if (ib_process.returncode != 0): err = (output[(- 1)] if output else '<imagebuilder had bad exit code but no output>') return BuildResult(logs=output, fail_reason='image build failed (rc={}): {}'.format(ib_process.returncode, err)) image_id = builder.get_built_image_info()['Id'] if (':' not in image_id): image_id = 'sha256:{}'.format(image_id) self.log.info('fetching image %s from docker', image) output_path = os.path.join(self.workflow.source.workdir, EXPORTED_SQUASHED_IMAGE_NAME) try: with open(output_path, 'w') as image_file: image_file.write(self.tasker.get_image(image).data) except AttributeError: with open(output_path, 'wb') as image_file: for chunk in self.tasker.get_image(image): image_file.write(chunk) img_metadata = get_exported_image_metadata(output_path, IMAGE_TYPE_DOCKER_ARCHIVE) self.workflow.exported_image_sequence.append(img_metadata) return BuildResult(logs=output, image_id=image_id, skip_layer_squash=True)
def run(self): "\n Build image inside current environment using imagebuilder;\n It's expected this may run within (privileged) docker container.\n\n Returns:\n BuildResult\n " builder = self.workflow.builder image = builder.image.to_str() allow_repo_dir_in_dockerignore(builder.df_dir) process_args = ['imagebuilder', '-t', image] for (buildarg, buildargval) in builder.buildargs.items(): process_args.append('--build-arg') process_args.append(('%s=%s' % (buildarg, buildargval))) process_args.append(builder.df_dir) ib_process = subprocess.Popen(process_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, encoding='utf-8', errors='replace') self.log.debug('imagebuilder build has begun; waiting for it to finish') self.log.debug(process_args) output = [] while True: poll = ib_process.poll() out = ib_process.stdout.readline() if out: self.log.info('%s', out.rstrip()) output.append(out) elif (poll is not None): break if (ib_process.returncode != 0): err = (output[(- 1)] if output else '<imagebuilder had bad exit code but no output>') return BuildResult(logs=output, fail_reason='image build failed (rc={}): {}'.format(ib_process.returncode, err)) image_id = builder.get_built_image_info()['Id'] if (':' not in image_id): image_id = 'sha256:{}'.format(image_id) self.log.info('fetching image %s from docker', image) output_path = os.path.join(self.workflow.source.workdir, EXPORTED_SQUASHED_IMAGE_NAME) try: with open(output_path, 'w') as image_file: image_file.write(self.tasker.get_image(image).data) except AttributeError: with open(output_path, 'wb') as image_file: for chunk in self.tasker.get_image(image): image_file.write(chunk) img_metadata = get_exported_image_metadata(output_path, IMAGE_TYPE_DOCKER_ARCHIVE) self.workflow.exported_image_sequence.append(img_metadata) return BuildResult(logs=output, image_id=image_id, skip_layer_squash=True)<|docstring|>Build image inside current environment using imagebuilder; It's expected this may run within (privileged) docker container. Returns: BuildResult<|endoftext|>
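The long run() method above relies on a generic pattern: launch a subprocess, echo its output line by line, and surface the return code. A stripped-down sketch of just that loop (the command is illustrative, not atomic-reactor's imagebuilder invocation):

import subprocess
import sys

def run_and_stream(cmd):
    # Echo a subprocess's combined stdout/stderr line by line while collecting it.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            universal_newlines=True)
    output = []
    for line in proc.stdout:
        print(line.rstrip())
        output.append(line)
    return proc.wait(), output

rc, logs = run_and_stream([sys.executable, '--version'])
assert rc == 0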
209d0076ee22914df44c4066529312cbc8f40f42efea78f6024dff8016f629c2
def evaluate_central(self, dataset, state): '\n Evaluates a model in central server mode.\n \n Arguments:\n dataset: tf Dataset, contains all the test set examples as a single \n tf Dataset.\n state: tff state, the federated training state of the model. \n Contains model weights\n\n Returns:\n accuracy of model in state on dataset provided\n ' keras_model = self.keras_model_fn() shape = tf.data.DatasetSpec.from_value(dataset)._element_spec[0].shape keras_model.build(shape) tff.learning.assign_weights_to_keras_model(keras_model, state.model) metrics = keras_model.evaluate(dataset) return (metrics[0].item(), metrics[1].item())
Evaluates a model in central server mode. Arguments: dataset: tf Dataset, contains all the test set examples as a single tf Dataset. state: tff state, the federated training state of the model. Contains model weights Returns: accuracy of model in state on dataset provided
experiments.py
evaluate_central
r-o-s-h-a-n/semisupervisedFL
5
python
def evaluate_central(self, dataset, state): '\n Evaluates a model in central server mode.\n \n Arguments:\n dataset: tf Dataset, contains all the test set examples as a single \n tf Dataset.\n state: tff state, the federated training state of the model. \n Contains model weights\n\n Returns:\n accuracy of model in state on dataset provided\n ' keras_model = self.keras_model_fn() shape = tf.data.DatasetSpec.from_value(dataset)._element_spec[0].shape keras_model.build(shape) tff.learning.assign_weights_to_keras_model(keras_model, state.model) metrics = keras_model.evaluate(dataset) return (metrics[0].item(), metrics[1].item())
def evaluate_central(self, dataset, state): '\n Evaluates a model in central server mode.\n \n Arguments:\n dataset: tf Dataset, contains all the test set examples as a single \n tf Dataset.\n state: tff state, the federated training state of the model. \n Contains model weights\n\n Returns:\n accuracy of model in state on dataset provided\n ' keras_model = self.keras_model_fn() shape = tf.data.DatasetSpec.from_value(dataset)._element_spec[0].shape keras_model.build(shape) tff.learning.assign_weights_to_keras_model(keras_model, state.model) metrics = keras_model.evaluate(dataset) return (metrics[0].item(), metrics[1].item())<|docstring|>Evaluates a model in central server mode. Arguments: dataset: tf Dataset, contains all the test set examples as a single tf Dataset. state: tff state, the federated training state of the model. Contains model weights Returns: accuracy of model in state on dataset provided<|endoftext|>
2e64f03580629c31460288fd78b859c85d9009e2a40ca36f029d05a2e9ba30b8
def evaluate_saved_model(self, dataset, model_fp=None): '\n Evaluates trained model in central server mode.\n \n Arguments:\n dataset: tf Dataset, contains all the test set examples as a single \n tf Dataset.\n model_fp: str, if model filepath is provided, it will load \n the model from file and evaluate on that. Otherwise, will \n evaluate the model at the last federated state.\n\n Returns:\n Nothing, but writes accuracy to file.\n ' keras_model = self.keras_model_fn.load_model_weights(model_fp) return keras_model.evaluate(dataset)
Evaluates trained model in central server mode. Arguments: dataset: tf Dataset, contains all the test set examples as a single tf Dataset. model_fp: str, if model filepath is provided, it will load the model from file and evaluate on that. Otherwise, will evaluate the model at the last federated state. Returns: Nothing, but writes accuracy to file.
experiments.py
evaluate_saved_model
r-o-s-h-a-n/semisupervisedFL
5
python
def evaluate_saved_model(self, dataset, model_fp=None): '\n        Evaluates trained model in central server mode.\n        \n        Arguments:\n            dataset: tf Dataset, contains all the test set examples as a single \n                tf Dataset.\n            model_fp: str, if model filepath is provided, it will load \n                the model from file and evaluate on that. Otherwise, will \n                evaluate the model at the last federated state.\n\n        Returns:\n            The loss and metrics from evaluating the loaded model on the dataset.\n        ' keras_model = self.keras_model_fn.load_model_weights(model_fp) return keras_model.evaluate(dataset)
def evaluate_saved_model(self, dataset, model_fp=None): '\n        Evaluates trained model in central server mode.\n        \n        Arguments:\n            dataset: tf Dataset, contains all the test set examples as a single \n                tf Dataset.\n            model_fp: str, if model filepath is provided, it will load \n                the model from file and evaluate on that. Otherwise, will \n                evaluate the model at the last federated state.\n\n        Returns:\n            The loss and metrics from evaluating the loaded model on the dataset.\n        ' keras_model = self.keras_model_fn.load_model_weights(model_fp) return keras_model.evaluate(dataset)<|docstring|>Evaluates trained model in central server mode. Arguments: dataset: tf Dataset, contains all the test set examples as a single tf Dataset. model_fp: str, if model filepath is provided, it will load the model from file and evaluate on that. Otherwise, will evaluate the model at the last federated state. Returns: The loss and metrics from evaluating the loaded model on the dataset.<|endoftext|>
1adf3830ab39b2637c0808c950583f1eee3db8a8d0ffe8c6c9f085086e4b65f3
def path(repo_ctx, additional_search_paths=[]): 'Return the value of the PATH environment variable that would be used by\n the which() command.' search_paths = additional_search_paths if (repo_ctx.os.name == 'mac os x'): search_paths = (search_paths + ['/usr/local/bin']) search_paths = (search_paths + ['/usr/bin', '/bin']) return ':'.join(search_paths)
Return the value of the PATH environment variable that would be used by the which() command.
third_party/drake_rules/execute.bzl
path
mingkaic/cortenn
2
python
def path(repo_ctx, additional_search_paths=[]): 'Return the value of the PATH environment variable that would be used by\n the which() command.' search_paths = additional_search_paths if (repo_ctx.os.name == 'mac os x'): search_paths = (search_paths + ['/usr/local/bin']) search_paths = (search_paths + ['/usr/bin', '/bin']) return ':'.join(search_paths)
def path(repo_ctx, additional_search_paths=[]): 'Return the value of the PATH environment variable that would be used by\n the which() command.' search_paths = additional_search_paths if (repo_ctx.os.name == 'mac os x'): search_paths = (search_paths + ['/usr/local/bin']) search_paths = (search_paths + ['/usr/bin', '/bin']) return ':'.join(search_paths)<|docstring|>Return the value of the PATH environment variable that would be used by the which() command.<|endoftext|>
6ea742387c29d746f0494e040bd419bb0c1aa187bda3594ddafc0baa3aeb9c72
def which(repo_ctx, program, additional_search_paths=[]): "Return the path of the given program or None if there is no such program\n in the PATH as defined by the path() function above. The value of the\n user's PATH environment variable is ignored.\n " exec_result = repo_ctx.execute(['which', program], environment={'PATH': path(repo_ctx, additional_search_paths)}) if (exec_result.return_code != 0): return None return repo_ctx.path(exec_result.stdout.strip())
Return the path of the given program or None if there is no such program in the PATH as defined by the path() function above. The value of the user's PATH environment variable is ignored.
third_party/drake_rules/execute.bzl
which
mingkaic/cortenn
2
python
def which(repo_ctx, program, additional_search_paths=[]): "Return the path of the given program or None if there is no such program\n in the PATH as defined by the path() function above. The value of the\n user's PATH environment variable is ignored.\n " exec_result = repo_ctx.execute(['which', program], environment={'PATH': path(repo_ctx, additional_search_paths)}) if (exec_result.return_code != 0): return None return repo_ctx.path(exec_result.stdout.strip())
def which(repo_ctx, program, additional_search_paths=[]): "Return the path of the given program or None if there is no such program\n in the PATH as defined by the path() function above. The value of the\n user's PATH environment variable is ignored.\n " exec_result = repo_ctx.execute(['which', program], environment={'PATH': path(repo_ctx, additional_search_paths)}) if (exec_result.return_code != 0): return None return repo_ctx.path(exec_result.stdout.strip())<|docstring|>Return the path of the given program or None if there is no such program in the PATH as defined by the path() function above. The value of the user's PATH environment variable is ignored.<|endoftext|>
d736d8f9cdb8f0ea856cf4537bf39dfb9a2caa5487eac9100fcbe23bbacca53f
def execute_and_return(repo_ctx, command, additional_search_paths=[]): 'Runs the `command` (list) and returns a status value. The return value\n is a struct with a field `error` that will be None on success or else a\n detailed message on command failure.\n ' if ('/' in command[0]): program = command[0] else: program = which(repo_ctx, command[0], additional_search_paths) if (not program): error = "Could not find a program named '{}'".format(command[0]) return struct(error=error) exec_result = repo_ctx.execute(([program] + command[1:])) if (exec_result.return_code == 0): error = None else: error = ('Failure running ' + ' '.join(["'{}'".format(x) for x in command])) if exec_result.stdout: error += ('\n' + exec_result.stdout) if exec_result.stderr: error += ('\n' + exec_result.stderr) return struct(error=error, stdout=exec_result.stdout)
Runs the `command` (list) and returns a status value. The return value is a struct with a field `error` that will be None on success or else a detailed message on command failure.
third_party/drake_rules/execute.bzl
execute_and_return
mingkaic/cortenn
2
python
def execute_and_return(repo_ctx, command, additional_search_paths=[]): 'Runs the `command` (list) and returns a status value. The return value\n is a struct with a field `error` that will be None on success or else a\n detailed message on command failure.\n ' if ('/' in command[0]): program = command[0] else: program = which(repo_ctx, command[0], additional_search_paths) if (not program): error = "Could not find a program named '{}'".format(command[0]) return struct(error=error) exec_result = repo_ctx.execute(([program] + command[1:])) if (exec_result.return_code == 0): error = None else: error = ('Failure running ' + ' '.join(["'{}'".format(x) for x in command])) if exec_result.stdout: error += ('\n' + exec_result.stdout) if exec_result.stderr: error += ('\n' + exec_result.stderr) return struct(error=error, stdout=exec_result.stdout)
def execute_and_return(repo_ctx, command, additional_search_paths=[]): 'Runs the `command` (list) and returns a status value. The return value\n is a struct with a field `error` that will be None on success or else a\n detailed message on command failure.\n ' if ('/' in command[0]): program = command[0] else: program = which(repo_ctx, command[0], additional_search_paths) if (not program): error = "Could not find a program named '{}'".format(command[0]) return struct(error=error) exec_result = repo_ctx.execute(([program] + command[1:])) if (exec_result.return_code == 0): error = None else: error = ('Failure running ' + ' '.join(["'{}'".format(x) for x in command])) if exec_result.stdout: error += ('\n' + exec_result.stdout) if exec_result.stderr: error += ('\n' + exec_result.stderr) return struct(error=error, stdout=exec_result.stdout)<|docstring|>Runs the `command` (list) and returns a status value. The return value is a struct with a field `error` that will be None on success or else a detailed message on command failure.<|endoftext|>
6d665fc09b5f16a4d214dd58529713686644f5c41bb581681cfd6312222319ef
def execute_or_fail(repo_ctx, command): 'Runs the `command` (list) and immediately fails on any error.\n Returns a struct with the stdout value.' result = execute_and_return(repo_ctx, command) if result.error: fail('Unable to complete setup for @{} repository: {}'.format(repo_ctx.name, result.error)) return result
Runs the `command` (list) and immediately fails on any error. Returns a struct with the stdout value.
third_party/drake_rules/execute.bzl
execute_or_fail
mingkaic/cortenn
2
python
def execute_or_fail(repo_ctx, command): 'Runs the `command` (list) and immediately fails on any error.\n Returns a struct with the stdout value.' result = execute_and_return(repo_ctx, command) if result.error: fail('Unable to complete setup for @{} repository: {}'.format(repo_ctx.name, result.error)) return result
def execute_or_fail(repo_ctx, command): 'Runs the `command` (list) and immediately fails on any error.\n Returns a struct with the stdout value.' result = execute_and_return(repo_ctx, command) if result.error: fail('Unable to complete setup for @{} repository: {}'.format(repo_ctx.name, result.error)) return result<|docstring|>Runs the `command` (list) and immediately fails on any error. Returns a struct with the stdout value.<|endoftext|>
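The four .bzl helpers above (path, which, execute_and_return, execute_or_fail) are meant to be called from inside a Bazel repository rule implementation. The sketch below is hypothetical usage, assuming the file is loadable at the label implied by the path field of these records; the rule name, the git probe, and the output file names are illustrative only, not part of the original repository.

load("//third_party/drake_rules:execute.bzl", "execute_or_fail", "which")

def _probe_git_impl(repo_ctx):
    # which() returns None when the program is missing from the sanitized PATH built by path().
    git = which(repo_ctx, "git")
    if not git:
        fail("git was not found on the PATH computed by path()")
    # execute_or_fail() aborts the repository setup on any non-zero exit code.
    result = execute_or_fail(repo_ctx, [str(git), "--version"])
    repo_ctx.file("git_version.txt", result.stdout)
    repo_ctx.file("BUILD.bazel", "")

probe_git = repository_rule(implementation = _probe_git_impl)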
a38bde9f8ff6ce410a1cab6536fd72935bbc359b80f5b3373b12640dfe9bccf8
def update_create_computations_fn_kwargs(arg_names: Iterable[Text], kwargs: Dict[(Text, Any)], eval_config: Optional[config_pb2.EvalConfig]=None, schema: Optional[schema_pb2.Schema]=None, model_names: Optional[List[Text]]=None, output_names: Optional[List[Text]]=None, sub_keys: Optional[List[Optional[SubKey]]]=None, aggregation_type: Optional[AggregationType]=None, class_weights: Optional[Dict[(int, float)]]=None, query_key: Optional[Text]=None, is_diff: Optional[bool]=False): "Updates create_computations_fn kwargs based on arg spec.\n\n Each metric's create_computations_fn is invoked with a variable set of\n parameters, depending on the argument names of the callable. If an argument\n name matches one of the reserved names, this function will update the kwargs\n with the appropriate value for that arg.\n\n Args:\n arg_names: The arg_names for the create_computations_fn.\n kwargs: The existing kwargs for create_computations_fn.\n eval_config: The value to use when `eval_config` is in arg_names.\n schema: The value to use when `schema` is in arg_names.\n model_names: The value to use when `model_names` is in arg_names.\n output_names: The value to use when `output_names` is in arg_names.\n sub_keys: The value to use when `sub_keys` is in arg_names.\n aggregation_type: The value to use when `aggregation_type` is in arg_names.\n class_weights: The value to use when `class_weights` is in arg_names.\n query_key: The value to use when `query_key` is in arg_names.\n is_diff: The value to use when `is_diff` is in arg_names.\n\n Returns:\n The kwargs passed as input, updated with the appropriate additional args.\n " if ('eval_config' in arg_names): kwargs['eval_config'] = eval_config if ('schema' in arg_names): kwargs['schema'] = schema if ('model_names' in arg_names): kwargs['model_names'] = model_names if ('output_names' in arg_names): kwargs['output_names'] = output_names if ('sub_keys' in arg_names): kwargs['sub_keys'] = sub_keys if ('aggregation_type' in arg_names): kwargs['aggregation_type'] = aggregation_type if ('class_weights' in arg_names): kwargs['class_weights'] = class_weights if ('query_key' in arg_names): kwargs['query_key'] = query_key if ('is_diff' in arg_names): kwargs['is_diff'] = is_diff return kwargs
Updates create_computations_fn kwargs based on arg spec. Each metric's create_computations_fn is invoked with a variable set of parameters, depending on the argument names of the callable. If an argument name matches one of the reserved names, this function will update the kwargs with the appropriate value for that arg. Args: arg_names: The arg_names for the create_computations_fn. kwargs: The existing kwargs for create_computations_fn. eval_config: The value to use when `eval_config` is in arg_names. schema: The value to use when `schema` is in arg_names. model_names: The value to use when `model_names` is in arg_names. output_names: The value to use when `output_names` is in arg_names. sub_keys: The value to use when `sub_keys` is in arg_names. aggregation_type: The value to use when `aggregation_type` is in arg_names. class_weights: The value to use when `class_weights` is in arg_names. query_key: The value to use when `query_key` is in arg_names. is_diff: The value to use when `is_diff` is in arg_names. Returns: The kwargs passed as input, updated with the appropriate additional args.
tensorflow_model_analysis/metrics/metric_types.py
update_create_computations_fn_kwargs
jaymessina3/model-analysis
1,118
python
def update_create_computations_fn_kwargs(arg_names: Iterable[Text], kwargs: Dict[(Text, Any)], eval_config: Optional[config_pb2.EvalConfig]=None, schema: Optional[schema_pb2.Schema]=None, model_names: Optional[List[Text]]=None, output_names: Optional[List[Text]]=None, sub_keys: Optional[List[Optional[SubKey]]]=None, aggregation_type: Optional[AggregationType]=None, class_weights: Optional[Dict[(int, float)]]=None, query_key: Optional[Text]=None, is_diff: Optional[bool]=False): "Updates create_computations_fn kwargs based on arg spec.\n\n Each metric's create_computations_fn is invoked with a variable set of\n parameters, depending on the argument names of the callable. If an argument\n name matches one of the reserved names, this function will update the kwargs\n with the appropriate value for that arg.\n\n Args:\n arg_names: The arg_names for the create_computations_fn.\n kwargs: The existing kwargs for create_computations_fn.\n eval_config: The value to use when `eval_config` is in arg_names.\n schema: The value to use when `schema` is in arg_names.\n model_names: The value to use when `model_names` is in arg_names.\n output_names: The value to use when `output_names` is in arg_names.\n sub_keys: The value to use when `sub_keys` is in arg_names.\n aggregation_type: The value to use when `aggregation_type` is in arg_names.\n class_weights: The value to use when `class_weights` is in arg_names.\n query_key: The value to use when `query_key` is in arg_names.\n is_diff: The value to use when `is_diff` is in arg_names.\n\n Returns:\n The kwargs passed as input, updated with the appropriate additional args.\n " if ('eval_config' in arg_names): kwargs['eval_config'] = eval_config if ('schema' in arg_names): kwargs['schema'] = schema if ('model_names' in arg_names): kwargs['model_names'] = model_names if ('output_names' in arg_names): kwargs['output_names'] = output_names if ('sub_keys' in arg_names): kwargs['sub_keys'] = sub_keys if ('aggregation_type' in arg_names): kwargs['aggregation_type'] = aggregation_type if ('class_weights' in arg_names): kwargs['class_weights'] = class_weights if ('query_key' in arg_names): kwargs['query_key'] = query_key if ('is_diff' in arg_names): kwargs['is_diff'] = is_diff return kwargs
def update_create_computations_fn_kwargs(arg_names: Iterable[Text], kwargs: Dict[(Text, Any)], eval_config: Optional[config_pb2.EvalConfig]=None, schema: Optional[schema_pb2.Schema]=None, model_names: Optional[List[Text]]=None, output_names: Optional[List[Text]]=None, sub_keys: Optional[List[Optional[SubKey]]]=None, aggregation_type: Optional[AggregationType]=None, class_weights: Optional[Dict[(int, float)]]=None, query_key: Optional[Text]=None, is_diff: Optional[bool]=False): "Updates create_computations_fn kwargs based on arg spec.\n\n Each metric's create_computations_fn is invoked with a variable set of\n parameters, depending on the argument names of the callable. If an argument\n name matches one of the reserved names, this function will update the kwargs\n with the appropriate value for that arg.\n\n Args:\n arg_names: The arg_names for the create_computations_fn.\n kwargs: The existing kwargs for create_computations_fn.\n eval_config: The value to use when `eval_config` is in arg_names.\n schema: The value to use when `schema` is in arg_names.\n model_names: The value to use when `model_names` is in arg_names.\n output_names: The value to use when `output_names` is in arg_names.\n sub_keys: The value to use when `sub_keys` is in arg_names.\n aggregation_type: The value to use when `aggregation_type` is in arg_names.\n class_weights: The value to use when `class_weights` is in arg_names.\n query_key: The value to use when `query_key` is in arg_names.\n is_diff: The value to use when `is_diff` is in arg_names.\n\n Returns:\n The kwargs passed as input, updated with the appropriate additional args.\n " if ('eval_config' in arg_names): kwargs['eval_config'] = eval_config if ('schema' in arg_names): kwargs['schema'] = schema if ('model_names' in arg_names): kwargs['model_names'] = model_names if ('output_names' in arg_names): kwargs['output_names'] = output_names if ('sub_keys' in arg_names): kwargs['sub_keys'] = sub_keys if ('aggregation_type' in arg_names): kwargs['aggregation_type'] = aggregation_type if ('class_weights' in arg_names): kwargs['class_weights'] = class_weights if ('query_key' in arg_names): kwargs['query_key'] = query_key if ('is_diff' in arg_names): kwargs['is_diff'] = is_diff return kwargs<|docstring|>Updates create_computations_fn kwargs based on arg spec. Each metric's create_computations_fn is invoked with a variable set of parameters, depending on the argument names of the callable. If an argument name matches one of the reserved names, this function will update the kwargs with the appropriate value for that arg. Args: arg_names: The arg_names for the create_computations_fn. kwargs: The existing kwargs for create_computations_fn. eval_config: The value to use when `eval_config` is in arg_names. schema: The value to use when `schema` is in arg_names. model_names: The value to use when `model_names` is in arg_names. output_names: The value to use when `output_names` is in arg_names. sub_keys: The value to use when `sub_keys` is in arg_names. aggregation_type: The value to use when `aggregation_type` is in arg_names. class_weights: The value to use when `class_weights` is in arg_names. query_key: The value to use when `query_key` is in arg_names. is_diff: The value to use when `is_diff` is in arg_names. Returns: The kwargs passed as input, updated with the appropriate additional args.<|endoftext|>
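A minimal sketch of the reserved-name filtering described in the docstring above, assuming the module is importable as tensorflow_model_analysis.metrics.metric_types (the metric name 'my_metric' and the model name 'candidate' are made up):

from tensorflow_model_analysis.metrics import metric_types

updated = metric_types.update_create_computations_fn_kwargs(
    arg_names=['name', 'eval_config', 'model_names'],
    kwargs={'name': 'my_metric'},
    model_names=['candidate'])
# Only reserved names that appear in arg_names are injected, so the result is
# {'name': 'my_metric', 'eval_config': None, 'model_names': ['candidate']};
# sub_keys, class_weights, etc. are skipped because this factory does not accept them.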
eaa3daacb2c2c7c815c84dcd89e37b39b8ec91969e6a16fd77cb0ae4559a8be5
def register_metric(cls: Type[Metric]): 'Registers metric under the list of standard TFMA metrics.' _METRIC_OBJECTS[cls.__name__] = cls
Registers metric under the list of standard TFMA metrics.
tensorflow_model_analysis/metrics/metric_types.py
register_metric
jaymessina3/model-analysis
1,118
python
def register_metric(cls: Type[Metric]): _METRIC_OBJECTS[cls.__name__] = cls
def register_metric(cls: Type[Metric]): _METRIC_OBJECTS[cls.__name__] = cls<|docstring|>Registers metric under the list of standard TFMA metrics.<|endoftext|>
44e1fb5f2d205326e13c4b11a040c47b1bf93bac5a75cb1b740a33a9927e6a25
def registered_metrics() -> Dict[(Text, Type[Metric])]: 'Returns standard TFMA metrics.' return copy.copy(_METRIC_OBJECTS)
Returns standard TFMA metrics.
tensorflow_model_analysis/metrics/metric_types.py
registered_metrics
jaymessina3/model-analysis
1,118
python
def registered_metrics() -> Dict[(Text, Type[Metric])]: return copy.copy(_METRIC_OBJECTS)
def registered_metrics() -> Dict[(Text, Type[Metric])]: return copy.copy(_METRIC_OBJECTS)<|docstring|>Returns standard TFMA metrics.<|endoftext|>
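register_metric and registered_metrics form a simple name-to-class registry keyed by the class's __name__. A hedged sketch; SimpleCount and its stub factory are hypothetical stand-ins, not real TFMA metrics:

from tensorflow_model_analysis.metrics import metric_types

def _simple_count_computations(name='simple_count'):
    return []  # a real factory would return MetricComputation objects

class SimpleCount(metric_types.Metric):
    def __init__(self, name='simple_count'):
        super().__init__(_simple_count_computations, name=name)

metric_types.register_metric(SimpleCount)
assert 'SimpleCount' in metric_types.registered_metrics()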
f3fc400545294cce03840e2a33ccc81935f03e7a0b315dd00a484ae1d93a3ea1
def InputPreprocessor(include_default_inputs: bool=False) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for including raw inputs in StandardMetricInputs.\n\n Args:\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the inputs.\n ' return StandardMetricInputsPreprocessor(include_filter={constants.INPUT_KEY: {}}, include_default_inputs=include_default_inputs)
Returns preprocessor for including raw inputs in StandardMetricInputs. Args: include_default_inputs: True to include default inputs (labels, predictions, example weights) in addition to the inputs.
tensorflow_model_analysis/metrics/metric_types.py
InputPreprocessor
jaymessina3/model-analysis
1,118
python
def InputPreprocessor(include_default_inputs: bool=False) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for including raw inputs in StandardMetricInputs.\n\n Args:\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the inputs.\n ' return StandardMetricInputsPreprocessor(include_filter={constants.INPUT_KEY: {}}, include_default_inputs=include_default_inputs)
def InputPreprocessor(include_default_inputs: bool=False) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for including raw inputs in StandardMetricInputs.\n\n Args:\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the inputs.\n ' return StandardMetricInputsPreprocessor(include_filter={constants.INPUT_KEY: {}}, include_default_inputs=include_default_inputs)<|docstring|>Returns preprocessor for including raw inputs in StandardMetricInputs. Args: include_default_inputs: True to include default inputs (labels, predictions, example weights) in addition to the inputs.<|endoftext|>
a6cb1d437f74dd32c25978156304c78f95467d4732938b54a4490936171682cb
def FeaturePreprocessor(feature_keys: Iterable[Text], include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for including features in StandardMetricInputs.\n\n Args:\n feature_keys: List of feature keys. An empty list means all.\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the features.\n model_names: Optional model names. Only used if include_default_inputs is\n True. If unset all models will be included with the default inputs.\n output_names: Optional output names. Only used if include_default_inputs is\n True. If unset all outputs will be included with the default inputs.\n ' if feature_keys: include_features = {k: {} for k in feature_keys} else: include_features = {} return StandardMetricInputsPreprocessor(include_filter={constants.FEATURES_KEY: include_features}, include_default_inputs=include_default_inputs, model_names=model_names, output_names=output_names)
Returns preprocessor for including features in StandardMetricInputs. Args: feature_keys: List of feature keys. An empty list means all. include_default_inputs: True to include default inputs (labels, predictions, example weights) in addition to the features. model_names: Optional model names. Only used if include_default_inputs is True. If unset all models will be included with the default inputs. output_names: Optional output names. Only used if include_default_inputs is True. If unset all outputs will be included with the default inputs.
tensorflow_model_analysis/metrics/metric_types.py
FeaturePreprocessor
jaymessina3/model-analysis
1,118
python
def FeaturePreprocessor(feature_keys: Iterable[Text], include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for including features in StandardMetricInputs.\n\n Args:\n feature_keys: List of feature keys. An empty list means all.\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the features.\n model_names: Optional model names. Only used if include_default_inputs is\n True. If unset all models will be included with the default inputs.\n output_names: Optional output names. Only used if include_default_inputs is\n True. If unset all outputs will be included with the default inputs.\n ' if feature_keys: include_features = {k: {} for k in feature_keys} else: include_features = {} return StandardMetricInputsPreprocessor(include_filter={constants.FEATURES_KEY: include_features}, include_default_inputs=include_default_inputs, model_names=model_names, output_names=output_names)
def FeaturePreprocessor(feature_keys: Iterable[Text], include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for including features in StandardMetricInputs.\n\n Args:\n feature_keys: List of feature keys. An empty list means all.\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the features.\n model_names: Optional model names. Only used if include_default_inputs is\n True. If unset all models will be included with the default inputs.\n output_names: Optional output names. Only used if include_default_inputs is\n True. If unset all outputs will be included with the default inputs.\n ' if feature_keys: include_features = {k: {} for k in feature_keys} else: include_features = {} return StandardMetricInputsPreprocessor(include_filter={constants.FEATURES_KEY: include_features}, include_default_inputs=include_default_inputs, model_names=model_names, output_names=output_names)<|docstring|>Returns preprocessor for including features in StandardMetricInputs. Args: feature_keys: List of feature keys. An empty list means all. include_default_inputs: True to include default inputs (labels, predictions, example weights) in addition to the features. model_names: Optional model names. Only used if include_default_inputs is True. If unset all models will be included with the default inputs. output_names: Optional output names. Only used if include_default_inputs is True. If unset all outputs will be included with the default inputs.<|endoftext|>
8ea9172cdde1db243aa56eee208fad14dc347a105fa702e7fd99f349bf573e2b
def TransformedFeaturePreprocessor(feature_keys: Iterable[Text], include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for incl transformed features in StandardMetricInputs.\n\n Args:\n feature_keys: List of feature keys. An empty list means all.\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the transformed features.\n model_names: Optional model names (required if transformed_features used\n with multi-model evaluations).\n output_names: Optional output names. Only used if include_default_inputs is\n True. If unset all outputs will be included with the default inputs.\n ' if feature_keys: include_features = {k: {} for k in feature_keys} else: include_features = {} if model_names: include_features = {name: include_features for name in model_names} return StandardMetricInputsPreprocessor(include_filter={constants.TRANSFORMED_FEATURES_KEY: include_features}, include_default_inputs=include_default_inputs, model_names=model_names, output_names=output_names)
Returns preprocessor for incl transformed features in StandardMetricInputs. Args: feature_keys: List of feature keys. An empty list means all. include_default_inputs: True to include default inputs (labels, predictions, example weights) in addition to the transformed features. model_names: Optional model names (required if transformed_features used with multi-model evaluations). output_names: Optional output names. Only used if include_default_inputs is True. If unset all outputs will be included with the default inputs.
tensorflow_model_analysis/metrics/metric_types.py
TransformedFeaturePreprocessor
jaymessina3/model-analysis
1,118
python
def TransformedFeaturePreprocessor(feature_keys: Iterable[Text], include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for incl transformed features in StandardMetricInputs.\n\n Args:\n feature_keys: List of feature keys. An empty list means all.\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the transformed features.\n model_names: Optional model names (required if transformed_features used\n with multi-model evaluations).\n output_names: Optional output names. Only used if include_default_inputs is\n True. If unset all outputs will be included with the default inputs.\n ' if feature_keys: include_features = {k: {} for k in feature_keys} else: include_features = {} if model_names: include_features = {name: include_features for name in model_names} return StandardMetricInputsPreprocessor(include_filter={constants.TRANSFORMED_FEATURES_KEY: include_features}, include_default_inputs=include_default_inputs, model_names=model_names, output_names=output_names)
def TransformedFeaturePreprocessor(feature_keys: Iterable[Text], include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for incl transformed features in StandardMetricInputs.\n\n Args:\n feature_keys: List of feature keys. An empty list means all.\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the transformed features.\n model_names: Optional model names (required if transformed_features used\n with multi-model evaluations).\n output_names: Optional output names. Only used if include_default_inputs is\n True. If unset all outputs will be included with the default inputs.\n ' if feature_keys: include_features = {k: {} for k in feature_keys} else: include_features = {} if model_names: include_features = {name: include_features for name in model_names} return StandardMetricInputsPreprocessor(include_filter={constants.TRANSFORMED_FEATURES_KEY: include_features}, include_default_inputs=include_default_inputs, model_names=model_names, output_names=output_names)<|docstring|>Returns preprocessor for incl transformed features in StandardMetricInputs. Args: feature_keys: List of feature keys. An empty list means all. include_default_inputs: True to include default inputs (labels, predictions, example weights) in addition to the transformed features. model_names: Optional model names (required if transformed_features used with multi-model evaluations). output_names: Optional output names. Only used if include_default_inputs is True. If unset all outputs will be included with the default inputs.<|endoftext|>
7331daddbe73a78576fb2e5b6a0241f0b60c500bae14b763cd26ad06834e18af
def AttributionPreprocessor(feature_keys: Iterable[Text], include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for including attributions in StandardMetricInputs.\n\n Args:\n feature_keys: List of feature keys under attributions to keep. An empty list\n means all.\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the transformed features.\n model_names: Optional model names (required for multi-model evaluations).\n output_names: Optional output names (required for multi-output evaluations).\n ' if feature_keys: include_features = {k: {} for k in feature_keys} else: include_features = {} if output_names: include_features = {name: include_features for name in output_names} if model_names: include_features = {name: include_features for name in model_names} return StandardMetricInputsPreprocessor(include_filter={constants.ATTRIBUTIONS_KEY: include_features}, include_default_inputs=include_default_inputs, model_names=model_names, output_names=output_names)
Returns preprocessor for including attributions in StandardMetricInputs. Args: feature_keys: List of feature keys under attributions to keep. An empty list means all. include_default_inputs: True to include default inputs (labels, predictions, example weights) in addition to the transformed features. model_names: Optional model names (required for multi-model evaluations). output_names: Optional output names (required for multi-output evaluations).
tensorflow_model_analysis/metrics/metric_types.py
AttributionPreprocessor
jaymessina3/model-analysis
1,118
python
def AttributionPreprocessor(feature_keys: Iterable[Text], include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for including attributions in StandardMetricInputs.\n\n Args:\n feature_keys: List of feature keys under attributions to keep. An empty list\n means all.\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the transformed features.\n model_names: Optional model names (required for multi-model evaluations).\n output_names: Optional output names (required for multi-output evaluations).\n ' if feature_keys: include_features = {k: {} for k in feature_keys} else: include_features = {} if output_names: include_features = {name: include_features for name in output_names} if model_names: include_features = {name: include_features for name in model_names} return StandardMetricInputsPreprocessor(include_filter={constants.ATTRIBUTIONS_KEY: include_features}, include_default_inputs=include_default_inputs, model_names=model_names, output_names=output_names)
def AttributionPreprocessor(feature_keys: Iterable[Text], include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None) -> StandardMetricInputsPreprocessor: 'Returns preprocessor for including attributions in StandardMetricInputs.\n\n Args:\n feature_keys: List of feature keys under attributions to keep. An empty list\n means all.\n include_default_inputs: True to include default inputs (labels, predictions,\n example weights) in addition to the transformed features.\n model_names: Optional model names (required for multi-model evaluations).\n output_names: Optional output names (required for multi-output evaluations).\n ' if feature_keys: include_features = {k: {} for k in feature_keys} else: include_features = {} if output_names: include_features = {name: include_features for name in output_names} if model_names: include_features = {name: include_features for name in model_names} return StandardMetricInputsPreprocessor(include_filter={constants.ATTRIBUTIONS_KEY: include_features}, include_default_inputs=include_default_inputs, model_names=model_names, output_names=output_names)<|docstring|>Returns preprocessor for including attributions in StandardMetricInputs. Args: feature_keys: List of feature keys under attributions to keep. An empty list means all. include_default_inputs: True to include default inputs (labels, predictions, example weights) in addition to the transformed features. model_names: Optional model names (required for multi-model evaluations). output_names: Optional output names (required for multi-output evaluations).<|endoftext|>
d63f498c2abc21b7404a79406efb109cf2d70c972defe59255e577571e924c7a
def StandardMetricInputsPreprocessorList(preprocessors: List[StandardMetricInputsPreprocessor]) -> StandardMetricInputsPreprocessor: 'Returns preprocessor combining multiple standard preprocessors together.\n\n Args:\n preprocessors: List of StandardMetricInputsPreprocessors. Must be of type\n StandardMetricInputsPreprocessor (subclasses not supported).\n ' include_filter = {} for p in preprocessors: if (type(p) != StandardMetricInputsPreprocessor): raise ValueError('Only direct instances of StandardMetricsInputPreprocessor (excluding sub-classes) are supported') if (not include_filter): include_filter = p.include_filter else: include_filter = util.merge_filters(include_filter, p.include_filter) return StandardMetricInputsPreprocessor(include_filter=include_filter, include_default_inputs=False)
Returns preprocessor combining multiple standard preprocessors together. Args: preprocessors: List of StandardMetricInputsPreprocessors. Must be of type StandardMetricInputsPreprocessor (subclasses not supported).
tensorflow_model_analysis/metrics/metric_types.py
StandardMetricInputsPreprocessorList
jaymessina3/model-analysis
1,118
python
def StandardMetricInputsPreprocessorList(preprocessors: List[StandardMetricInputsPreprocessor]) -> StandardMetricInputsPreprocessor: 'Returns preprocessor combining multiple standard preprocessors together.\n\n Args:\n preprocessors: List of StandardMetricInputsPreprocessors. Must be of type\n StandardMetricInputsPreprocessor (subclasses not supported).\n ' include_filter = {} for p in preprocessors: if (type(p) != StandardMetricInputsPreprocessor): raise ValueError('Only direct instances of StandardMetricsInputPreprocessor (excluding sub-classes) are supported') if (not include_filter): include_filter = p.include_filter else: include_filter = util.merge_filters(include_filter, p.include_filter) return StandardMetricInputsPreprocessor(include_filter=include_filter, include_default_inputs=False)
def StandardMetricInputsPreprocessorList(preprocessors: List[StandardMetricInputsPreprocessor]) -> StandardMetricInputsPreprocessor: 'Returns preprocessor combining multiple standard preprocessors together.\n\n Args:\n preprocessors: List of StandardMetricInputsPreprocessors. Must be of type\n StandardMetricInputsPreprocessor (subclasses not supported).\n ' include_filter = {} for p in preprocessors: if (type(p) != StandardMetricInputsPreprocessor): raise ValueError('Only direct instances of StandardMetricsInputPreprocessor (excluding sub-classes) are supported') if (not include_filter): include_filter = p.include_filter else: include_filter = util.merge_filters(include_filter, p.include_filter) return StandardMetricInputsPreprocessor(include_filter=include_filter, include_default_inputs=False)<|docstring|>Returns preprocessor combining multiple standard preprocessors together. Args: preprocessors: List of StandardMetricInputsPreprocessors. Must be of type StandardMetricInputsPreprocessor (subclasses not supported).<|endoftext|>
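Because FeaturePreprocessor, TransformedFeaturePreprocessor, and the other factories above return plain StandardMetricInputsPreprocessor instances, they can be merged with StandardMetricInputsPreprocessorList. A sketch with made-up feature names:

from tensorflow_model_analysis.metrics import metric_types

preprocessor = metric_types.StandardMetricInputsPreprocessorList([
    metric_types.FeaturePreprocessor(feature_keys=['age']),
    metric_types.TransformedFeaturePreprocessor(feature_keys=['age_bucket']),
])
# The two include filters are merged, so the combined preprocessor keeps both
# features['age'] and transformed_features['age_bucket'] in StandardMetricInputs.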
514a527692f2bf309951ff686199b64f93e7c2d63181e7ab572500a833919191
def to_proto(self) -> metrics_for_slice_pb2.SubKey: 'Converts key to proto.' sub_key = metrics_for_slice_pb2.SubKey() if (self.class_id is not None): sub_key.class_id.value = self.class_id if (self.k is not None): sub_key.k.value = self.k if (self.top_k is not None): sub_key.top_k.value = self.top_k return sub_key
Converts key to proto.
tensorflow_model_analysis/metrics/metric_types.py
to_proto
jaymessina3/model-analysis
1,118
python
def to_proto(self) -> metrics_for_slice_pb2.SubKey: sub_key = metrics_for_slice_pb2.SubKey() if (self.class_id is not None): sub_key.class_id.value = self.class_id if (self.k is not None): sub_key.k.value = self.k if (self.top_k is not None): sub_key.top_k.value = self.top_k return sub_key
def to_proto(self) -> metrics_for_slice_pb2.SubKey: sub_key = metrics_for_slice_pb2.SubKey() if (self.class_id is not None): sub_key.class_id.value = self.class_id if (self.k is not None): sub_key.k.value = self.k if (self.top_k is not None): sub_key.top_k.value = self.top_k return sub_key<|docstring|>Converts key to proto.<|endoftext|>
74d50e385d8297cb0d78addf3bc1f596fb56ec5b0fd7d5d9efae28c5971602eb
@staticmethod def from_proto(pb: metrics_for_slice_pb2.SubKey) -> Optional['SubKey']: 'Creates class from proto.' class_id = None if pb.HasField('class_id'): class_id = pb.class_id.value k = None if pb.HasField('k'): k = pb.k.value top_k = None if pb.HasField('top_k'): top_k = pb.top_k.value if ((class_id is None) and (k is None) and (top_k is None)): return None else: return SubKey(class_id=class_id, k=k, top_k=top_k)
Creates class from proto.
tensorflow_model_analysis/metrics/metric_types.py
from_proto
jaymessina3/model-analysis
1,118
python
@staticmethod def from_proto(pb: metrics_for_slice_pb2.SubKey) -> Optional['SubKey']: class_id = None if pb.HasField('class_id'): class_id = pb.class_id.value k = None if pb.HasField('k'): k = pb.k.value top_k = None if pb.HasField('top_k'): top_k = pb.top_k.value if ((class_id is None) and (k is None) and (top_k is None)): return None else: return SubKey(class_id=class_id, k=k, top_k=top_k)
@staticmethod def from_proto(pb: metrics_for_slice_pb2.SubKey) -> Optional['SubKey']: class_id = None if pb.HasField('class_id'): class_id = pb.class_id.value k = None if pb.HasField('k'): k = pb.k.value top_k = None if pb.HasField('top_k'): top_k = pb.top_k.value if ((class_id is None) and (k is None) and (top_k is None)): return None else: return SubKey(class_id=class_id, k=k, top_k=top_k)<|docstring|>Creates class from proto.<|endoftext|>
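A small round-trip sketch for the SubKey conversions above, assuming SubKey's constructor defaults unset fields to None (which is what from_proto relies on):

from tensorflow_model_analysis.metrics import metric_types

sub_key = metric_types.SubKey(top_k=3)
pb = sub_key.to_proto()                       # only top_k.value is populated
assert metric_types.SubKey.from_proto(pb) == sub_key

empty = metric_types.SubKey.from_proto(pb.__class__())
assert empty is None                          # no field set -> None, per from_proto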
00b71c635498c322efa4038250ed794109da7ddced1f365426dffb8c65f5c1ee
def to_proto(self) -> metrics_for_slice_pb2.AggregationType: 'Converts key to proto.' aggregration_type = metrics_for_slice_pb2.AggregationType() if (self.micro_average is not None): aggregration_type.micro_average = True if (self.macro_average is not None): aggregration_type.macro_average = True if (self.weighted_macro_average is not None): aggregration_type.weighted_macro_average = True return aggregration_type
Converts key to proto.
tensorflow_model_analysis/metrics/metric_types.py
to_proto
jaymessina3/model-analysis
1,118
python
def to_proto(self) -> metrics_for_slice_pb2.AggregationType: aggregration_type = metrics_for_slice_pb2.AggregationType() if (self.micro_average is not None): aggregration_type.micro_average = True if (self.macro_average is not None): aggregration_type.macro_average = True if (self.weighted_macro_average is not None): aggregration_type.weighted_macro_average = True return aggregration_type
def to_proto(self) -> metrics_for_slice_pb2.AggregationType: aggregration_type = metrics_for_slice_pb2.AggregationType() if (self.micro_average is not None): aggregration_type.micro_average = True if (self.macro_average is not None): aggregration_type.macro_average = True if (self.weighted_macro_average is not None): aggregration_type.weighted_macro_average = True return aggregration_type<|docstring|>Converts key to proto.<|endoftext|>
77a0779851d982b214c022cb2da60c2a1ce52a701f4d45d8c62cfc662b06ce15
@staticmethod def from_proto(pb: metrics_for_slice_pb2.AggregationType) -> Optional['AggregationType']: 'Creates class from proto.' if (pb.micro_average or pb.macro_average or pb.weighted_macro_average): return AggregationType(micro_average=(pb.micro_average or None), macro_average=(pb.macro_average or None), weighted_macro_average=(pb.weighted_macro_average or None)) else: return None
Creates class from proto.
tensorflow_model_analysis/metrics/metric_types.py
from_proto
jaymessina3/model-analysis
1,118
python
@staticmethod def from_proto(pb: metrics_for_slice_pb2.AggregationType) -> Optional['AggregationType']: if (pb.micro_average or pb.macro_average or pb.weighted_macro_average): return AggregationType(micro_average=(pb.micro_average or None), macro_average=(pb.macro_average or None), weighted_macro_average=(pb.weighted_macro_average or None)) else: return None
@staticmethod def from_proto(pb: metrics_for_slice_pb2.AggregationType) -> Optional['AggregationType']: if (pb.micro_average or pb.macro_average or pb.weighted_macro_average): return AggregationType(micro_average=(pb.micro_average or None), macro_average=(pb.macro_average or None), weighted_macro_average=(pb.weighted_macro_average or None)) else: return None<|docstring|>Creates class from proto.<|endoftext|>
2dfead7cd95ebc69d270b37b7cb98a278f3e59b08789e2fa320fe859df678ed0
def to_proto(self) -> metrics_for_slice_pb2.MetricKey: 'Converts key to proto.' metric_key = metrics_for_slice_pb2.MetricKey() if self.name: metric_key.name = self.name if self.model_name: metric_key.model_name = self.model_name if self.output_name: metric_key.output_name = self.output_name if self.sub_key: metric_key.sub_key.CopyFrom(self.sub_key.to_proto()) if self.aggregation_type: metric_key.aggregation_type.CopyFrom(self.aggregation_type.to_proto()) if self.is_diff: metric_key.is_diff = self.is_diff return metric_key
Converts key to proto.
tensorflow_model_analysis/metrics/metric_types.py
to_proto
jaymessina3/model-analysis
1,118
python
def to_proto(self) -> metrics_for_slice_pb2.MetricKey: metric_key = metrics_for_slice_pb2.MetricKey() if self.name: metric_key.name = self.name if self.model_name: metric_key.model_name = self.model_name if self.output_name: metric_key.output_name = self.output_name if self.sub_key: metric_key.sub_key.CopyFrom(self.sub_key.to_proto()) if self.aggregation_type: metric_key.aggregation_type.CopyFrom(self.aggregation_type.to_proto()) if self.is_diff: metric_key.is_diff = self.is_diff return metric_key
def to_proto(self) -> metrics_for_slice_pb2.MetricKey: metric_key = metrics_for_slice_pb2.MetricKey() if self.name: metric_key.name = self.name if self.model_name: metric_key.model_name = self.model_name if self.output_name: metric_key.output_name = self.output_name if self.sub_key: metric_key.sub_key.CopyFrom(self.sub_key.to_proto()) if self.aggregation_type: metric_key.aggregation_type.CopyFrom(self.aggregation_type.to_proto()) if self.is_diff: metric_key.is_diff = self.is_diff return metric_key<|docstring|>Converts key to proto.<|endoftext|>
f92d649cab5127b2ff39af04132f8def5c16c2a3045bf69d19c8e41340708504
@staticmethod def from_proto(pb: metrics_for_slice_pb2.MetricKey) -> 'MetricKey': 'Configures class from proto.' return MetricKey(name=pb.name, model_name=pb.model_name, output_name=pb.output_name, sub_key=SubKey.from_proto(pb.sub_key), aggregation_type=AggregationType.from_proto(pb.aggregation_type), is_diff=pb.is_diff)
Configures class from proto.
tensorflow_model_analysis/metrics/metric_types.py
from_proto
jaymessina3/model-analysis
1,118
python
@staticmethod def from_proto(pb: metrics_for_slice_pb2.MetricKey) -> 'MetricKey': return MetricKey(name=pb.name, model_name=pb.model_name, output_name=pb.output_name, sub_key=SubKey.from_proto(pb.sub_key), aggregation_type=AggregationType.from_proto(pb.aggregation_type), is_diff=pb.is_diff)
@staticmethod def from_proto(pb: metrics_for_slice_pb2.MetricKey) -> 'MetricKey': return MetricKey(name=pb.name, model_name=pb.model_name, output_name=pb.output_name, sub_key=SubKey.from_proto(pb.sub_key), aggregation_type=AggregationType.from_proto(pb.aggregation_type), is_diff=pb.is_diff)<|docstring|>Configures class from proto.<|endoftext|>
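MetricKey follows the same round-trip pattern; a hedged sketch with an invented metric name, assuming unset fields default to ''/None/False as from_proto implies:

from tensorflow_model_analysis.metrics import metric_types

key = metric_types.MetricKey(
    name='auc',
    model_name='candidate',
    sub_key=metric_types.SubKey(top_k=5),
    is_diff=True)
restored = metric_types.MetricKey.from_proto(key.to_proto())
assert restored == key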
797bee3df2d00ab5d0d9533e9e3a3d9635b98af89b97b3b3fb0bc1e54fd1ff54
def to_proto(self) -> metrics_for_slice_pb2.PlotKey: 'Converts key to proto.' plot_key = metrics_for_slice_pb2.PlotKey() if self.name: raise ValueError('plot values must be combined into a single proto and stored under a plot key without a name') if self.model_name: plot_key.model_name = self.model_name if self.output_name: plot_key.output_name = self.output_name if self.sub_key: plot_key.sub_key.CopyFrom(self.sub_key.to_proto()) return plot_key
Converts key to proto.
tensorflow_model_analysis/metrics/metric_types.py
to_proto
jaymessina3/model-analysis
1,118
python
def to_proto(self) -> metrics_for_slice_pb2.PlotKey: plot_key = metrics_for_slice_pb2.PlotKey() if self.name: raise ValueError('plot values must be combined into a single proto and stored under a plot key without a name') if self.model_name: plot_key.model_name = self.model_name if self.output_name: plot_key.output_name = self.output_name if self.sub_key: plot_key.sub_key.CopyFrom(self.sub_key.to_proto()) return plot_key
def to_proto(self) -> metrics_for_slice_pb2.PlotKey: plot_key = metrics_for_slice_pb2.PlotKey() if self.name: raise ValueError('plot values must be combined into a single proto and stored under a plot key without a name') if self.model_name: plot_key.model_name = self.model_name if self.output_name: plot_key.output_name = self.output_name if self.sub_key: plot_key.sub_key.CopyFrom(self.sub_key.to_proto()) return plot_key<|docstring|>Converts key to proto.<|endoftext|>
00930b216965987f280e99b98fb2fc1440fdd72591ac38046b8c4277aff31648
@staticmethod def from_proto(pb: metrics_for_slice_pb2.PlotKey) -> 'PlotKey': 'Configures class from proto.' return PlotKey(name='', model_name=pb.model_name, output_name=pb.output_name, sub_key=SubKey.from_proto(pb.sub_key))
Configures class from proto.
tensorflow_model_analysis/metrics/metric_types.py
from_proto
jaymessina3/model-analysis
1,118
python
@staticmethod def from_proto(pb: metrics_for_slice_pb2.PlotKey) -> 'PlotKey': return PlotKey(name='', model_name=pb.model_name, output_name=pb.output_name, sub_key=SubKey.from_proto(pb.sub_key))
@staticmethod def from_proto(pb: metrics_for_slice_pb2.PlotKey) -> 'PlotKey': return PlotKey(name='', model_name=pb.model_name, output_name=pb.output_name, sub_key=SubKey.from_proto(pb.sub_key))<|docstring|>Configures class from proto.<|endoftext|>
587c450fee07fbd9047cea05974369513ff54139ebcb06c08fee2910137a77d6
def to_proto(self) -> metrics_for_slice_pb2.AttributionsKey: 'Converts key to proto.' attribution_key = metrics_for_slice_pb2.AttributionsKey() if self.name: attribution_key.name = self.name if self.model_name: attribution_key.model_name = self.model_name if self.output_name: attribution_key.output_name = self.output_name if self.sub_key: attribution_key.sub_key.CopyFrom(self.sub_key.to_proto()) return attribution_key
Converts key to proto.
tensorflow_model_analysis/metrics/metric_types.py
to_proto
jaymessina3/model-analysis
1,118
python
def to_proto(self) -> metrics_for_slice_pb2.AttributionsKey: attribution_key = metrics_for_slice_pb2.AttributionsKey() if self.name: attribution_key.name = self.name if self.model_name: attribution_key.model_name = self.model_name if self.output_name: attribution_key.output_name = self.output_name if self.sub_key: attribution_key.sub_key.CopyFrom(self.sub_key.to_proto()) return attribution_key
def to_proto(self) -> metrics_for_slice_pb2.AttributionsKey: attribution_key = metrics_for_slice_pb2.AttributionsKey() if self.name: attribution_key.name = self.name if self.model_name: attribution_key.model_name = self.model_name if self.output_name: attribution_key.output_name = self.output_name if self.sub_key: attribution_key.sub_key.CopyFrom(self.sub_key.to_proto()) return attribution_key<|docstring|>Converts key to proto.<|endoftext|>
2be0303e5079ec8a2d9252e4b8e7508afa165ec4828ae1e07ba9a729a582b884
@staticmethod def from_proto(pb: metrics_for_slice_pb2.AttributionsKey) -> 'AttributionsKey': 'Configures class from proto.' return AttributionsKey(name=pb.name, model_name=pb.model_name, output_name=pb.output_name, sub_key=SubKey.from_proto(pb.sub_key))
Configures class from proto.
tensorflow_model_analysis/metrics/metric_types.py
from_proto
jaymessina3/model-analysis
1,118
python
@staticmethod def from_proto(pb: metrics_for_slice_pb2.AttributionsKey) -> 'AttributionsKey': return AttributionsKey(name=pb.name, model_name=pb.model_name, output_name=pb.output_name, sub_key=SubKey.from_proto(pb.sub_key))
@staticmethod def from_proto(pb: metrics_for_slice_pb2.AttributionsKey) -> 'AttributionsKey': return AttributionsKey(name=pb.name, model_name=pb.model_name, output_name=pb.output_name, sub_key=SubKey.from_proto(pb.sub_key))<|docstring|>Configures class from proto.<|endoftext|>
3be57738be6b4dab359f8895a292bdb3231ae1e63779a7ccb4eed0f51385fea3
def __init__(self, create_computations_fn: Callable[(..., MetricComputations)], **kwargs): 'Initializes metric.\n\n    Args:\n      create_computations_fn: Function to create the metrics computations (e.g.\n        mean_label, etc). This function should take the args passed to __init__\n        as input along with any of eval_config, schema, model_names,\n        output_names, sub_keys, aggregation_type, or query_key (where needed).\n      **kwargs: Any additional kwargs to pass to create_computations_fn. These\n        should only contain primitive types or lists/dicts of primitive types.\n        The kwargs passed to computations have precedence over these kwargs.\n    ' self.create_computations_fn = create_computations_fn self.kwargs = kwargs if ('name' in kwargs): self.name = kwargs['name'] else: self.name = None if hasattr(inspect, 'getfullargspec'): self._args = inspect.getfullargspec(self.create_computations_fn).args else: self._args = inspect.getargspec(self.create_computations_fn).args
Initializes metric. Args: create_computations_fn: Function to create the metrics computations (e.g. mean_label, etc). This function should take the args passed to __init__ as input along with any of eval_config, schema, model_names, output_names, sub_keys, aggregation_type, or query_key (where needed). **kwargs: Any additional kwargs to pass to create_computations_fn. These should only contain primitive types or lists/dicts of primitive types. The kwargs passed to computations have precedence over these kwargs.
tensorflow_model_analysis/metrics/metric_types.py
__init__
jaymessina3/model-analysis
1,118
python
def __init__(self, create_computations_fn: Callable[(..., MetricComputations)], **kwargs): 'Initializes metric.\n\n    Args:\n      create_computations_fn: Function to create the metrics computations (e.g.\n        mean_label, etc). This function should take the args passed to __init__\n        as input along with any of eval_config, schema, model_names,\n        output_names, sub_keys, aggregation_type, or query_key (where needed).\n      **kwargs: Any additional kwargs to pass to create_computations_fn. These\n        should only contain primitive types or lists/dicts of primitive types.\n        The kwargs passed to computations have precedence over these kwargs.\n    ' self.create_computations_fn = create_computations_fn self.kwargs = kwargs if ('name' in kwargs): self.name = kwargs['name'] else: self.name = None if hasattr(inspect, 'getfullargspec'): self._args = inspect.getfullargspec(self.create_computations_fn).args else: self._args = inspect.getargspec(self.create_computations_fn).args
def __init__(self, create_computations_fn: Callable[(..., MetricComputations)], **kwargs): 'Initializes metric.\n\n    Args:\n      create_computations_fn: Function to create the metrics computations (e.g.\n        mean_label, etc). This function should take the args passed to __init__\n        as input along with any of eval_config, schema, model_names,\n        output_names, sub_keys, aggregation_type, or query_key (where needed).\n      **kwargs: Any additional kwargs to pass to create_computations_fn. These\n        should only contain primitive types or lists/dicts of primitive types.\n        The kwargs passed to computations have precedence over these kwargs.\n    ' self.create_computations_fn = create_computations_fn self.kwargs = kwargs if ('name' in kwargs): self.name = kwargs['name'] else: self.name = None if hasattr(inspect, 'getfullargspec'): self._args = inspect.getfullargspec(self.create_computations_fn).args else: self._args = inspect.getargspec(self.create_computations_fn).args<|docstring|>Initializes metric. Args: create_computations_fn: Function to create the metrics computations (e.g. mean_label, etc). This function should take the args passed to __init__ as input along with any of eval_config, schema, model_names, output_names, sub_keys, aggregation_type, or query_key (where needed). **kwargs: Any additional kwargs to pass to create_computations_fn. These should only contain primitive types or lists/dicts of primitive types. The kwargs passed to computations have precedence over these kwargs.<|endoftext|>
89477ac96d00ee4ef1253e18fc551418a7585d8378e85143a8d6a861904b85be
def get_config(self) -> Dict[(Text, Any)]: 'Returns serializable config.' return self.kwargs
Returns serializable config.
tensorflow_model_analysis/metrics/metric_types.py
get_config
jaymessina3/model-analysis
1,118
python
def get_config(self) -> Dict[(Text, Any)]: return self.kwargs
def get_config(self) -> Dict[(Text, Any)]: return self.kwargs<|docstring|>Returns serializable config.<|endoftext|>
02443b5486556b92195d41fd0fcbdb51cce11ec52548a6d69688efa3960dff7a
@property def compute_confidence_interval(self) -> bool: 'Whether to compute confidence intervals for this metric.\n\n Note that this may not completely remove the computational overhead\n involved in computing a given metric. This is only respected by the\n jackknife confidence interval method.\n\n Returns:\n Whether to compute confidence intervals for this metric.\n ' return True
Whether to compute confidence intervals for this metric. Note that this may not completely remove the computational overhead involved in computing a given metric. This is only respected by the jackknife confidence interval method. Returns: Whether to compute confidence intervals for this metric.
tensorflow_model_analysis/metrics/metric_types.py
compute_confidence_interval
jaymessina3/model-analysis
1,118
python
@property def compute_confidence_interval(self) -> bool: 'Whether to compute confidence intervals for this metric.\n\n Note that this may not completely remove the computational overhead\n involved in computing a given metric. This is only respected by the\n jackknife confidence interval method.\n\n Returns:\n Whether to compute confidence intervals for this metric.\n ' return True
@property def compute_confidence_interval(self) -> bool: 'Whether to compute confidence intervals for this metric.\n\n Note that this may not completely remove the computational overhead\n involved in computing a given metric. This is only respected by the\n jackknife confidence interval method.\n\n Returns:\n Whether to compute confidence intervals for this metric.\n ' return True<|docstring|>Whether to compute confidence intervals for this metric. Note that this may not completely remove the computational overhead involved in computing a given metric. This is only respected by the jackknife confidence interval method. Returns: Whether to compute confidence intervals for this metric.<|endoftext|>
f515fe8ea035a71f862c5626bd5f5f94ff84086e9230b37e441ef4fdb3b3b863
def computations(self, eval_config: Optional[config_pb2.EvalConfig]=None, schema: Optional[schema_pb2.Schema]=None, model_names: Optional[List[Text]]=None, output_names: Optional[List[Text]]=None, sub_keys: Optional[List[Optional[SubKey]]]=None, aggregation_type: Optional[AggregationType]=None, class_weights: Optional[Dict[(int, float)]]=None, query_key: Optional[Text]=None, is_diff: Optional[bool]=False) -> MetricComputations: 'Creates computations associated with metric.' updated_kwargs = update_create_computations_fn_kwargs(self._args, self.kwargs.copy(), eval_config, schema, model_names, output_names, sub_keys, aggregation_type, class_weights, query_key, is_diff) return self.create_computations_fn(**updated_kwargs)
Creates computations associated with metric.
tensorflow_model_analysis/metrics/metric_types.py
computations
jaymessina3/model-analysis
1,118
python
def computations(self, eval_config: Optional[config_pb2.EvalConfig]=None, schema: Optional[schema_pb2.Schema]=None, model_names: Optional[List[Text]]=None, output_names: Optional[List[Text]]=None, sub_keys: Optional[List[Optional[SubKey]]]=None, aggregation_type: Optional[AggregationType]=None, class_weights: Optional[Dict[(int, float)]]=None, query_key: Optional[Text]=None, is_diff: Optional[bool]=False) -> MetricComputations: updated_kwargs = update_create_computations_fn_kwargs(self._args, self.kwargs.copy(), eval_config, schema, model_names, output_names, sub_keys, aggregation_type, class_weights, query_key, is_diff) return self.create_computations_fn(**updated_kwargs)
def computations(self, eval_config: Optional[config_pb2.EvalConfig]=None, schema: Optional[schema_pb2.Schema]=None, model_names: Optional[List[Text]]=None, output_names: Optional[List[Text]]=None, sub_keys: Optional[List[Optional[SubKey]]]=None, aggregation_type: Optional[AggregationType]=None, class_weights: Optional[Dict[(int, float)]]=None, query_key: Optional[Text]=None, is_diff: Optional[bool]=False) -> MetricComputations: updated_kwargs = update_create_computations_fn_kwargs(self._args, self.kwargs.copy(), eval_config, schema, model_names, output_names, sub_keys, aggregation_type, class_weights, query_key, is_diff) return self.create_computations_fn(**updated_kwargs)<|docstring|>Creates computations associated with metric.<|endoftext|>
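The Metric constructor and computations() records above share one pattern: capture a computations factory, introspect which argument names it declares, and forward only those arguments at call time. The following is a minimal standalone sketch of that pattern, not the TFMA implementation; MetricLike, make_computations and the returned strings are illustrative assumptions, and the getargspec fallback and update_create_computations_fn_kwargs details are simplified away.

import inspect

class MetricLike(object):
    def __init__(self, create_computations_fn, **kwargs):
        self.create_computations_fn = create_computations_fn
        self.kwargs = kwargs
        self.name = kwargs.get('name')
        # Remember which argument names the factory declares.
        self._args = inspect.getfullargspec(create_computations_fn).args

    def computations(self, **context):
        updated = dict(self.kwargs)
        # Forward only the context entries the factory accepts.
        updated.update({k: v for k, v in context.items() if k in self._args})
        return self.create_computations_fn(**updated)

def make_computations(name, eval_config=None):
    # Hypothetical factory; a real one would return MetricComputation objects.
    return ['computation:%s:%s' % (name, eval_config)]

metric = MetricLike(make_computations, name='mean_label')
print(metric.computations(eval_config='cfg', model_names=['candidate']))
# -> ['computation:mean_label:cfg']  (model_names is dropped, eval_config kept)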
8221e00d24c4f375c6b820d368d07d1ad38b6a2455dc881490824ea337f3875f
@property def label(self) -> Optional[types.TensorValueMaybeMultiLevelDict]: 'Same as labels (DEPRECATED - use labels).' return self.get_labels()
Same as labels (DEPRECATED - use labels).
tensorflow_model_analysis/metrics/metric_types.py
label
jaymessina3/model-analysis
1,118
python
@property def label(self) -> Optional[types.TensorValueMaybeMultiLevelDict]: return self.get_labels()
@property def label(self) -> Optional[types.TensorValueMaybeMultiLevelDict]: return self.get_labels()<|docstring|>Same as labels (DEPRECATED - use labels).<|endoftext|>
bb196a80ee1edffe967210bd3c8441460590700cfdc8b390e0749a394f5cfa81
@property def prediction(self) -> Optional[types.TensorValueMaybeMultiLevelDict]: 'Same as predictions (DEPRECATED - use predictions).' return self.get_predictions()
Same as predictions (DEPRECATED - use predictions).
tensorflow_model_analysis/metrics/metric_types.py
prediction
jaymessina3/model-analysis
1,118
python
@property def prediction(self) -> Optional[types.TensorValueMaybeMultiLevelDict]: return self.get_predictions()
@property def prediction(self) -> Optional[types.TensorValueMaybeMultiLevelDict]: return self.get_predictions()<|docstring|>Same as predictions (DEPRECATED - use predictions).<|endoftext|>
6516503272130855decee728e087e3ca7349db4763b131dfdfea65938f43c178
@property def example_weight(self) -> Optional[types.TensorValueMaybeMultiLevelDict]: 'Same as example_weights (DEPRECATED - use example_weights).' return self.get_example_weights()
Same as example_weights (DEPRECATED - use example_weights).
tensorflow_model_analysis/metrics/metric_types.py
example_weight
jaymessina3/model-analysis
1,118
python
@property def example_weight(self) -> Optional[types.TensorValueMaybeMultiLevelDict]: return self.get_example_weights()
@property def example_weight(self) -> Optional[types.TensorValueMaybeMultiLevelDict]: return self.get_example_weights()<|docstring|>Same as example_weights (DEPRECATED - use example_weights).<|endoftext|>
f494c4a30394e33d29eeb2f10bec8603f9c40cea37b3a57dd3b39506ea5afbd4
def __init__(self, include_filter: Optional[Union[(Iterable[Text], Dict[(Text, Any)])]]=None, include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None): "Initializes preprocessor.\n\n Args:\n include_filter: Optional list or map of extracts keys to include in\n output. If a map of keys is passed then the keys and sub-keys that exist\n in the map will be included in the output. An empty dict behaves as a\n wildcard matching all keys or the value itself. Since matching on values\n is not currently supported, an empty dict must be used to represent the\n leaf nodes. For example, {'key1': {'key1-subkey': {}}, 'key2': {}}.\n include_default_inputs: True to include default inputs (labels,\n predictions, example weights) in addition to any inputs that may be\n specified using include_filter.\n model_names: Optional model names. Only used if include_default_inputs is\n True. If unset all models will be included with the default inputs.\n output_names: Optional output names. Only used if include_default_inputs\n is True. If unset all outputs will be included with the default inputs.\n " if (include_filter is None): include_filter = {} if (not isinstance(include_filter, MutableMapping)): if isinstance(include_filter, Iterable): include_filter = {k: {} for k in (include_filter or [])} else: raise ValueError('include_filter must be a list or dict') if include_default_inputs: default_filter = {} if output_names: default_filter = {name: default_filter for name in output_names} if model_names: default_filter = {name: default_filter for name in model_names} include_filter = copy.copy(include_filter) include_filter.update({constants.LABELS_KEY: default_filter, constants.PREDICTIONS_KEY: default_filter, constants.EXAMPLE_WEIGHTS_KEY: default_filter}) self.include_filter = include_filter
Initializes preprocessor. Args: include_filter: Optional list or map of extracts keys to include in output. If a map of keys is passed then the keys and sub-keys that exist in the map will be included in the output. An empty dict behaves as a wildcard matching all keys or the value itself. Since matching on values is not currently supported, an empty dict must be used to represent the leaf nodes. For example, {'key1': {'key1-subkey': {}}, 'key2': {}}. include_default_inputs: True to include default inputs (labels, predictions, example weights) in addition to any inputs that may be specified using include_filter. model_names: Optional model names. Only used if include_default_inputs is True. If unset all models will be included with the default inputs. output_names: Optional output names. Only used if include_default_inputs is True. If unset all outputs will be included with the default inputs.
tensorflow_model_analysis/metrics/metric_types.py
__init__
jaymessina3/model-analysis
1,118
python
def __init__(self, include_filter: Optional[Union[(Iterable[Text], Dict[(Text, Any)])]]=None, include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None): "Initializes preprocessor.\n\n Args:\n include_filter: Optional list or map of extracts keys to include in\n output. If a map of keys is passed then the keys and sub-keys that exist\n in the map will be included in the output. An empty dict behaves as a\n wildcard matching all keys or the value itself. Since matching on values\n is not currently supported, an empty dict must be used to represent the\n leaf nodes. For example, {'key1': {'key1-subkey': {}}, 'key2': {}}.\n include_default_inputs: True to include default inputs (labels,\n predictions, example weights) in addition to any inputs that may be\n specified using include_filter.\n model_names: Optional model names. Only used if include_default_inputs is\n True. If unset all models will be included with the default inputs.\n output_names: Optional output names. Only used if include_default_inputs\n is True. If unset all outputs will be included with the default inputs.\n " if (include_filter is None): include_filter = {} if (not isinstance(include_filter, MutableMapping)): if isinstance(include_filter, Iterable): include_filter = {k: {} for k in (include_filter or [])} else: raise ValueError('include_filter must be a list or dict') if include_default_inputs: default_filter = {} if output_names: default_filter = {name: default_filter for name in output_names} if model_names: default_filter = {name: default_filter for name in model_names} include_filter = copy.copy(include_filter) include_filter.update({constants.LABELS_KEY: default_filter, constants.PREDICTIONS_KEY: default_filter, constants.EXAMPLE_WEIGHTS_KEY: default_filter}) self.include_filter = include_filter
def __init__(self, include_filter: Optional[Union[(Iterable[Text], Dict[(Text, Any)])]]=None, include_default_inputs: bool=True, model_names: Optional[Iterable[Text]]=None, output_names: Optional[Iterable[Text]]=None): "Initializes preprocessor.\n\n Args:\n include_filter: Optional list or map of extracts keys to include in\n output. If a map of keys is passed then the keys and sub-keys that exist\n in the map will be included in the output. An empty dict behaves as a\n wildcard matching all keys or the value itself. Since matching on values\n is not currently supported, an empty dict must be used to represent the\n leaf nodes. For example, {'key1': {'key1-subkey': {}}, 'key2': {}}.\n include_default_inputs: True to include default inputs (labels,\n predictions, example weights) in addition to any inputs that may be\n specified using include_filter.\n model_names: Optional model names. Only used if include_default_inputs is\n True. If unset all models will be included with the default inputs.\n output_names: Optional output names. Only used if include_default_inputs\n is True. If unset all outputs will be included with the default inputs.\n " if (include_filter is None): include_filter = {} if (not isinstance(include_filter, MutableMapping)): if isinstance(include_filter, Iterable): include_filter = {k: {} for k in (include_filter or [])} else: raise ValueError('include_filter must be a list or dict') if include_default_inputs: default_filter = {} if output_names: default_filter = {name: default_filter for name in output_names} if model_names: default_filter = {name: default_filter for name in model_names} include_filter = copy.copy(include_filter) include_filter.update({constants.LABELS_KEY: default_filter, constants.PREDICTIONS_KEY: default_filter, constants.EXAMPLE_WEIGHTS_KEY: default_filter}) self.include_filter = include_filter<|docstring|>Initializes preprocessor. Args: include_filter: Optional list or map of extracts keys to include in output. If a map of keys is passed then the keys and sub-keys that exist in the map will be included in the output. An empty dict behaves as a wildcard matching all keys or the value itself. Since matching on values is not currently supported, an empty dict must be used to represent the leaf nodes. For example, {'key1': {'key1-subkey': {}}, 'key2': {}}. include_default_inputs: True to include default inputs (labels, predictions, example weights) in addition to any inputs that may be specified using include_filter. model_names: Optional model names. Only used if include_default_inputs is True. If unset all models will be included with the default inputs. output_names: Optional output names. Only used if include_default_inputs is True. If unset all outputs will be included with the default inputs.<|endoftext|>
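The include_filter normalization described in the record above is easiest to see with a concrete run. Below is a small self-contained sketch of the same logic (error handling omitted); the literal key names stand in for constants.LABELS_KEY and friends, whose actual values are assumptions here.

import copy

LABELS_KEY = 'labels'                    # assumed value of constants.LABELS_KEY
PREDICTIONS_KEY = 'predictions'          # assumed value of constants.PREDICTIONS_KEY
EXAMPLE_WEIGHTS_KEY = 'example_weights'  # assumed value of constants.EXAMPLE_WEIGHTS_KEY

def build_include_filter(include_filter=None, include_default_inputs=True,
                         model_names=None, output_names=None):
    if include_filter is None:
        include_filter = {}
    if not isinstance(include_filter, dict):
        # A list of keys becomes a map whose leaves are empty dicts.
        include_filter = {k: {} for k in include_filter}
    if include_default_inputs:
        default_filter = {}
        if output_names:
            default_filter = {name: default_filter for name in output_names}
        if model_names:
            default_filter = {name: default_filter for name in model_names}
        include_filter = copy.copy(include_filter)
        include_filter.update({LABELS_KEY: default_filter,
                               PREDICTIONS_KEY: default_filter,
                               EXAMPLE_WEIGHTS_KEY: default_filter})
    return include_filter

print(build_include_filter(['features'], model_names=['model_a']))
# {'features': {}, 'labels': {'model_a': {}},
#  'predictions': {'model_a': {}}, 'example_weights': {'model_a': {}}}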
de36835b3d7d43ebc133b90f0cc6c1ac4e8d743ea5552c8e7fdef2ebe531bdd9
def includeme(config): " Set up standard configurator registrations. Use via:\n\n .. code-block:: python\n\n config = Configurator()\n config.include('pyramid_keystone')\n\n " def register(): registry = config.registry settings = parse_settings(registry.settings) registry.settings.update(settings) def ensure(): if (config.registry.queryUtility(ISessionFactory) is None): raise ConfigurationError('pyramid_keystone requires a registered session factory. (use the set_session_factory method)') config.action('keystone-configure', register) config.action(None, ensure, order=10) config.add_directive('keystone_auth_policy', '.authentication.add_auth_policy') config.add_request_method('.keystone.request_keystone', name='keystone', property=True, reify=True)
Set up standard configurator registrations. Use via: .. code-block:: python config = Configurator() config.include('pyramid_keystone')
pyramid_keystone/__init__.py
includeme
bertjwregeer/pyramid_keystone
0
python
def includeme(config): " Set up standard configurator registrations. Use via:\n\n .. code-block:: python\n\n config = Configurator()\n config.include('pyramid_keystone')\n\n " def register(): registry = config.registry settings = parse_settings(registry.settings) registry.settings.update(settings) def ensure(): if (config.registry.queryUtility(ISessionFactory) is None): raise ConfigurationError('pyramid_keystone requires a registered session factory. (use the set_session_factory method)') config.action('keystone-configure', register) config.action(None, ensure, order=10) config.add_directive('keystone_auth_policy', '.authentication.add_auth_policy') config.add_request_method('.keystone.request_keystone', name='keystone', property=True, reify=True)
def includeme(config): " Set up standard configurator registrations. Use via:\n\n .. code-block:: python\n\n config = Configurator()\n config.include('pyramid_keystone')\n\n " def register(): registry = config.registry settings = parse_settings(registry.settings) registry.settings.update(settings) def ensure(): if (config.registry.queryUtility(ISessionFactory) is None): raise ConfigurationError('pyramid_keystone requires a registered session factory. (use the set_session_factory method)') config.action('keystone-configure', register) config.action(None, ensure, order=10) config.add_directive('keystone_auth_policy', '.authentication.add_auth_policy') config.add_request_method('.keystone.request_keystone', name='keystone', property=True, reify=True)<|docstring|>Set up standard configurator registrations. Use via: .. code-block:: python config = Configurator() config.include('pyramid_keystone')<|endoftext|>
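Based on the docstring above, wiring the package into a Pyramid application might look roughly like the sketch below. The session factory, secret and settings are placeholders; the only call taken directly from the record is config.include('pyramid_keystone').

from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory

def main(global_config, **settings):
    config = Configurator(settings=settings)
    # includeme() registers an action that raises ConfigurationError at commit
    # time unless a session factory is registered, so set one first.
    config.set_session_factory(SignedCookieSessionFactory('change-me'))
    config.include('pyramid_keystone')
    return config.make_wsgi_app()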
6cc8678acce51ac876a05416a991db7c1ec5f6e301fcb5d3ced8d0dc16e4cf0d
def set_as_anonymous(self): 'Removes all IPs from the whitelist.' self.testbed.setup_env(USER_EMAIL='', overwrite=True) auth.ip_whitelist_key(auth.bots_ip_whitelist()).delete() auth_testing.reset_local_state() auth_testing.mock_get_current_identity(self, auth.Anonymous)
Removes all IPs from the whitelist.
appengine/swarming/test_env_handlers.py
set_as_anonymous
Swift1313/luci-py
0
python
def set_as_anonymous(self): self.testbed.setup_env(USER_EMAIL='', overwrite=True) auth.ip_whitelist_key(auth.bots_ip_whitelist()).delete() auth_testing.reset_local_state() auth_testing.mock_get_current_identity(self, auth.Anonymous)
def set_as_anonymous(self): self.testbed.setup_env(USER_EMAIL='', overwrite=True) auth.ip_whitelist_key(auth.bots_ip_whitelist()).delete() auth_testing.reset_local_state() auth_testing.mock_get_current_identity(self, auth.Anonymous)<|docstring|>Removes all IPs from the whitelist.<|endoftext|>
d1ba39cf8aedab7bed91a8783755dfc5e437aab2892201a363a4133710fd8926
def get_xsrf_token(self): 'Gets the generic XSRF token for web clients.' resp = self.auth_app.post('/auth/api/v1/accounts/self/xsrf_token', headers={'X-XSRF-Token-Request': '1'}).json return resp['xsrf_token'].encode('ascii')
Gets the generic XSRF token for web clients.
appengine/swarming/test_env_handlers.py
get_xsrf_token
Swift1313/luci-py
0
python
def get_xsrf_token(self): resp = self.auth_app.post('/auth/api/v1/accounts/self/xsrf_token', headers={'X-XSRF-Token-Request': '1'}).json return resp['xsrf_token'].encode('ascii')
def get_xsrf_token(self): resp = self.auth_app.post('/auth/api/v1/accounts/self/xsrf_token', headers={'X-XSRF-Token-Request': '1'}).json return resp['xsrf_token'].encode('ascii')<|docstring|>Gets the generic XSRF token for web clients.<|endoftext|>
be73f0f1e427f226044fc5b76c7fddec0ab3aa710a28baa6baccc00d4b290b36
def post_json(self, url, params, **kwargs): 'Does an HTTP POST with a JSON API and return JSON response.' return self.app.post_json(url, params=params, **kwargs).json
Does an HTTP POST with a JSON API and return JSON response.
appengine/swarming/test_env_handlers.py
post_json
Swift1313/luci-py
0
python
def post_json(self, url, params, **kwargs): return self.app.post_json(url, params=params, **kwargs).json
def post_json(self, url, params, **kwargs): return self.app.post_json(url, params=params, **kwargs).json<|docstring|>Does an HTTP POST with a JSON API and return JSON response.<|endoftext|>
feab837273acd36ceabe23b8ce66c7a69bc56c78f782fdcb1f1bd4b6c0ddd108
def mock_task_service_accounts(self, exc=None): 'Mocks support for task-associated service accounts.' self.mock(service_accounts, 'has_token_server', (lambda : True)) calls = [] def mocked(service_account, validity_duration): calls.append((service_account, validity_duration)) if exc: raise exc return ('token-grant-%s-%d' % (str(service_account), validity_duration.total_seconds())) self.mock(service_accounts, 'get_oauth_token_grant', mocked) return calls
Mocks support for task-associated service accounts.
appengine/swarming/test_env_handlers.py
mock_task_service_accounts
Swift1313/luci-py
0
python
def mock_task_service_accounts(self, exc=None): self.mock(service_accounts, 'has_token_server', (lambda : True)) calls = [] def mocked(service_account, validity_duration): calls.append((service_account, validity_duration)) if exc: raise exc return ('token-grant-%s-%d' % (str(service_account), validity_duration.total_seconds())) self.mock(service_accounts, 'get_oauth_token_grant', mocked) return calls
def mock_task_service_accounts(self, exc=None): self.mock(service_accounts, 'has_token_server', (lambda : True)) calls = [] def mocked(service_account, validity_duration): calls.append((service_account, validity_duration)) if exc: raise exc return ('token-grant-%s-%d' % (str(service_account), validity_duration.total_seconds())) self.mock(service_accounts, 'get_oauth_token_grant', mocked) return calls<|docstring|>Mocks support for task-associated service accounts.<|endoftext|>
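To make the shape of the fake token grant above concrete, the mocked closure is re-created here as a standalone snippet; the service-account address is purely illustrative.

import datetime

calls = []

def mocked_get_oauth_token_grant(service_account, validity_duration):
    # Record the call and return a deterministic fake token, matching the
    # mocked() closure in the record above when exc is None.
    calls.append((service_account, validity_duration))
    return 'token-grant-%s-%d' % (str(service_account),
                                  validity_duration.total_seconds())

print(mocked_get_oauth_token_grant('[email protected]',
                                   datetime.timedelta(hours=1)))
# token-grant-sa@example.com-3600
print(calls)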
9b389ea304a455d15f9194e0c2e97cbf37f5dadff55108bddba8970bcf87002c
def mock_default_pool_acl(self, service_accounts): "Mocks ACLs of 'default' pool to allow usage of given service accounts." assert isinstance(service_accounts, (list, tuple)), service_accounts def mocked_fetch_pools_config(): default_isolate = pools_config.IsolateServer(server='https://pool.config.isolate.example.com', namespace='default-gzip') default_cipd = pools_config.CipdServer(server='https://pool.config.cipd.example.com', client_version='from_pool_config') return pools_config._PoolsCfg({'template': pools_config.PoolConfig(name='template', rev='pools_cfg_rev', scheduling_users=frozenset([auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]')]), scheduling_groups=frozenset(), trusted_delegatees={}, service_accounts=frozenset(service_accounts), service_accounts_groups=(), task_template_deployment=pools_config.TaskTemplateDeployment(prod=pools_config.TaskTemplate(cache=(), cipd_package=(), env=(pools_config.Env('VAR', 'prod', (), False),), inclusions=()), canary=pools_config.TaskTemplate(cache=(), cipd_package=(), env=(pools_config.Env('VAR', 'canary', (), False),), inclusions=()), canary_chance=0.5), default_isolate=default_isolate, default_cipd=default_cipd, bot_monitoring=None, external_schedulers=None), 'default': pools_config.PoolConfig(name='default', rev='pools_cfg_rev', scheduling_users=frozenset([auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]')]), scheduling_groups=frozenset(), trusted_delegatees={}, service_accounts=frozenset(service_accounts), service_accounts_groups=(), task_template_deployment=None, bot_monitoring=None, default_isolate=default_isolate, default_cipd=default_cipd, external_schedulers=None)}, (default_isolate, default_cipd)) self.mock(pools_config, '_fetch_pools_config', mocked_fetch_pools_config)
Mocks ACLs of 'default' pool to allow usage of given service accounts.
appengine/swarming/test_env_handlers.py
mock_default_pool_acl
Swift1313/luci-py
0
python
def mock_default_pool_acl(self, service_accounts): assert isinstance(service_accounts, (list, tuple)), service_accounts def mocked_fetch_pools_config(): default_isolate = pools_config.IsolateServer(server='https://pool.config.isolate.example.com', namespace='default-gzip') default_cipd = pools_config.CipdServer(server='https://pool.config.cipd.example.com', client_version='from_pool_config') return pools_config._PoolsCfg({'template': pools_config.PoolConfig(name='template', rev='pools_cfg_rev', scheduling_users=frozenset([auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]')]), scheduling_groups=frozenset(), trusted_delegatees={}, service_accounts=frozenset(service_accounts), service_accounts_groups=(), task_template_deployment=pools_config.TaskTemplateDeployment(prod=pools_config.TaskTemplate(cache=(), cipd_package=(), env=(pools_config.Env('VAR', 'prod', (), False),), inclusions=()), canary=pools_config.TaskTemplate(cache=(), cipd_package=(), env=(pools_config.Env('VAR', 'canary', (), False),), inclusions=()), canary_chance=0.5), default_isolate=default_isolate, default_cipd=default_cipd, bot_monitoring=None, external_schedulers=None), 'default': pools_config.PoolConfig(name='default', rev='pools_cfg_rev', scheduling_users=frozenset([auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]')]), scheduling_groups=frozenset(), trusted_delegatees={}, service_accounts=frozenset(service_accounts), service_accounts_groups=(), task_template_deployment=None, bot_monitoring=None, default_isolate=default_isolate, default_cipd=default_cipd, external_schedulers=None)}, (default_isolate, default_cipd)) self.mock(pools_config, '_fetch_pools_config', mocked_fetch_pools_config)
def mock_default_pool_acl(self, service_accounts): assert isinstance(service_accounts, (list, tuple)), service_accounts def mocked_fetch_pools_config(): default_isolate = pools_config.IsolateServer(server='https://pool.config.isolate.example.com', namespace='default-gzip') default_cipd = pools_config.CipdServer(server='https://pool.config.cipd.example.com', client_version='from_pool_config') return pools_config._PoolsCfg({'template': pools_config.PoolConfig(name='template', rev='pools_cfg_rev', scheduling_users=frozenset([auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]')]), scheduling_groups=frozenset(), trusted_delegatees={}, service_accounts=frozenset(service_accounts), service_accounts_groups=(), task_template_deployment=pools_config.TaskTemplateDeployment(prod=pools_config.TaskTemplate(cache=(), cipd_package=(), env=(pools_config.Env('VAR', 'prod', (), False),), inclusions=()), canary=pools_config.TaskTemplate(cache=(), cipd_package=(), env=(pools_config.Env('VAR', 'canary', (), False),), inclusions=()), canary_chance=0.5), default_isolate=default_isolate, default_cipd=default_cipd, bot_monitoring=None, external_schedulers=None), 'default': pools_config.PoolConfig(name='default', rev='pools_cfg_rev', scheduling_users=frozenset([auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]'), auth.Identity(auth.IDENTITY_USER, '[email protected]')]), scheduling_groups=frozenset(), trusted_delegatees={}, service_accounts=frozenset(service_accounts), service_accounts_groups=(), task_template_deployment=None, bot_monitoring=None, default_isolate=default_isolate, default_cipd=default_cipd, external_schedulers=None)}, (default_isolate, default_cipd)) self.mock(pools_config, '_fetch_pools_config', mocked_fetch_pools_config)<|docstring|>Mocks ACLs of 'default' pool to allow usage of given service accounts.<|endoftext|>
a2364978af606f16ca4c2291a35558bee615a885dabaafe13ce650011060828e
def do_handshake(self, bot='bot1'): 'Performs bot handshake, returns data to be sent to bot handlers.\n\n Also populates self.bot_version.\n ' params = {'dimensions': {'id': [bot], 'os': ['Amiga'], 'pool': ['default']}, 'state': {'running_time': 1234.0, 'sleep_streak': 0, 'started_ts': 1410990411.111}, 'version': '123'} response = self.app.post_json('/swarming/api/v1/bot/handshake', params=params).json self.bot_version = response['bot_version'] params['version'] = self.bot_version params['state']['bot_group_cfg_version'] = response['bot_group_cfg_version'] if response.get('bot_config'): params['bot_config'] = response['bot_config'] return params
Performs bot handshake, returns data to be sent to bot handlers. Also populates self.bot_version.
appengine/swarming/test_env_handlers.py
do_handshake
Swift1313/luci-py
0
python
def do_handshake(self, bot='bot1'): 'Performs bot handshake, returns data to be sent to bot handlers.\n\n Also populates self.bot_version.\n ' params = {'dimensions': {'id': [bot], 'os': ['Amiga'], 'pool': ['default']}, 'state': {'running_time': 1234.0, 'sleep_streak': 0, 'started_ts': 1410990411.111}, 'version': '123'} response = self.app.post_json('/swarming/api/v1/bot/handshake', params=params).json self.bot_version = response['bot_version'] params['version'] = self.bot_version params['state']['bot_group_cfg_version'] = response['bot_group_cfg_version'] if response.get('bot_config'): params['bot_config'] = response['bot_config'] return params
def do_handshake(self, bot='bot1'): 'Performs bot handshake, returns data to be sent to bot handlers.\n\n Also populates self.bot_version.\n ' params = {'dimensions': {'id': [bot], 'os': ['Amiga'], 'pool': ['default']}, 'state': {'running_time': 1234.0, 'sleep_streak': 0, 'started_ts': 1410990411.111}, 'version': '123'} response = self.app.post_json('/swarming/api/v1/bot/handshake', params=params).json self.bot_version = response['bot_version'] params['version'] = self.bot_version params['state']['bot_group_cfg_version'] = response['bot_group_cfg_version'] if response.get('bot_config'): params['bot_config'] = response['bot_config'] return params<|docstring|>Performs bot handshake, returns data to be sent to bot handlers. Also populates self.bot_version.<|endoftext|>
ac1143581c217770268a8c3dcc4b023abdd4384c24a8b46d5df911ff064172ff
def bot_poll(self, bot='bot1', params=None): 'Simulates a bot that polls for task.' if (not params): params = self.do_handshake(bot) return self.post_json('/swarming/api/v1/bot/poll', params)
Simulates a bot that polls for task.
appengine/swarming/test_env_handlers.py
bot_poll
Swift1313/luci-py
0
python
def bot_poll(self, bot='bot1', params=None): if (not params): params = self.do_handshake(bot) return self.post_json('/swarming/api/v1/bot/poll', params)
def bot_poll(self, bot='bot1', params=None): if (not params): params = self.do_handshake(bot) return self.post_json('/swarming/api/v1/bot/poll', params)<|docstring|>Simulates a bot that polls for task.<|endoftext|>
8a1ea0af904cb90d2c3212758e12d10799e27e8eb52a245a70849779184255cf
@staticmethod def create_props(**kwargs): 'Returns a serialized swarming_rpcs.TaskProperties.' out = {u'cipd_input': {u'client_package': {u'package_name': u'infra/tools/cipd/${platform}', u'version': u'git_revision:deadbeef'}, u'packages': [{u'package_name': u'rm', u'path': u'bin', u'version': u'git_revision:deadbeef'}], u'server': u'https://pool.config.cipd.example.com'}, u'dimensions': [{u'key': u'os', u'value': u'Amiga'}, {u'key': u'pool', u'value': u'default'}], u'env': [], u'execution_timeout_secs': 3600, u'io_timeout_secs': 1200, u'outputs': [u'foo', u'path/to/foobar']} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
Returns a serialized swarming_rpcs.TaskProperties.
appengine/swarming/test_env_handlers.py
create_props
Swift1313/luci-py
0
python
@staticmethod def create_props(**kwargs): out = {u'cipd_input': {u'client_package': {u'package_name': u'infra/tools/cipd/${platform}', u'version': u'git_revision:deadbeef'}, u'packages': [{u'package_name': u'rm', u'path': u'bin', u'version': u'git_revision:deadbeef'}], u'server': u'https://pool.config.cipd.example.com'}, u'dimensions': [{u'key': u'os', u'value': u'Amiga'}, {u'key': u'pool', u'value': u'default'}], u'env': [], u'execution_timeout_secs': 3600, u'io_timeout_secs': 1200, u'outputs': [u'foo', u'path/to/foobar']} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
@staticmethod def create_props(**kwargs): out = {u'cipd_input': {u'client_package': {u'package_name': u'infra/tools/cipd/${platform}', u'version': u'git_revision:deadbeef'}, u'packages': [{u'package_name': u'rm', u'path': u'bin', u'version': u'git_revision:deadbeef'}], u'server': u'https://pool.config.cipd.example.com'}, u'dimensions': [{u'key': u'os', u'value': u'Amiga'}, {u'key': u'pool', u'value': u'default'}], u'env': [], u'execution_timeout_secs': 3600, u'io_timeout_secs': 1200, u'outputs': [u'foo', u'path/to/foobar']} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out<|docstring|>Returns a serialized swarming_rpcs.TaskProperties.<|endoftext|>
845bcfe0ea0a835a8636154b7d0964373bb2e7b59bc0cab91541feaa21954193
def create_new_request(self, **kwargs): 'Returns an initialized swarming_rpcs.TaskNewRequest.\n\n Useful to use a swarming_rpcs.TaskSlice.\n ' out = {'expiration_secs': ((24 * 60) * 60), 'name': 'job1', 'priority': 20, 'tags': [u'a:tag'], 'user': 'joe@localhost'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return swarming_rpcs.NewTaskRequest(**out)
Returns an initialized swarming_rpcs.TaskNewRequest. Useful to use a swarming_rpcs.TaskSlice.
appengine/swarming/test_env_handlers.py
create_new_request
Swift1313/luci-py
0
python
def create_new_request(self, **kwargs): 'Returns an initialized swarming_rpcs.TaskNewRequest.\n\n Useful to use a swarming_rpcs.TaskSlice.\n ' out = {'expiration_secs': ((24 * 60) * 60), 'name': 'job1', 'priority': 20, 'tags': [u'a:tag'], 'user': 'joe@localhost'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return swarming_rpcs.NewTaskRequest(**out)
def create_new_request(self, **kwargs): 'Returns an initialized swarming_rpcs.TaskNewRequest.\n\n Useful to use a swarming_rpcs.TaskSlice.\n ' out = {'expiration_secs': ((24 * 60) * 60), 'name': 'job1', 'priority': 20, 'tags': [u'a:tag'], 'user': 'joe@localhost'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return swarming_rpcs.NewTaskRequest(**out)<|docstring|>Returns an initialized swarming_rpcs.TaskNewRequest. Useful to use a swarming_rpcs.TaskSlice.<|endoftext|>
3e8971f4cce12994ea539c676f605e5b1884943c11626ddf2d49db7c4da8ef6d
def client_create_task(self, **kwargs): 'Creates a minimal task request via the Cloud Endpoints API.' request = self.create_new_request(**kwargs) response = self.endpoint_call(handlers_endpoints.SwarmingTasksService, 'new', request) return (response, response['task_id'])
Creates a minimal task request via the Cloud Endpoints API.
appengine/swarming/test_env_handlers.py
client_create_task
Swift1313/luci-py
0
python
def client_create_task(self, **kwargs): request = self.create_new_request(**kwargs) response = self.endpoint_call(handlers_endpoints.SwarmingTasksService, 'new', request) return (response, response['task_id'])
def client_create_task(self, **kwargs): request = self.create_new_request(**kwargs) response = self.endpoint_call(handlers_endpoints.SwarmingTasksService, 'new', request) return (response, response['task_id'])<|docstring|>Creates a minimal task request via the Cloud Endpoints API.<|endoftext|>
b01c8beaf703735a18731178e97cae2df91cd56838e5444442a7bcb0eafb2e4b
def client_create_task_isolated(self, properties=None, **kwargs): 'Creates a TaskRequest using an isolated tree via the Cloud Endpoints API.\n ' properties = (properties or {}).copy() properties['inputs_ref'] = {'isolated': '0123456789012345678901234567890123456789', 'isolatedserver': 'http://localhost:1', 'namespace': 'default-gzip'} return self.client_create_task(properties=self.create_props(**properties), **kwargs)
Creates a TaskRequest using an isolated tree via the Cloud Endpoints API.
appengine/swarming/test_env_handlers.py
client_create_task_isolated
Swift1313/luci-py
0
python
def client_create_task_isolated(self, properties=None, **kwargs): '\n ' properties = (properties or {}).copy() properties['inputs_ref'] = {'isolated': '0123456789012345678901234567890123456789', 'isolatedserver': 'http://localhost:1', 'namespace': 'default-gzip'} return self.client_create_task(properties=self.create_props(**properties), **kwargs)
def client_create_task_isolated(self, properties=None, **kwargs): '\n ' properties = (properties or {}).copy() properties['inputs_ref'] = {'isolated': '0123456789012345678901234567890123456789', 'isolatedserver': 'http://localhost:1', 'namespace': 'default-gzip'} return self.client_create_task(properties=self.create_props(**properties), **kwargs)<|docstring|>Creates a TaskRequest using an isolated tree via the Cloud Endpoints API.<|endoftext|>
12baeee666a55e4dc5ddb3f7446aab6c9766d6f1b76912e2e31f098a13f2d8d1
def client_create_task_raw(self, properties=None, **kwargs): 'Creates a raw command TaskRequest via the Cloud Endpoints API.' properties = (properties or {}).copy() properties['command'] = ['python', 'run_test.py'] return self.client_create_task(properties=self.create_props(**properties), **kwargs)
Creates a raw command TaskRequest via the Cloud Endpoints API.
appengine/swarming/test_env_handlers.py
client_create_task_raw
Swift1313/luci-py
0
python
def client_create_task_raw(self, properties=None, **kwargs): properties = (properties or {}).copy() properties['command'] = ['python', 'run_test.py'] return self.client_create_task(properties=self.create_props(**properties), **kwargs)
def client_create_task_raw(self, properties=None, **kwargs): properties = (properties or {}).copy() properties['command'] = ['python', 'run_test.py'] return self.client_create_task(properties=self.create_props(**properties), **kwargs)<|docstring|>Creates a raw command TaskRequest via the Cloud Endpoints API.<|endoftext|>
bee7dc3a39f8c517591b3c2137f87f66a10d9cd3f891ab582db8f35792414103
@staticmethod def gen_props(**kwargs): 'Returns a serialized swarming_rpcs.TaskProperties.\n\n To be used for expectations.\n ' out = {u'cipd_input': {u'client_package': {u'package_name': u'infra/tools/cipd/${platform}', u'version': u'git_revision:deadbeef'}, u'packages': [{u'package_name': u'rm', u'path': u'bin', u'version': u'git_revision:deadbeef'}], u'server': u'https://pool.config.cipd.example.com'}, u'dimensions': [{u'key': u'os', u'value': u'Amiga'}, {u'key': u'pool', u'value': u'default'}], u'execution_timeout_secs': u'3600', u'grace_period_secs': u'30', u'idempotent': False, u'inputs_ref': {'isolatedserver': 'https://pool.config.isolate.example.com', 'namespace': 'default-gzip'}, u'io_timeout_secs': u'1200', u'outputs': [u'foo', u'path/to/foobar']} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
Returns a serialized swarming_rpcs.TaskProperties. To be used for expectations.
appengine/swarming/test_env_handlers.py
gen_props
Swift1313/luci-py
0
python
@staticmethod def gen_props(**kwargs): 'Returns a serialized swarming_rpcs.TaskProperties.\n\n To be used for expectations.\n ' out = {u'cipd_input': {u'client_package': {u'package_name': u'infra/tools/cipd/${platform}', u'version': u'git_revision:deadbeef'}, u'packages': [{u'package_name': u'rm', u'path': u'bin', u'version': u'git_revision:deadbeef'}], u'server': u'https://pool.config.cipd.example.com'}, u'dimensions': [{u'key': u'os', u'value': u'Amiga'}, {u'key': u'pool', u'value': u'default'}], u'execution_timeout_secs': u'3600', u'grace_period_secs': u'30', u'idempotent': False, u'inputs_ref': {'isolatedserver': 'https://pool.config.isolate.example.com', 'namespace': 'default-gzip'}, u'io_timeout_secs': u'1200', u'outputs': [u'foo', u'path/to/foobar']} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
@staticmethod def gen_props(**kwargs): 'Returns a serialized swarming_rpcs.TaskProperties.\n\n To be used for expectations.\n ' out = {u'cipd_input': {u'client_package': {u'package_name': u'infra/tools/cipd/${platform}', u'version': u'git_revision:deadbeef'}, u'packages': [{u'package_name': u'rm', u'path': u'bin', u'version': u'git_revision:deadbeef'}], u'server': u'https://pool.config.cipd.example.com'}, u'dimensions': [{u'key': u'os', u'value': u'Amiga'}, {u'key': u'pool', u'value': u'default'}], u'execution_timeout_secs': u'3600', u'grace_period_secs': u'30', u'idempotent': False, u'inputs_ref': {'isolatedserver': 'https://pool.config.isolate.example.com', 'namespace': 'default-gzip'}, u'io_timeout_secs': u'1200', u'outputs': [u'foo', u'path/to/foobar']} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out<|docstring|>Returns a serialized swarming_rpcs.TaskProperties. To be used for expectations.<|endoftext|>
98ab125383422ecb75692173c2c52b9152fc0a3a2853966a25dc584fdb43dfc4
@staticmethod def gen_request(**kwargs): 'Returns a serialized swarming_rpcs.TaskRequest.\n\n To be used for expectations.\n ' out = {u'authenticated': u'user:[email protected]', u'expiration_secs': u'86400', u'name': u'job1', u'priority': u'20', u'service_account': u'none', u'tags': [u'a:tag', u'os:Amiga', u'pool:default', u'priority:20', u'service_account:none', u'swarming.pool.template:none', u'swarming.pool.version:pools_cfg_rev', u'user:joe@localhost'], u'user': u'joe@localhost'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
Returns a serialized swarming_rpcs.TaskRequest. To be used for expectations.
appengine/swarming/test_env_handlers.py
gen_request
Swift1313/luci-py
0
python
@staticmethod def gen_request(**kwargs): 'Returns a serialized swarming_rpcs.TaskRequest.\n\n To be used for expectations.\n ' out = {u'authenticated': u'user:[email protected]', u'expiration_secs': u'86400', u'name': u'job1', u'priority': u'20', u'service_account': u'none', u'tags': [u'a:tag', u'os:Amiga', u'pool:default', u'priority:20', u'service_account:none', u'swarming.pool.template:none', u'swarming.pool.version:pools_cfg_rev', u'user:joe@localhost'], u'user': u'joe@localhost'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
@staticmethod def gen_request(**kwargs): 'Returns a serialized swarming_rpcs.TaskRequest.\n\n To be used for expectations.\n ' out = {u'authenticated': u'user:[email protected]', u'expiration_secs': u'86400', u'name': u'job1', u'priority': u'20', u'service_account': u'none', u'tags': [u'a:tag', u'os:Amiga', u'pool:default', u'priority:20', u'service_account:none', u'swarming.pool.template:none', u'swarming.pool.version:pools_cfg_rev', u'user:joe@localhost'], u'user': u'joe@localhost'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out<|docstring|>Returns a serialized swarming_rpcs.TaskRequest. To be used for expectations.<|endoftext|>
6284d9569b412f90f781559dee3b8bff1b137f8f9080936da63e9b6542754572
@staticmethod def gen_perf_stats(**kwargs): 'Returns a serialized swarming_rpcs.PerformanceStats.\n\n To be used for expectations.\n ' out = {u'bot_overhead': 0.1, u'isolated_download': {u'duration': 1.0, u'initial_number_items': u'10', u'initial_size': u'100000', u'items_cold': [20], u'items_hot': [30, 40], u'num_items_cold': u'1', u'total_bytes_items_cold': u'20', u'num_items_hot': u'2', u'total_bytes_items_hot': u'70'}, u'isolated_upload': {u'duration': 2.0, u'items_cold': [1, 2, 40], u'items_hot': [1, 2, 3, 50], u'num_items_cold': u'3', u'total_bytes_items_cold': u'43', u'num_items_hot': u'4', u'total_bytes_items_hot': u'56'}} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
Returns a serialized swarming_rpcs.PerformanceStats. To be used for expectations.
appengine/swarming/test_env_handlers.py
gen_perf_stats
Swift1313/luci-py
0
python
@staticmethod def gen_perf_stats(**kwargs): 'Returns a serialized swarming_rpcs.PerformanceStats.\n\n To be used for expectations.\n ' out = {u'bot_overhead': 0.1, u'isolated_download': {u'duration': 1.0, u'initial_number_items': u'10', u'initial_size': u'100000', u'items_cold': [20], u'items_hot': [30, 40], u'num_items_cold': u'1', u'total_bytes_items_cold': u'20', u'num_items_hot': u'2', u'total_bytes_items_hot': u'70'}, u'isolated_upload': {u'duration': 2.0, u'items_cold': [1, 2, 40], u'items_hot': [1, 2, 3, 50], u'num_items_cold': u'3', u'total_bytes_items_cold': u'43', u'num_items_hot': u'4', u'total_bytes_items_hot': u'56'}} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
@staticmethod def gen_perf_stats(**kwargs): 'Returns a serialized swarming_rpcs.PerformanceStats.\n\n To be used for expectations.\n ' out = {u'bot_overhead': 0.1, u'isolated_download': {u'duration': 1.0, u'initial_number_items': u'10', u'initial_size': u'100000', u'items_cold': [20], u'items_hot': [30, 40], u'num_items_cold': u'1', u'total_bytes_items_cold': u'20', u'num_items_hot': u'2', u'total_bytes_items_hot': u'70'}, u'isolated_upload': {u'duration': 2.0, u'items_cold': [1, 2, 40], u'items_hot': [1, 2, 3, 50], u'num_items_cold': u'3', u'total_bytes_items_cold': u'43', u'num_items_hot': u'4', u'total_bytes_items_hot': u'56'}} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out<|docstring|>Returns a serialized swarming_rpcs.PerformanceStats. To be used for expectations.<|endoftext|>
3bcce5cd48a3ded3e2bd178ab166badf3c23ce676e6adec791763ffffbacd351
def gen_result_summary(self, **kwargs): 'Returns a serialized swarming_rpcs.TaskResult initialized from a\n TaskResultSummary.\n\n To be used for expectations.\n ' out = {u'bot_dimensions': [{u'key': u'id', u'value': [u'bot1']}, {u'key': u'os', u'value': [u'Amiga']}, {u'key': u'pool', u'value': [u'default']}], u'bot_id': u'bot1', u'bot_version': self.bot_version, u'current_task_slice': u'0', u'failure': False, u'internal_failure': False, u'name': u'job1', u'run_id': u'5cee488008811', u'server_versions': [u'v1a'], u'state': u'COMPLETED', u'tags': [u'a:tag', u'os:Amiga', u'pool:default', u'priority:20', u'service_account:none', u'swarming.pool.template:no_config', u'user:joe@localhost'], u'task_id': u'5cee488008810', u'try_number': u'0', u'user': u'joe@localhost'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
Returns a serialized swarming_rpcs.TaskResult initialized from a TaskResultSummary. To be used for expectations.
appengine/swarming/test_env_handlers.py
gen_result_summary
Swift1313/luci-py
0
python
def gen_result_summary(self, **kwargs): 'Returns a serialized swarming_rpcs.TaskResult initialized from a\n TaskResultSummary.\n\n To be used for expectations.\n ' out = {u'bot_dimensions': [{u'key': u'id', u'value': [u'bot1']}, {u'key': u'os', u'value': [u'Amiga']}, {u'key': u'pool', u'value': [u'default']}], u'bot_id': u'bot1', u'bot_version': self.bot_version, u'current_task_slice': u'0', u'failure': False, u'internal_failure': False, u'name': u'job1', u'run_id': u'5cee488008811', u'server_versions': [u'v1a'], u'state': u'COMPLETED', u'tags': [u'a:tag', u'os:Amiga', u'pool:default', u'priority:20', u'service_account:none', u'swarming.pool.template:no_config', u'user:joe@localhost'], u'task_id': u'5cee488008810', u'try_number': u'0', u'user': u'joe@localhost'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
def gen_result_summary(self, **kwargs): 'Returns a serialized swarming_rpcs.TaskResult initialized from a\n TaskResultSummary.\n\n To be used for expectations.\n ' out = {u'bot_dimensions': [{u'key': u'id', u'value': [u'bot1']}, {u'key': u'os', u'value': [u'Amiga']}, {u'key': u'pool', u'value': [u'default']}], u'bot_id': u'bot1', u'bot_version': self.bot_version, u'current_task_slice': u'0', u'failure': False, u'internal_failure': False, u'name': u'job1', u'run_id': u'5cee488008811', u'server_versions': [u'v1a'], u'state': u'COMPLETED', u'tags': [u'a:tag', u'os:Amiga', u'pool:default', u'priority:20', u'service_account:none', u'swarming.pool.template:no_config', u'user:joe@localhost'], u'task_id': u'5cee488008810', u'try_number': u'0', u'user': u'joe@localhost'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out<|docstring|>Returns a serialized swarming_rpcs.TaskResult initialized from a TaskResultSummary. To be used for expectations.<|endoftext|>
1b1a1658855a475756b10a1c0f8ff0798e6fb021237584c3f130230d4a36eb7c
def gen_run_result(self, **kwargs): 'Returns a serialized swarming_rpcs.TaskResult initialized from a\n TaskRunResult.\n\n To be used for expectations.\n ' out = {u'bot_dimensions': [{u'key': u'id', u'value': [u'bot1']}, {u'key': u'os', u'value': [u'Amiga']}, {u'key': u'pool', u'value': [u'default']}], u'bot_id': u'bot1', u'bot_version': self.bot_version, u'costs_usd': [0.0], u'current_task_slice': u'0', u'failure': False, u'internal_failure': False, u'name': u'job1', u'run_id': u'5cee488008811', u'server_versions': [u'v1a'], u'state': u'RUNNING', u'task_id': u'5cee488008811', u'try_number': u'1'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
Returns a serialized swarming_rpcs.TaskResult initialized from a TaskRunResult. To be used for expectations.
appengine/swarming/test_env_handlers.py
gen_run_result
Swift1313/luci-py
0
python
def gen_run_result(self, **kwargs): 'Returns a serialized swarming_rpcs.TaskResult initialized from a\n TaskRunResult.\n\n To be used for expectations.\n ' out = {u'bot_dimensions': [{u'key': u'id', u'value': [u'bot1']}, {u'key': u'os', u'value': [u'Amiga']}, {u'key': u'pool', u'value': [u'default']}], u'bot_id': u'bot1', u'bot_version': self.bot_version, u'costs_usd': [0.0], u'current_task_slice': u'0', u'failure': False, u'internal_failure': False, u'name': u'job1', u'run_id': u'5cee488008811', u'server_versions': [u'v1a'], u'state': u'RUNNING', u'task_id': u'5cee488008811', u'try_number': u'1'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out
def gen_run_result(self, **kwargs): 'Returns a serialized swarming_rpcs.TaskResult initialized from a\n TaskRunResult.\n\n To be used for expectations.\n ' out = {u'bot_dimensions': [{u'key': u'id', u'value': [u'bot1']}, {u'key': u'os', u'value': [u'Amiga']}, {u'key': u'pool', u'value': [u'default']}], u'bot_id': u'bot1', u'bot_version': self.bot_version, u'costs_usd': [0.0], u'current_task_slice': u'0', u'failure': False, u'internal_failure': False, u'name': u'job1', u'run_id': u'5cee488008811', u'server_versions': [u'v1a'], u'state': u'RUNNING', u'task_id': u'5cee488008811', u'try_number': u'1'} out.update(((unicode(k), v) for (k, v) in kwargs.iteritems())) return out<|docstring|>Returns a serialized swarming_rpcs.TaskResult initialized from a TaskRunResult. To be used for expectations.<|endoftext|>
971e3637f3cdb7d57bc039d3d17555b3f14ef52935d98d73f9edd8911db4254d
def image(self, windowName, imgNumpyArray, **kwargs): ' Takes numpy array and plots it into visdom ' opts = {'caption': windowName, 'title': windowName} for key in kwargs: opts[key] = kwargs[key] self.viz.image(imgNumpyArray, opts)
Takes numpy array and plots it into visdom
visualizer.py
image
smerzbach/pysmtb
1
python
def image(self, windowName, imgNumpyArray, **kwargs): ' ' opts = {'caption': windowName, 'title': windowName} for key in kwargs: opts[key] = kwargs[key] self.viz.image(imgNumpyArray, opts)
def image(self, windowName, imgNumpyArray, **kwargs): ' ' opts = {'caption': windowName, 'title': windowName} for key in kwargs: opts[key] = kwargs[key] self.viz.image(imgNumpyArray, opts)<|docstring|>Takes numpy array and plots it into visdom<|endoftext|>
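A brief usage sketch for the visdom wrapper above. Only the image() method comes from the record; the constructor is an assumption about how the surrounding Visualizer class obtains its self.viz connection, a running visdom server is required, and opts is passed by keyword here to make the intent explicit.

import numpy as np
import visdom

class Visualizer(object):
    def __init__(self, env='main'):
        # Assumed constructor: the record above only shows image().
        self.viz = visdom.Visdom(env=env)

    def image(self, windowName, imgNumpyArray, **kwargs):
        opts = {'caption': windowName, 'title': windowName}
        for key in kwargs:
            opts[key] = kwargs[key]
        self.viz.image(imgNumpyArray, opts=opts)

vis = Visualizer()
vis.image('noise preview', np.random.rand(3, 64, 64),  # C x H x W
          caption='uniform noise, 3x64x64')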
fa43f3dbe40c8d86831b5193cbc46322f85ee5e3cc4f29ca521890a7a68db4bc
def __init__(self): '\n Initialize your data structure here.\n ' self.data = set()
Initialize your data structure here.
0380_Insert_Delete_GetRandom_O(1).py
__init__
coldmanck/leetcode-python
4
python
def __init__(self): '\n \n ' self.data = set()
def __init__(self): '\n \n ' self.data = set()<|docstring|>Initialize your data structure here.<|endoftext|>
9c179052aa5e07d4314d1413d6f94d028ff95cf5ae20e6809b021f324d6c317d
def insert(self, val: int) -> bool: '\n Inserts a value to the set. Returns true if the set did not already contain the specified element.\n ' if (val in self.data): return False self.data.add(val) return True
Inserts a value to the set. Returns true if the set did not already contain the specified element.
0380_Insert_Delete_GetRandom_O(1).py
insert
coldmanck/leetcode-python
4
python
def insert(self, val: int) -> bool: '\n \n ' if (val in self.data): return False self.data.add(val) return True
def insert(self, val: int) -> bool: '\n \n ' if (val in self.data): return False self.data.add(val) return True<|docstring|>Inserts a value to the set. Returns true if the set did not already contain the specified element.<|endoftext|>
ed498a29655162578a4f2320b2b7ddf73dc256fcad1305f77bf7d7eacee1535b
def remove(self, val: int) -> bool: '\n Removes a value from the set. Returns true if the set contained the specified element.\n ' if (not (val in self.data)): return False self.data.remove(val) return True
Removes a value from the set. Returns true if the set contained the specified element.
0380_Insert_Delete_GetRandom_O(1).py
remove
coldmanck/leetcode-python
4
python
def remove(self, val: int) -> bool: '\n \n ' if (not (val in self.data)): return False self.data.remove(val) return True
def remove(self, val: int) -> bool: '\n \n ' if (not (val in self.data)): return False self.data.remove(val) return True<|docstring|>Removes a value from the set. Returns true if the set contained the specified element.<|endoftext|>
9367b360de65b68eb9f03eb05d297bea02b0c750850cfb1c5747c47343b07520
def getRandom(self) -> int: '\n Get a random element from the set.\n ' if (len(self.data) > 0): return random.sample(self.data, 1)[0]
Get a random element from the set.
0380_Insert_Delete_GetRandom_O(1).py
getRandom
coldmanck/leetcode-python
4
python
def getRandom(self) -> int: '\n \n ' if (len(self.data) > 0): return random.sample(self.data, 1)[0]
def getRandom(self) -> int: '\n \n ' if (len(self.data) > 0): return random.sample(self.data, 1)[0]<|docstring|>Get a random element from the set.<|endoftext|>
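Assembled together, the four records above give a working RandomizedSet (LeetCode 380). A short usage sketch follows, assuming the methods have been placed back on one class with random imported at module level.

import random  # required by getRandom()

rs = RandomizedSet()
print(rs.insert(1))    # True  -> 1 was not present yet
print(rs.insert(1))    # False -> duplicate insert is rejected
print(rs.remove(2))    # False -> 2 was never inserted
print(rs.insert(2))    # True
print(rs.getRandom())  # 1 or 2, chosen uniformly

Note that random.sample over a set is deprecated since Python 3.9 and no longer accepted in 3.11, and it is not truly O(1); the usual constant-time variant keeps a list of values plus a value-to-index dict and draws with random.choice.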
7eaca8ce2f16d2f3cbd831a88281e754ec6fb11ed8171a9d4c7b3ec99b212eaf
def interact_with_user(device, user_source, source_type, username, my_username, interaction_strategy: InteractionStrategy, on_action) -> (bool, bool): '\n :return: (whether some photos was liked, whether @username was followed during the interaction,\n whether stories were watched, whether was commented)\n ' global liked_count, is_followed, is_scrolled_down, is_commented liked_count = 0 is_followed = False is_watched = False is_scrolled_down = False is_commented = False if (username == my_username): print("It's you, skip.") return ((liked_count == interaction_strategy.likes_count), is_followed, is_watched, is_commented) if interaction_strategy.do_story_watch: is_watched = _watch_stories(device, user_source, source_type, username, interaction_strategy.stories_count, on_action) def do_like_actions(): global is_scrolled_down if (interaction_strategy.do_like or interaction_strategy.do_comment): suggestions_container = device.find(resourceId=f'{device.app_id}:id/similar_accounts_container', className='android.widget.LinearLayout') if suggestions_container.exists(quick=True): print('Close suggestions to avoid bugs while scrolling') arrow_button = device.find(resourceId=f'{device.app_id}:id/row_profile_header_button_chaining', className='android.widget.Button') arrow_button.click(ignore_if_missing=True) sleeper.random_sleep() coordinator_layout = device.find(resourceId=f'{device.app_id}:id/coordinator_root_layout') if coordinator_layout.exists(): print('Scroll down to see more photos.') coordinator_layout.scroll(DeviceFacade.Direction.BOTTOM) is_scrolled_down = True number_of_rows_to_use = min((((interaction_strategy.likes_count * 2) // 3) + 1), 4) photos_indices = list(range(0, (number_of_rows_to_use * 3))) shuffle(photos_indices) photos_indices = photos_indices[:interaction_strategy.likes_count] photos_indices = sorted(photos_indices) def on_like(): global liked_count liked_count += 1 print(((COLOR_OKGREEN + '@{} - photo been liked.'.format(username)) + COLOR_ENDC)) on_action(LikeAction(source_name=user_source, source_type=source_type, user=username)) def on_comment(comment): global is_commented is_commented = True print(((COLOR_OKGREEN + '@{} - photo been commented.'.format(username)) + COLOR_ENDC)) on_action(CommentAction(source_name=user_source, source_type=source_type, user=username, comment=comment)) for i in range(0, interaction_strategy.likes_count): photo_index = photos_indices[i] row = (photo_index // 3) column = (photo_index - (row * 3)) sleeper.random_sleep() print((((((('Open and like photo #' + str((i + 1))) + ' (') + str((row + 1))) + ' row, ') + str((column + 1))) + ' column)')) if (not _open_photo_and_like_and_comment(device, row, column, interaction_strategy.do_like, interaction_strategy.do_comment, interaction_strategy.like_percentage, on_like, interaction_strategy.comment_percentage, interaction_strategy.comments_list, my_username, on_comment)): print(((((COLOR_OKGREEN + 'Less than ') + str((number_of_rows_to_use * 3))) + ' photos.') + COLOR_ENDC)) break def do_follow_action(): global is_followed if interaction_strategy.do_follow: is_followed = _follow(device, username, interaction_strategy.follow_percentage, is_scrolled_down) if is_followed: on_action(FollowAction(source_name=user_source, source_type=source_type, user=username)) if (interaction_strategy.do_follow and (interaction_strategy.do_like or interaction_strategy.do_comment)): like_first_chance = randint(1, 100) if (like_first_chance > 50): print('Going to like-images first and then follow') do_like_actions() 
do_follow_action() else: print('Going to follow first and then like-images') do_follow_action() do_like_actions() else: do_like_actions() do_follow_action() return ((liked_count > 0), is_followed, is_watched, is_commented)
:return: (whether some photos was liked, whether @username was followed during the interaction, whether stories were watched, whether was commented)
insomniac/actions_impl.py
interact_with_user
davebaird/Insomniac
0
python
def interact_with_user(device, user_source, source_type, username, my_username, interaction_strategy: InteractionStrategy, on_action) -> (bool, bool): '\n :return: (whether some photos was liked, whether @username was followed during the interaction,\n whether stories were watched, whether was commented)\n ' global liked_count, is_followed, is_scrolled_down, is_commented liked_count = 0 is_followed = False is_watched = False is_scrolled_down = False is_commented = False if (username == my_username): print("It's you, skip.") return ((liked_count == interaction_strategy.likes_count), is_followed, is_watched, is_commented) if interaction_strategy.do_story_watch: is_watched = _watch_stories(device, user_source, source_type, username, interaction_strategy.stories_count, on_action) def do_like_actions(): global is_scrolled_down if (interaction_strategy.do_like or interaction_strategy.do_comment): suggestions_container = device.find(resourceId=f'{device.app_id}:id/similar_accounts_container', className='android.widget.LinearLayout') if suggestions_container.exists(quick=True): print('Close suggestions to avoid bugs while scrolling') arrow_button = device.find(resourceId=f'{device.app_id}:id/row_profile_header_button_chaining', className='android.widget.Button') arrow_button.click(ignore_if_missing=True) sleeper.random_sleep() coordinator_layout = device.find(resourceId=f'{device.app_id}:id/coordinator_root_layout') if coordinator_layout.exists(): print('Scroll down to see more photos.') coordinator_layout.scroll(DeviceFacade.Direction.BOTTOM) is_scrolled_down = True number_of_rows_to_use = min((((interaction_strategy.likes_count * 2) // 3) + 1), 4) photos_indices = list(range(0, (number_of_rows_to_use * 3))) shuffle(photos_indices) photos_indices = photos_indices[:interaction_strategy.likes_count] photos_indices = sorted(photos_indices) def on_like(): global liked_count liked_count += 1 print(((COLOR_OKGREEN + '@{} - photo been liked.'.format(username)) + COLOR_ENDC)) on_action(LikeAction(source_name=user_source, source_type=source_type, user=username)) def on_comment(comment): global is_commented is_commented = True print(((COLOR_OKGREEN + '@{} - photo been commented.'.format(username)) + COLOR_ENDC)) on_action(CommentAction(source_name=user_source, source_type=source_type, user=username, comment=comment)) for i in range(0, interaction_strategy.likes_count): photo_index = photos_indices[i] row = (photo_index // 3) column = (photo_index - (row * 3)) sleeper.random_sleep() print((((((('Open and like photo #' + str((i + 1))) + ' (') + str((row + 1))) + ' row, ') + str((column + 1))) + ' column)')) if (not _open_photo_and_like_and_comment(device, row, column, interaction_strategy.do_like, interaction_strategy.do_comment, interaction_strategy.like_percentage, on_like, interaction_strategy.comment_percentage, interaction_strategy.comments_list, my_username, on_comment)): print(((((COLOR_OKGREEN + 'Less than ') + str((number_of_rows_to_use * 3))) + ' photos.') + COLOR_ENDC)) break def do_follow_action(): global is_followed if interaction_strategy.do_follow: is_followed = _follow(device, username, interaction_strategy.follow_percentage, is_scrolled_down) if is_followed: on_action(FollowAction(source_name=user_source, source_type=source_type, user=username)) if (interaction_strategy.do_follow and (interaction_strategy.do_like or interaction_strategy.do_comment)): like_first_chance = randint(1, 100) if (like_first_chance > 50): print('Going to like-images first and then follow') do_like_actions() 
do_follow_action() else: print('Going to follow first and then like-images') do_follow_action() do_like_actions() else: do_like_actions() do_follow_action() return ((liked_count > 0), is_followed, is_watched, is_commented)
def interact_with_user(device, user_source, source_type, username, my_username, interaction_strategy: InteractionStrategy, on_action) -> (bool, bool): '\n :return: (whether some photos was liked, whether @username was followed during the interaction,\n whether stories were watched, whether was commented)\n ' global liked_count, is_followed, is_scrolled_down, is_commented liked_count = 0 is_followed = False is_watched = False is_scrolled_down = False is_commented = False if (username == my_username): print("It's you, skip.") return ((liked_count == interaction_strategy.likes_count), is_followed, is_watched, is_commented) if interaction_strategy.do_story_watch: is_watched = _watch_stories(device, user_source, source_type, username, interaction_strategy.stories_count, on_action) def do_like_actions(): global is_scrolled_down if (interaction_strategy.do_like or interaction_strategy.do_comment): suggestions_container = device.find(resourceId=f'{device.app_id}:id/similar_accounts_container', className='android.widget.LinearLayout') if suggestions_container.exists(quick=True): print('Close suggestions to avoid bugs while scrolling') arrow_button = device.find(resourceId=f'{device.app_id}:id/row_profile_header_button_chaining', className='android.widget.Button') arrow_button.click(ignore_if_missing=True) sleeper.random_sleep() coordinator_layout = device.find(resourceId=f'{device.app_id}:id/coordinator_root_layout') if coordinator_layout.exists(): print('Scroll down to see more photos.') coordinator_layout.scroll(DeviceFacade.Direction.BOTTOM) is_scrolled_down = True number_of_rows_to_use = min((((interaction_strategy.likes_count * 2) // 3) + 1), 4) photos_indices = list(range(0, (number_of_rows_to_use * 3))) shuffle(photos_indices) photos_indices = photos_indices[:interaction_strategy.likes_count] photos_indices = sorted(photos_indices) def on_like(): global liked_count liked_count += 1 print(((COLOR_OKGREEN + '@{} - photo been liked.'.format(username)) + COLOR_ENDC)) on_action(LikeAction(source_name=user_source, source_type=source_type, user=username)) def on_comment(comment): global is_commented is_commented = True print(((COLOR_OKGREEN + '@{} - photo been commented.'.format(username)) + COLOR_ENDC)) on_action(CommentAction(source_name=user_source, source_type=source_type, user=username, comment=comment)) for i in range(0, interaction_strategy.likes_count): photo_index = photos_indices[i] row = (photo_index // 3) column = (photo_index - (row * 3)) sleeper.random_sleep() print((((((('Open and like photo #' + str((i + 1))) + ' (') + str((row + 1))) + ' row, ') + str((column + 1))) + ' column)')) if (not _open_photo_and_like_and_comment(device, row, column, interaction_strategy.do_like, interaction_strategy.do_comment, interaction_strategy.like_percentage, on_like, interaction_strategy.comment_percentage, interaction_strategy.comments_list, my_username, on_comment)): print(((((COLOR_OKGREEN + 'Less than ') + str((number_of_rows_to_use * 3))) + ' photos.') + COLOR_ENDC)) break def do_follow_action(): global is_followed if interaction_strategy.do_follow: is_followed = _follow(device, username, interaction_strategy.follow_percentage, is_scrolled_down) if is_followed: on_action(FollowAction(source_name=user_source, source_type=source_type, user=username)) if (interaction_strategy.do_follow and (interaction_strategy.do_like or interaction_strategy.do_comment)): like_first_chance = randint(1, 100) if (like_first_chance > 50): print('Going to like-images first and then follow') do_like_actions() 
do_follow_action() else: print('Going to follow first and then like-images') do_follow_action() do_like_actions() else: do_like_actions() do_follow_action() return ((liked_count > 0), is_followed, is_watched, is_commented)<|docstring|>:return: (whether some photos was liked, whether @username was followed during the interaction, whether stories were watched, whether was commented)<|endoftext|>
b71bb699fa03229b0fd3094a73fcb30053e52f1767b70e5f0b4302472915a622
def do_unfollow(device, my_username, username, storage, check_if_is_follower, username_view, follow_status_button_view, on_action): '\n :return: whether unfollow was successful\n ' need_to_go_back_to_list = True unfollow_from_list_chance = randint(1, 100) if ((follow_status_button_view is not None) and (not check_if_is_follower) and (unfollow_from_list_chance > 50)): need_to_go_back_to_list = False print('Unfollowing a profile directly from the following list.') follow_status_button_view.click() else: print('Unfollowing a profile from his profile page.') username_view.click() on_action(GetProfileAction(user=username)) sleeper.random_sleep() if_profile_empty = softban_indicator.detect_empty_profile(device) if if_profile_empty: print('Back to the followings list.') device.back() return False if (check_if_is_follower and _check_is_follower(device, username, my_username)): print((('Skip @' + username) + '. This user is following you.')) storage.update_follow_status(username, True, True) print('Back to the followings list.') device.back() return False unfollow_button = device.find(classNameMatches=TEXTVIEW_OR_BUTTON_REGEX, clickable=True, text='Following') if (not unfollow_button.exists()): print(((COLOR_FAIL + 'Cannot find Following button. Maybe not English language is set?') + COLOR_ENDC)) save_crash(device) switch_to_english(device) raise LanguageChangedException() print(f'Unfollowing @{username}...') unfollow_button.click() sleeper.random_sleep() confirm_unfollow_button = device.find(resourceId=f'{device.app_id}:id/follow_sheet_unfollow_row', className='android.widget.TextView') if (not confirm_unfollow_button.exists()): print(((COLOR_FAIL + 'Cannot confirm unfollow.') + COLOR_ENDC)) save_crash(device) device.back() return False confirm_unfollow_button.click() sleeper.random_sleep() _close_confirm_dialog_if_shown(device) softban_indicator.detect_action_blocked_dialog(device) if need_to_go_back_to_list: print('Back to the followings list.') device.back() return True
:return: whether unfollow was successful
insomniac/actions_impl.py
do_unfollow
davebaird/Insomniac
0
python
def do_unfollow(device, my_username, username, storage, check_if_is_follower, username_view, follow_status_button_view, on_action): '\n \n ' need_to_go_back_to_list = True unfollow_from_list_chance = randint(1, 100) if ((follow_status_button_view is not None) and (not check_if_is_follower) and (unfollow_from_list_chance > 50)): need_to_go_back_to_list = False print('Unfollowing a profile directly from the following list.') follow_status_button_view.click() else: print('Unfollowing a profile from his profile page.') username_view.click() on_action(GetProfileAction(user=username)) sleeper.random_sleep() if_profile_empty = softban_indicator.detect_empty_profile(device) if if_profile_empty: print('Back to the followings list.') device.back() return False if (check_if_is_follower and _check_is_follower(device, username, my_username)): print((('Skip @' + username) + '. This user is following you.')) storage.update_follow_status(username, True, True) print('Back to the followings list.') device.back() return False unfollow_button = device.find(classNameMatches=TEXTVIEW_OR_BUTTON_REGEX, clickable=True, text='Following') if (not unfollow_button.exists()): print(((COLOR_FAIL + 'Cannot find Following button. Maybe not English language is set?') + COLOR_ENDC)) save_crash(device) switch_to_english(device) raise LanguageChangedException() print(f'Unfollowing @{username}...') unfollow_button.click() sleeper.random_sleep() confirm_unfollow_button = device.find(resourceId=f'{device.app_id}:id/follow_sheet_unfollow_row', className='android.widget.TextView') if (not confirm_unfollow_button.exists()): print(((COLOR_FAIL + 'Cannot confirm unfollow.') + COLOR_ENDC)) save_crash(device) device.back() return False confirm_unfollow_button.click() sleeper.random_sleep() _close_confirm_dialog_if_shown(device) softban_indicator.detect_action_blocked_dialog(device) if need_to_go_back_to_list: print('Back to the followings list.') device.back() return True
def do_unfollow(device, my_username, username, storage, check_if_is_follower, username_view, follow_status_button_view, on_action): '\n \n ' need_to_go_back_to_list = True unfollow_from_list_chance = randint(1, 100) if ((follow_status_button_view is not None) and (not check_if_is_follower) and (unfollow_from_list_chance > 50)): need_to_go_back_to_list = False print('Unfollowing a profile directly from the following list.') follow_status_button_view.click() else: print('Unfollowing a profile from his profile page.') username_view.click() on_action(GetProfileAction(user=username)) sleeper.random_sleep() if_profile_empty = softban_indicator.detect_empty_profile(device) if if_profile_empty: print('Back to the followings list.') device.back() return False if (check_if_is_follower and _check_is_follower(device, username, my_username)): print((('Skip @' + username) + '. This user is following you.')) storage.update_follow_status(username, True, True) print('Back to the followings list.') device.back() return False unfollow_button = device.find(classNameMatches=TEXTVIEW_OR_BUTTON_REGEX, clickable=True, text='Following') if (not unfollow_button.exists()): print(((COLOR_FAIL + 'Cannot find Following button. Maybe not English language is set?') + COLOR_ENDC)) save_crash(device) switch_to_english(device) raise LanguageChangedException() print(f'Unfollowing @{username}...') unfollow_button.click() sleeper.random_sleep() confirm_unfollow_button = device.find(resourceId=f'{device.app_id}:id/follow_sheet_unfollow_row', className='android.widget.TextView') if (not confirm_unfollow_button.exists()): print(((COLOR_FAIL + 'Cannot confirm unfollow.') + COLOR_ENDC)) save_crash(device) device.back() return False confirm_unfollow_button.click() sleeper.random_sleep() _close_confirm_dialog_if_shown(device) softban_indicator.detect_action_blocked_dialog(device) if need_to_go_back_to_list: print('Back to the followings list.') device.back() return True<|docstring|>:return: whether unfollow was successful<|endoftext|>
b4ae759cb3b3b202b96f100bbc290d9e86ecb14fc6a961dd87eb3106f70bdd5e
def ineichen(apparent_zenith, airmass_absolute, linke_turbidity, altitude=0, dni_extra=1364.0, perez_enhancement=False): '\n Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model.\n\n Implements the Ineichen and Perez clear sky model for global\n horizontal irradiance (GHI), direct normal irradiance (DNI), and\n calculates the clear-sky diffuse horizontal (DHI) component as the\n difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A\n report on clear sky models found the Ineichen/Perez model to have\n excellent performance with a minimal input data set [3].\n\n Default values for monthly Linke turbidity provided by SoDa [4, 5].\n\n Parameters\n -----------\n apparent_zenith : numeric\n Refraction corrected solar zenith angle in degrees.\n\n airmass_absolute : numeric\n Pressure corrected airmass.\n\n linke_turbidity : numeric\n Linke Turbidity.\n\n altitude : numeric, default 0\n Altitude above sea level in meters.\n\n dni_extra : numeric, default 1364\n Extraterrestrial irradiance. The units of ``dni_extra``\n determine the units of the output.\n\n perez_enhancement : bool, default False\n Controls if the Perez enhancement factor should be applied.\n Setting to True may produce spurious results for times when\n the Sun is near the horizon and the airmass is high.\n See https://github.com/pvlib/pvlib-python/issues/435\n\n Returns\n -------\n clearsky : DataFrame (if Series input) or OrderedDict of arrays\n DataFrame/OrderedDict contains the columns/keys\n ``\'dhi\', \'dni\', \'ghi\'``.\n\n See also\n --------\n lookup_linke_turbidity\n pvlib.location.Location.get_clearsky\n\n References\n ----------\n .. [1] P. Ineichen and R. Perez, "A New airmass independent formulation for\n the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157,\n 2002.\n\n .. [2] R. Perez et. al., "A New Operational Model for Satellite-Derived\n Irradiances: Description and Validation", Solar Energy, vol 73, pp.\n 307-317, 2002.\n\n .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance\n Clear Sky Models: Implementation and Analysis", Sandia National\n Laboratories, SAND2012-2389, 2012.\n\n .. [4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained\n July 17, 2012).\n\n .. [5] J. Remund, et. al., "Worldwide Linke Turbidity Information", Proc.\n ISES Solar World Congress, June 2003. Goteborg, Sweden.\n ' cos_zenith = np.maximum(tools.cosd(apparent_zenith), 0) tl = linke_turbidity fh1 = np.exp(((- altitude) / 8000.0)) fh2 = np.exp(((- altitude) / 1250.0)) cg1 = ((5.09e-05 * altitude) + 0.868) cg2 = ((3.92e-05 * altitude) + 0.0387) ghi = np.exp((((- cg2) * airmass_absolute) * (fh1 + (fh2 * (tl - 1))))) if perez_enhancement: ghi *= np.exp((0.01 * (airmass_absolute ** 1.8))) ghi = (((((cg1 * dni_extra) * cos_zenith) * tl) / tl) * np.fmax(ghi, 0)) b = (0.664 + (0.163 / fh1)) bnci = (b * np.exp((((- 0.09) * airmass_absolute) * (tl - 1)))) bnci = (dni_extra * np.fmax(bnci, 0)) bnci_2 = ((1 - ((0.1 - (0.2 * np.exp((- tl)))) / (0.1 + (0.882 / fh1)))) / cos_zenith) bnci_2 = (ghi * np.fmin(np.fmax(bnci_2, 0), 1e+20)) dni = np.minimum(bnci, bnci_2) dhi = (ghi - (dni * cos_zenith)) irrads = OrderedDict() irrads['ghi'] = ghi irrads['dni'] = dni irrads['dhi'] = dhi if isinstance(dni, pd.Series): irrads = pd.DataFrame.from_dict(irrads) return irrads
Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model. Implements the Ineichen and Perez clear sky model for global horizontal irradiance (GHI), direct normal irradiance (DNI), and calculates the clear-sky diffuse horizontal (DHI) component as the difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A report on clear sky models found the Ineichen/Perez model to have excellent performance with a minimal input data set [3]. Default values for monthly Linke turbidity provided by SoDa [4, 5]. Parameters ----------- apparent_zenith : numeric Refraction corrected solar zenith angle in degrees. airmass_absolute : numeric Pressure corrected airmass. linke_turbidity : numeric Linke Turbidity. altitude : numeric, default 0 Altitude above sea level in meters. dni_extra : numeric, default 1364 Extraterrestrial irradiance. The units of ``dni_extra`` determine the units of the output. perez_enhancement : bool, default False Controls if the Perez enhancement factor should be applied. Setting to True may produce spurious results for times when the Sun is near the horizon and the airmass is high. See https://github.com/pvlib/pvlib-python/issues/435 Returns ------- clearsky : DataFrame (if Series input) or OrderedDict of arrays DataFrame/OrderedDict contains the columns/keys ``'dhi', 'dni', 'ghi'``. See also -------- lookup_linke_turbidity pvlib.location.Location.get_clearsky References ---------- .. [1] P. Ineichen and R. Perez, "A New airmass independent formulation for the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157, 2002. .. [2] R. Perez et. al., "A New Operational Model for Satellite-Derived Irradiances: Description and Validation", Solar Energy, vol 73, pp. 307-317, 2002. .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis", Sandia National Laboratories, SAND2012-2389, 2012. .. [4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained July 17, 2012). .. [5] J. Remund, et. al., "Worldwide Linke Turbidity Information", Proc. ISES Solar World Congress, June 2003. Goteborg, Sweden.
pvlib/clearsky.py
ineichen
Antoine-0/pvlib-python
695
python
def ineichen(apparent_zenith, airmass_absolute, linke_turbidity, altitude=0, dni_extra=1364.0, perez_enhancement=False): '\n Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model.\n\n Implements the Ineichen and Perez clear sky model for global\n horizontal irradiance (GHI), direct normal irradiance (DNI), and\n calculates the clear-sky diffuse horizontal (DHI) component as the\n difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A\n report on clear sky models found the Ineichen/Perez model to have\n excellent performance with a minimal input data set [3].\n\n Default values for monthly Linke turbidity provided by SoDa [4, 5].\n\n Parameters\n -----------\n apparent_zenith : numeric\n Refraction corrected solar zenith angle in degrees.\n\n airmass_absolute : numeric\n Pressure corrected airmass.\n\n linke_turbidity : numeric\n Linke Turbidity.\n\n altitude : numeric, default 0\n Altitude above sea level in meters.\n\n dni_extra : numeric, default 1364\n Extraterrestrial irradiance. The units of ``dni_extra``\n determine the units of the output.\n\n perez_enhancement : bool, default False\n Controls if the Perez enhancement factor should be applied.\n Setting to True may produce spurious results for times when\n the Sun is near the horizon and the airmass is high.\n See https://github.com/pvlib/pvlib-python/issues/435\n\n Returns\n -------\n clearsky : DataFrame (if Series input) or OrderedDict of arrays\n DataFrame/OrderedDict contains the columns/keys\n ``\'dhi\', \'dni\', \'ghi\'``.\n\n See also\n --------\n lookup_linke_turbidity\n pvlib.location.Location.get_clearsky\n\n References\n ----------\n .. [1] P. Ineichen and R. Perez, "A New airmass independent formulation for\n the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157,\n 2002.\n\n .. [2] R. Perez et. al., "A New Operational Model for Satellite-Derived\n Irradiances: Description and Validation", Solar Energy, vol 73, pp.\n 307-317, 2002.\n\n .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance\n Clear Sky Models: Implementation and Analysis", Sandia National\n Laboratories, SAND2012-2389, 2012.\n\n .. [4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained\n July 17, 2012).\n\n .. [5] J. Remund, et. al., "Worldwide Linke Turbidity Information", Proc.\n ISES Solar World Congress, June 2003. Goteborg, Sweden.\n ' cos_zenith = np.maximum(tools.cosd(apparent_zenith), 0) tl = linke_turbidity fh1 = np.exp(((- altitude) / 8000.0)) fh2 = np.exp(((- altitude) / 1250.0)) cg1 = ((5.09e-05 * altitude) + 0.868) cg2 = ((3.92e-05 * altitude) + 0.0387) ghi = np.exp((((- cg2) * airmass_absolute) * (fh1 + (fh2 * (tl - 1))))) if perez_enhancement: ghi *= np.exp((0.01 * (airmass_absolute ** 1.8))) ghi = (((((cg1 * dni_extra) * cos_zenith) * tl) / tl) * np.fmax(ghi, 0)) b = (0.664 + (0.163 / fh1)) bnci = (b * np.exp((((- 0.09) * airmass_absolute) * (tl - 1)))) bnci = (dni_extra * np.fmax(bnci, 0)) bnci_2 = ((1 - ((0.1 - (0.2 * np.exp((- tl)))) / (0.1 + (0.882 / fh1)))) / cos_zenith) bnci_2 = (ghi * np.fmin(np.fmax(bnci_2, 0), 1e+20)) dni = np.minimum(bnci, bnci_2) dhi = (ghi - (dni * cos_zenith)) irrads = OrderedDict() irrads['ghi'] = ghi irrads['dni'] = dni irrads['dhi'] = dhi if isinstance(dni, pd.Series): irrads = pd.DataFrame.from_dict(irrads) return irrads
def ineichen(apparent_zenith, airmass_absolute, linke_turbidity, altitude=0, dni_extra=1364.0, perez_enhancement=False): '\n Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model.\n\n Implements the Ineichen and Perez clear sky model for global\n horizontal irradiance (GHI), direct normal irradiance (DNI), and\n calculates the clear-sky diffuse horizontal (DHI) component as the\n difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A\n report on clear sky models found the Ineichen/Perez model to have\n excellent performance with a minimal input data set [3].\n\n Default values for monthly Linke turbidity provided by SoDa [4, 5].\n\n Parameters\n -----------\n apparent_zenith : numeric\n Refraction corrected solar zenith angle in degrees.\n\n airmass_absolute : numeric\n Pressure corrected airmass.\n\n linke_turbidity : numeric\n Linke Turbidity.\n\n altitude : numeric, default 0\n Altitude above sea level in meters.\n\n dni_extra : numeric, default 1364\n Extraterrestrial irradiance. The units of ``dni_extra``\n determine the units of the output.\n\n perez_enhancement : bool, default False\n Controls if the Perez enhancement factor should be applied.\n Setting to True may produce spurious results for times when\n the Sun is near the horizon and the airmass is high.\n See https://github.com/pvlib/pvlib-python/issues/435\n\n Returns\n -------\n clearsky : DataFrame (if Series input) or OrderedDict of arrays\n DataFrame/OrderedDict contains the columns/keys\n ``\'dhi\', \'dni\', \'ghi\'``.\n\n See also\n --------\n lookup_linke_turbidity\n pvlib.location.Location.get_clearsky\n\n References\n ----------\n .. [1] P. Ineichen and R. Perez, "A New airmass independent formulation for\n the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157,\n 2002.\n\n .. [2] R. Perez et. al., "A New Operational Model for Satellite-Derived\n Irradiances: Description and Validation", Solar Energy, vol 73, pp.\n 307-317, 2002.\n\n .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance\n Clear Sky Models: Implementation and Analysis", Sandia National\n Laboratories, SAND2012-2389, 2012.\n\n .. [4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained\n July 17, 2012).\n\n .. [5] J. Remund, et. al., "Worldwide Linke Turbidity Information", Proc.\n ISES Solar World Congress, June 2003. Goteborg, Sweden.\n ' cos_zenith = np.maximum(tools.cosd(apparent_zenith), 0) tl = linke_turbidity fh1 = np.exp(((- altitude) / 8000.0)) fh2 = np.exp(((- altitude) / 1250.0)) cg1 = ((5.09e-05 * altitude) + 0.868) cg2 = ((3.92e-05 * altitude) + 0.0387) ghi = np.exp((((- cg2) * airmass_absolute) * (fh1 + (fh2 * (tl - 1))))) if perez_enhancement: ghi *= np.exp((0.01 * (airmass_absolute ** 1.8))) ghi = (((((cg1 * dni_extra) * cos_zenith) * tl) / tl) * np.fmax(ghi, 0)) b = (0.664 + (0.163 / fh1)) bnci = (b * np.exp((((- 0.09) * airmass_absolute) * (tl - 1)))) bnci = (dni_extra * np.fmax(bnci, 0)) bnci_2 = ((1 - ((0.1 - (0.2 * np.exp((- tl)))) / (0.1 + (0.882 / fh1)))) / cos_zenith) bnci_2 = (ghi * np.fmin(np.fmax(bnci_2, 0), 1e+20)) dni = np.minimum(bnci, bnci_2) dhi = (ghi - (dni * cos_zenith)) irrads = OrderedDict() irrads['ghi'] = ghi irrads['dni'] = dni irrads['dhi'] = dhi if isinstance(dni, pd.Series): irrads = pd.DataFrame.from_dict(irrads) return irrads<|docstring|>Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model. 
Implements the Ineichen and Perez clear sky model for global horizontal irradiance (GHI), direct normal irradiance (DNI), and calculates the clear-sky diffuse horizontal (DHI) component as the difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A report on clear sky models found the Ineichen/Perez model to have excellent performance with a minimal input data set [3]. Default values for monthly Linke turbidity provided by SoDa [4, 5]. Parameters ----------- apparent_zenith : numeric Refraction corrected solar zenith angle in degrees. airmass_absolute : numeric Pressure corrected airmass. linke_turbidity : numeric Linke Turbidity. altitude : numeric, default 0 Altitude above sea level in meters. dni_extra : numeric, default 1364 Extraterrestrial irradiance. The units of ``dni_extra`` determine the units of the output. perez_enhancement : bool, default False Controls if the Perez enhancement factor should be applied. Setting to True may produce spurious results for times when the Sun is near the horizon and the airmass is high. See https://github.com/pvlib/pvlib-python/issues/435 Returns ------- clearsky : DataFrame (if Series input) or OrderedDict of arrays DataFrame/OrderedDict contains the columns/keys ``'dhi', 'dni', 'ghi'``. See also -------- lookup_linke_turbidity pvlib.location.Location.get_clearsky References ---------- .. [1] P. Ineichen and R. Perez, "A New airmass independent formulation for the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157, 2002. .. [2] R. Perez et. al., "A New Operational Model for Satellite-Derived Irradiances: Description and Validation", Solar Energy, vol 73, pp. 307-317, 2002. .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis", Sandia National Laboratories, SAND2012-2389, 2012. .. [4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained July 17, 2012). .. [5] J. Remund, et. al., "Worldwide Linke Turbidity Information", Proc. ISES Solar World Congress, June 2003. Goteborg, Sweden.<|endoftext|>
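The Ineichen/Perez record above is the standard pvlib clear-sky entry point; a minimal usage sketch, assuming pvlib is installed and importable, with an illustrative site, date range, and Linke turbidity value (none of these come from the dataset):

import pandas as pd
from pvlib import atmosphere, clearsky, solarposition

times = pd.date_range('2021-06-21 06:00', '2021-06-21 18:00', freq='1h', tz='Etc/GMT+7')
latitude, longitude, altitude = 32.2, -110.9, 700  # hypothetical site
solpos = solarposition.get_solarposition(times, latitude, longitude)
airmass_rel = atmosphere.get_relative_airmass(solpos['apparent_zenith'])
airmass_abs = atmosphere.get_absolute_airmass(airmass_rel)  # sea-level pressure by default
cs = clearsky.ineichen(solpos['apparent_zenith'], airmass_abs, linke_turbidity=3.0, altitude=altitude)
# cs is a DataFrame with 'ghi', 'dni' and 'dhi' columns, as described in the docstring above.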
f47894dcc8a3fcdf34fa5a46e698c401b1f1ad0bcd86e532f3e8360c6a07ff06
def lookup_linke_turbidity(time, latitude, longitude, filepath=None, interp_turbidity=True): '\n Look up the Linke Turibidity from the ``LinkeTurbidities.h5``\n data file supplied with pvlib.\n\n Parameters\n ----------\n time : pandas.DatetimeIndex\n\n latitude : float or int\n\n longitude : float or int\n\n filepath : None or string, default None\n The path to the ``.h5`` file.\n\n interp_turbidity : bool, default True\n If ``True``, interpolates the monthly Linke turbidity values\n found in ``LinkeTurbidities.h5`` to daily values.\n\n Returns\n -------\n turbidity : Series\n ' if (filepath is None): pvlib_path = os.path.dirname(os.path.abspath(__file__)) filepath = os.path.join(pvlib_path, 'data', 'LinkeTurbidities.h5') latitude_index = _degrees_to_index(latitude, coordinate='latitude') longitude_index = _degrees_to_index(longitude, coordinate='longitude') with h5py.File(filepath, 'r') as lt_h5_file: lts = lt_h5_file['LinkeTurbidity'][(latitude_index, longitude_index)] if interp_turbidity: linke_turbidity = _interpolate_turbidity(lts, time) else: months = (time.month - 1) linke_turbidity = pd.Series(lts[months], index=time) linke_turbidity /= 20.0 return linke_turbidity
Look up the Linke Turbidity from the ``LinkeTurbidities.h5`` data file supplied with pvlib. Parameters ---------- time : pandas.DatetimeIndex latitude : float or int longitude : float or int filepath : None or string, default None The path to the ``.h5`` file. interp_turbidity : bool, default True If ``True``, interpolates the monthly Linke turbidity values found in ``LinkeTurbidities.h5`` to daily values. Returns ------- turbidity : Series
pvlib/clearsky.py
lookup_linke_turbidity
Antoine-0/pvlib-python
695
python
def lookup_linke_turbidity(time, latitude, longitude, filepath=None, interp_turbidity=True): '\n Look up the Linke Turibidity from the ``LinkeTurbidities.h5``\n data file supplied with pvlib.\n\n Parameters\n ----------\n time : pandas.DatetimeIndex\n\n latitude : float or int\n\n longitude : float or int\n\n filepath : None or string, default None\n The path to the ``.h5`` file.\n\n interp_turbidity : bool, default True\n If ``True``, interpolates the monthly Linke turbidity values\n found in ``LinkeTurbidities.h5`` to daily values.\n\n Returns\n -------\n turbidity : Series\n ' if (filepath is None): pvlib_path = os.path.dirname(os.path.abspath(__file__)) filepath = os.path.join(pvlib_path, 'data', 'LinkeTurbidities.h5') latitude_index = _degrees_to_index(latitude, coordinate='latitude') longitude_index = _degrees_to_index(longitude, coordinate='longitude') with h5py.File(filepath, 'r') as lt_h5_file: lts = lt_h5_file['LinkeTurbidity'][(latitude_index, longitude_index)] if interp_turbidity: linke_turbidity = _interpolate_turbidity(lts, time) else: months = (time.month - 1) linke_turbidity = pd.Series(lts[months], index=time) linke_turbidity /= 20.0 return linke_turbidity
def lookup_linke_turbidity(time, latitude, longitude, filepath=None, interp_turbidity=True): '\n Look up the Linke Turibidity from the ``LinkeTurbidities.h5``\n data file supplied with pvlib.\n\n Parameters\n ----------\n time : pandas.DatetimeIndex\n\n latitude : float or int\n\n longitude : float or int\n\n filepath : None or string, default None\n The path to the ``.h5`` file.\n\n interp_turbidity : bool, default True\n If ``True``, interpolates the monthly Linke turbidity values\n found in ``LinkeTurbidities.h5`` to daily values.\n\n Returns\n -------\n turbidity : Series\n ' if (filepath is None): pvlib_path = os.path.dirname(os.path.abspath(__file__)) filepath = os.path.join(pvlib_path, 'data', 'LinkeTurbidities.h5') latitude_index = _degrees_to_index(latitude, coordinate='latitude') longitude_index = _degrees_to_index(longitude, coordinate='longitude') with h5py.File(filepath, 'r') as lt_h5_file: lts = lt_h5_file['LinkeTurbidity'][(latitude_index, longitude_index)] if interp_turbidity: linke_turbidity = _interpolate_turbidity(lts, time) else: months = (time.month - 1) linke_turbidity = pd.Series(lts[months], index=time) linke_turbidity /= 20.0 return linke_turbidity<|docstring|>Look up the Linke Turibidity from the ``LinkeTurbidities.h5`` data file supplied with pvlib. Parameters ---------- time : pandas.DatetimeIndex latitude : float or int longitude : float or int filepath : None or string, default None The path to the ``.h5`` file. interp_turbidity : bool, default True If ``True``, interpolates the monthly Linke turbidity values found in ``LinkeTurbidities.h5`` to daily values. Returns ------- turbidity : Series<|endoftext|>
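A short sketch of how the lookup above might be called, assuming pvlib is installed so the bundled LinkeTurbidities.h5 file (and h5py) is available; the coordinates and dates are illustrative:

import pandas as pd
from pvlib.clearsky import lookup_linke_turbidity

times = pd.date_range('2021-01-01', '2021-12-31', freq='1D', tz='UTC')
tl_daily = lookup_linke_turbidity(times, latitude=46.2, longitude=6.1)         # monthly values interpolated to daily
tl_monthly = lookup_linke_turbidity(times, 46.2, 6.1, interp_turbidity=False)  # raw monthly values, stepwise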
ff346fb4c9c3a9691629e4c2ed0d78b763fc0582973b268c0c1fe1f46979a939
def _is_leap_year(year): 'Determine if a year is leap year.\n\n Parameters\n ----------\n year : numeric\n\n Returns\n -------\n isleap : array of bools\n ' isleap = ((np.mod(year, 4) == 0) & ((np.mod(year, 100) != 0) | (np.mod(year, 400) == 0))) return isleap
Determine if a year is a leap year. Parameters ---------- year : numeric Returns ------- isleap : array of bools
pvlib/clearsky.py
_is_leap_year
Antoine-0/pvlib-python
695
python
def _is_leap_year(year): 'Determine if a year is leap year.\n\n Parameters\n ----------\n year : numeric\n\n Returns\n -------\n isleap : array of bools\n ' isleap = ((np.mod(year, 4) == 0) & ((np.mod(year, 100) != 0) | (np.mod(year, 400) == 0))) return isleap
def _is_leap_year(year): 'Determine if a year is leap year.\n\n Parameters\n ----------\n year : numeric\n\n Returns\n -------\n isleap : array of bools\n ' isleap = ((np.mod(year, 4) == 0) & ((np.mod(year, 100) != 0) | (np.mod(year, 400) == 0))) return isleap<|docstring|>Determine if a year is leap year. Parameters ---------- year : numeric Returns ------- isleap : array of bools<|endoftext|>
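The leap-year rule encoded above (divisible by 4, except century years that are not divisible by 400) can be spot-checked directly; a small self-contained sketch:

import numpy as np

years = np.array([1900, 2000, 2015, 2016])
isleap = (np.mod(years, 4) == 0) & ((np.mod(years, 100) != 0) | (np.mod(years, 400) == 0))
# -> array([False, True, False, True]): 1900 is not a leap year, 2000 and 2016 are.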
705597f2634249965d1f729570e0e3b192efb36a0871f7a77aed41927136886d
def _interpolate_turbidity(lts, time): '\n Interpolated monthly Linke turbidity onto daily values.\n\n Parameters\n ----------\n lts : np.array\n Monthly Linke turbidity values.\n time : pd.DatetimeIndex\n Times to be interpolated onto.\n\n Returns\n -------\n linke_turbidity : pd.Series\n The interpolated turbidity.\n ' lts_concat = np.concatenate([[lts[(- 1)]], lts, [lts[0]]]) try: isleap = time.is_leap_year except AttributeError: year = time.year isleap = _is_leap_year(year) dayofyear = time.dayofyear days_leap = _calendar_month_middles(2016) days_no_leap = _calendar_month_middles(2015) lt_leap = np.interp(dayofyear, days_leap, lts_concat) lt_no_leap = np.interp(dayofyear, days_no_leap, lts_concat) linke_turbidity = np.where(isleap, lt_leap, lt_no_leap) linke_turbidity = pd.Series(linke_turbidity, index=time) return linke_turbidity
Interpolate monthly Linke turbidity onto daily values. Parameters ---------- lts : np.array Monthly Linke turbidity values. time : pd.DatetimeIndex Times to be interpolated onto. Returns ------- linke_turbidity : pd.Series The interpolated turbidity.
pvlib/clearsky.py
_interpolate_turbidity
Antoine-0/pvlib-python
695
python
def _interpolate_turbidity(lts, time): '\n Interpolated monthly Linke turbidity onto daily values.\n\n Parameters\n ----------\n lts : np.array\n Monthly Linke turbidity values.\n time : pd.DatetimeIndex\n Times to be interpolated onto.\n\n Returns\n -------\n linke_turbidity : pd.Series\n The interpolated turbidity.\n ' lts_concat = np.concatenate([[lts[(- 1)]], lts, [lts[0]]]) try: isleap = time.is_leap_year except AttributeError: year = time.year isleap = _is_leap_year(year) dayofyear = time.dayofyear days_leap = _calendar_month_middles(2016) days_no_leap = _calendar_month_middles(2015) lt_leap = np.interp(dayofyear, days_leap, lts_concat) lt_no_leap = np.interp(dayofyear, days_no_leap, lts_concat) linke_turbidity = np.where(isleap, lt_leap, lt_no_leap) linke_turbidity = pd.Series(linke_turbidity, index=time) return linke_turbidity
def _interpolate_turbidity(lts, time): '\n Interpolated monthly Linke turbidity onto daily values.\n\n Parameters\n ----------\n lts : np.array\n Monthly Linke turbidity values.\n time : pd.DatetimeIndex\n Times to be interpolated onto.\n\n Returns\n -------\n linke_turbidity : pd.Series\n The interpolated turbidity.\n ' lts_concat = np.concatenate([[lts[(- 1)]], lts, [lts[0]]]) try: isleap = time.is_leap_year except AttributeError: year = time.year isleap = _is_leap_year(year) dayofyear = time.dayofyear days_leap = _calendar_month_middles(2016) days_no_leap = _calendar_month_middles(2015) lt_leap = np.interp(dayofyear, days_leap, lts_concat) lt_no_leap = np.interp(dayofyear, days_no_leap, lts_concat) linke_turbidity = np.where(isleap, lt_leap, lt_no_leap) linke_turbidity = pd.Series(linke_turbidity, index=time) return linke_turbidity<|docstring|>Interpolated monthly Linke turbidity onto daily values. Parameters ---------- lts : np.array Monthly Linke turbidity values. time : pd.DatetimeIndex Times to be interpolated onto. Returns ------- linke_turbidity : pd.Series The interpolated turbidity.<|endoftext|>
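The record above reduces to linear interpolation of 14 padded monthly values (December copied in front, January copied at the end) onto day-of-year positions; a minimal sketch with made-up turbidity values, using anchor positions like those produced by the month-middles helper in the next record:

import numpy as np

lts = np.linspace(3.0, 4.0, 12)                          # hypothetical monthly Linke turbidity
lts_concat = np.concatenate([[lts[-1]], lts, [lts[0]]])  # wrap-around padding, as in the record above
middles = np.array([-15.5, 15.5, 45.0, 74.5, 105.0, 135.5, 166.0,
                    196.5, 227.5, 258.0, 288.5, 319.0, 349.5, 380.5])  # non-leap-year month middles
daily = np.interp(np.arange(1, 366), middles, lts_concat)  # one turbidity value per day of year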
4fcda9501b89799343d63565babdd7e91bec2778c1760221477f313a45838df3
def _calendar_month_middles(year): 'List of middle day of each month, used by Linke turbidity lookup' mdays = np.array(calendar.mdays[1:]) ydays = 365 if calendar.isleap(year): mdays[1] = (mdays[1] + 1) ydays = 366 middles = np.concatenate([[((- calendar.mdays[(- 1)]) / 2.0)], (np.cumsum(mdays) - (np.array(mdays) / 2.0)), [(ydays + (calendar.mdays[1] / 2.0))]]) return middles
List of the middle day of each month, used by the Linke turbidity lookup
pvlib/clearsky.py
_calendar_month_middles
Antoine-0/pvlib-python
695
python
def _calendar_month_middles(year): mdays = np.array(calendar.mdays[1:]) ydays = 365 if calendar.isleap(year): mdays[1] = (mdays[1] + 1) ydays = 366 middles = np.concatenate([[((- calendar.mdays[(- 1)]) / 2.0)], (np.cumsum(mdays) - (np.array(mdays) / 2.0)), [(ydays + (calendar.mdays[1] / 2.0))]]) return middles
def _calendar_month_middles(year): mdays = np.array(calendar.mdays[1:]) ydays = 365 if calendar.isleap(year): mdays[1] = (mdays[1] + 1) ydays = 366 middles = np.concatenate([[((- calendar.mdays[(- 1)]) / 2.0)], (np.cumsum(mdays) - (np.array(mdays) / 2.0)), [(ydays + (calendar.mdays[1] / 2.0))]]) return middles<|docstring|>List of middle day of each month, used by Linke turbidity lookup<|endoftext|>
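A quick worked check of the month-middle arithmetic above for a non-leap year, writing out the helper's three pieces:

import calendar
import numpy as np

mdays = np.array(calendar.mdays[1:])                         # [31, 28, 31, ...] month lengths
middles = np.concatenate([[-calendar.mdays[-1] / 2.0],       # -15.5, middle of the preceding December
                          np.cumsum(mdays) - mdays / 2.0,    # 15.5, 45.0, ..., 349.5
                          [365 + calendar.mdays[1] / 2.0]])  # 380.5, middle of the following January
# 14 anchors in total: 12 months plus the two wrap-around copies used by the interpolation.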
f9768d351408a1da6805b9a2f858880459c5e8d215b388c7f5f16f8f4df20b33
def _degrees_to_index(degrees, coordinate): "Transform input degrees to an output index integer. The Linke\n turbidity lookup tables have three dimensions, latitude, longitude, and\n month. Specify a degree value and either 'latitude' or 'longitude' to get\n the appropriate index number for the first two of these index numbers.\n\n Parameters\n ----------\n degrees : float or int\n Degrees of either latitude or longitude.\n coordinate : string\n Specify whether degrees arg is latitude or longitude. Must be set to\n either 'latitude' or 'longitude' or an error will be raised.\n\n Returns\n -------\n index : np.int16\n The latitude or longitude index number to use when looking up values\n in the Linke turbidity lookup table.\n " if (coordinate == 'latitude'): inputmin = 90 inputmax = (- 90) outputmax = 2160 elif (coordinate == 'longitude'): inputmin = (- 180) inputmax = 180 outputmax = 4320 else: raise IndexError("coordinate must be 'latitude' or 'longitude'.") inputrange = (inputmax - inputmin) scale = (outputmax / inputrange) center = (inputmin + ((1 / scale) / 2)) outputmax -= 1 index = ((degrees - center) * scale) err = IndexError(('Input, %g, is out of range (%g, %g).' % (degrees, inputmin, inputmax))) if (index > outputmax): if ((index - outputmax) <= 0.500001): index = outputmax else: raise err elif (index < 0): if ((- index) <= 0.500001): index = 0 else: raise err else: index = int(np.around(index)) return index
Transform input degrees to an output index integer. The Linke turbidity lookup tables have three dimensions, latitude, longitude, and month. Specify a degree value and either 'latitude' or 'longitude' to get the appropriate index number for the first two of these dimensions. Parameters ---------- degrees : float or int Degrees of either latitude or longitude. coordinate : string Specify whether degrees arg is latitude or longitude. Must be set to either 'latitude' or 'longitude' or an error will be raised. Returns ------- index : np.int16 The latitude or longitude index number to use when looking up values in the Linke turbidity lookup table.
pvlib/clearsky.py
_degrees_to_index
Antoine-0/pvlib-python
695
python
def _degrees_to_index(degrees, coordinate): "Transform input degrees to an output index integer. The Linke\n turbidity lookup tables have three dimensions, latitude, longitude, and\n month. Specify a degree value and either 'latitude' or 'longitude' to get\n the appropriate index number for the first two of these index numbers.\n\n Parameters\n ----------\n degrees : float or int\n Degrees of either latitude or longitude.\n coordinate : string\n Specify whether degrees arg is latitude or longitude. Must be set to\n either 'latitude' or 'longitude' or an error will be raised.\n\n Returns\n -------\n index : np.int16\n The latitude or longitude index number to use when looking up values\n in the Linke turbidity lookup table.\n " if (coordinate == 'latitude'): inputmin = 90 inputmax = (- 90) outputmax = 2160 elif (coordinate == 'longitude'): inputmin = (- 180) inputmax = 180 outputmax = 4320 else: raise IndexError("coordinate must be 'latitude' or 'longitude'.") inputrange = (inputmax - inputmin) scale = (outputmax / inputrange) center = (inputmin + ((1 / scale) / 2)) outputmax -= 1 index = ((degrees - center) * scale) err = IndexError(('Input, %g, is out of range (%g, %g).' % (degrees, inputmin, inputmax))) if (index > outputmax): if ((index - outputmax) <= 0.500001): index = outputmax else: raise err elif (index < 0): if ((- index) <= 0.500001): index = 0 else: raise err else: index = int(np.around(index)) return index
def _degrees_to_index(degrees, coordinate): "Transform input degrees to an output index integer. The Linke\n turbidity lookup tables have three dimensions, latitude, longitude, and\n month. Specify a degree value and either 'latitude' or 'longitude' to get\n the appropriate index number for the first two of these index numbers.\n\n Parameters\n ----------\n degrees : float or int\n Degrees of either latitude or longitude.\n coordinate : string\n Specify whether degrees arg is latitude or longitude. Must be set to\n either 'latitude' or 'longitude' or an error will be raised.\n\n Returns\n -------\n index : np.int16\n The latitude or longitude index number to use when looking up values\n in the Linke turbidity lookup table.\n " if (coordinate == 'latitude'): inputmin = 90 inputmax = (- 90) outputmax = 2160 elif (coordinate == 'longitude'): inputmin = (- 180) inputmax = 180 outputmax = 4320 else: raise IndexError("coordinate must be 'latitude' or 'longitude'.") inputrange = (inputmax - inputmin) scale = (outputmax / inputrange) center = (inputmin + ((1 / scale) / 2)) outputmax -= 1 index = ((degrees - center) * scale) err = IndexError(('Input, %g, is out of range (%g, %g).' % (degrees, inputmin, inputmax))) if (index > outputmax): if ((index - outputmax) <= 0.500001): index = outputmax else: raise err elif (index < 0): if ((- index) <= 0.500001): index = 0 else: raise err else: index = int(np.around(index)) return index<|docstring|>Transform input degrees to an output index integer. The Linke turbidity lookup tables have three dimensions, latitude, longitude, and month. Specify a degree value and either 'latitude' or 'longitude' to get the appropriate index number for the first two of these index numbers. Parameters ---------- degrees : float or int Degrees of either latitude or longitude. coordinate : string Specify whether degrees arg is latitude or longitude. Must be set to either 'latitude' or 'longitude' or an error will be raised. Returns ------- index : np.int16 The latitude or longitude index number to use when looking up values in the Linke turbidity lookup table.<|endoftext|>
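A worked instance of the mapping above for the latitude axis: the table spans 90 to -90 degrees over 2160 rows, so scale = 2160 / -180 = -12 and center = 90 - 1/24, and a latitude of 46.2 degrees gives (46.2 - 89.9583) * -12, roughly 525.1, which rounds to row 525. The private helper can be imported for a quick check; the coordinates are illustrative:

from pvlib.clearsky import _degrees_to_index

lat_idx = _degrees_to_index(46.2, coordinate='latitude')    # -> 525
lon_idx = _degrees_to_index(6.1, coordinate='longitude')    # -> 2233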
5f22c1a8b094e14d55ba2d4bc872419bc0c8f9e937ce2ac9ed444bfe92c5acf1
def haurwitz(apparent_zenith): '\n Determine clear sky GHI using the Haurwitz model.\n\n Implements the Haurwitz clear sky model for global horizontal\n irradiance (GHI) as presented in [1, 2]. A report on clear\n sky models found the Haurwitz model to have the best performance\n in terms of average monthly error among models which require only\n zenith angle [3].\n\n Parameters\n ----------\n apparent_zenith : Series\n The apparent (refraction corrected) sun zenith angle\n in degrees.\n\n Returns\n -------\n ghi : DataFrame\n The modeled global horizonal irradiance in W/m^2 provided\n by the Haurwitz clear-sky model.\n\n References\n ----------\n\n .. [1] B. Haurwitz, "Insolation in Relation to Cloudiness and Cloud\n Density," Journal of Meteorology, vol. 2, pp. 154-166, 1945.\n\n .. [2] B. Haurwitz, "Insolation in Relation to Cloud Type," Journal of\n Meteorology, vol. 3, pp. 123-124, 1946.\n\n .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance\n Clear Sky Models: Implementation and Analysis", Sandia National\n Laboratories, SAND2012-2389, 2012.\n ' cos_zenith = tools.cosd(apparent_zenith.values) clearsky_ghi = np.zeros_like(apparent_zenith.values) cos_zen_gte_0 = (cos_zenith > 0) clearsky_ghi[cos_zen_gte_0] = ((1098.0 * cos_zenith[cos_zen_gte_0]) * np.exp(((- 0.059) / cos_zenith[cos_zen_gte_0]))) df_out = pd.DataFrame(index=apparent_zenith.index, data=clearsky_ghi, columns=['ghi']) return df_out
Determine clear sky GHI using the Haurwitz model. Implements the Haurwitz clear sky model for global horizontal irradiance (GHI) as presented in [1, 2]. A report on clear sky models found the Haurwitz model to have the best performance in terms of average monthly error among models which require only zenith angle [3]. Parameters ---------- apparent_zenith : Series The apparent (refraction corrected) sun zenith angle in degrees. Returns ------- ghi : DataFrame The modeled global horizontal irradiance in W/m^2 provided by the Haurwitz clear-sky model. References ---------- .. [1] B. Haurwitz, "Insolation in Relation to Cloudiness and Cloud Density," Journal of Meteorology, vol. 2, pp. 154-166, 1945. .. [2] B. Haurwitz, "Insolation in Relation to Cloud Type," Journal of Meteorology, vol. 3, pp. 123-124, 1946. .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis", Sandia National Laboratories, SAND2012-2389, 2012.
pvlib/clearsky.py
haurwitz
Antoine-0/pvlib-python
695
python
def haurwitz(apparent_zenith): '\n Determine clear sky GHI using the Haurwitz model.\n\n Implements the Haurwitz clear sky model for global horizontal\n irradiance (GHI) as presented in [1, 2]. A report on clear\n sky models found the Haurwitz model to have the best performance\n in terms of average monthly error among models which require only\n zenith angle [3].\n\n Parameters\n ----------\n apparent_zenith : Series\n The apparent (refraction corrected) sun zenith angle\n in degrees.\n\n Returns\n -------\n ghi : DataFrame\n The modeled global horizonal irradiance in W/m^2 provided\n by the Haurwitz clear-sky model.\n\n References\n ----------\n\n .. [1] B. Haurwitz, "Insolation in Relation to Cloudiness and Cloud\n Density," Journal of Meteorology, vol. 2, pp. 154-166, 1945.\n\n .. [2] B. Haurwitz, "Insolation in Relation to Cloud Type," Journal of\n Meteorology, vol. 3, pp. 123-124, 1946.\n\n .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance\n Clear Sky Models: Implementation and Analysis", Sandia National\n Laboratories, SAND2012-2389, 2012.\n ' cos_zenith = tools.cosd(apparent_zenith.values) clearsky_ghi = np.zeros_like(apparent_zenith.values) cos_zen_gte_0 = (cos_zenith > 0) clearsky_ghi[cos_zen_gte_0] = ((1098.0 * cos_zenith[cos_zen_gte_0]) * np.exp(((- 0.059) / cos_zenith[cos_zen_gte_0]))) df_out = pd.DataFrame(index=apparent_zenith.index, data=clearsky_ghi, columns=['ghi']) return df_out
def haurwitz(apparent_zenith): '\n Determine clear sky GHI using the Haurwitz model.\n\n Implements the Haurwitz clear sky model for global horizontal\n irradiance (GHI) as presented in [1, 2]. A report on clear\n sky models found the Haurwitz model to have the best performance\n in terms of average monthly error among models which require only\n zenith angle [3].\n\n Parameters\n ----------\n apparent_zenith : Series\n The apparent (refraction corrected) sun zenith angle\n in degrees.\n\n Returns\n -------\n ghi : DataFrame\n The modeled global horizonal irradiance in W/m^2 provided\n by the Haurwitz clear-sky model.\n\n References\n ----------\n\n .. [1] B. Haurwitz, "Insolation in Relation to Cloudiness and Cloud\n Density," Journal of Meteorology, vol. 2, pp. 154-166, 1945.\n\n .. [2] B. Haurwitz, "Insolation in Relation to Cloud Type," Journal of\n Meteorology, vol. 3, pp. 123-124, 1946.\n\n .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance\n Clear Sky Models: Implementation and Analysis", Sandia National\n Laboratories, SAND2012-2389, 2012.\n ' cos_zenith = tools.cosd(apparent_zenith.values) clearsky_ghi = np.zeros_like(apparent_zenith.values) cos_zen_gte_0 = (cos_zenith > 0) clearsky_ghi[cos_zen_gte_0] = ((1098.0 * cos_zenith[cos_zen_gte_0]) * np.exp(((- 0.059) / cos_zenith[cos_zen_gte_0]))) df_out = pd.DataFrame(index=apparent_zenith.index, data=clearsky_ghi, columns=['ghi']) return df_out<|docstring|>Determine clear sky GHI using the Haurwitz model. Implements the Haurwitz clear sky model for global horizontal irradiance (GHI) as presented in [1, 2]. A report on clear sky models found the Haurwitz model to have the best performance in terms of average monthly error among models which require only zenith angle [3]. Parameters ---------- apparent_zenith : Series The apparent (refraction corrected) sun zenith angle in degrees. Returns ------- ghi : DataFrame The modeled global horizonal irradiance in W/m^2 provided by the Haurwitz clear-sky model. References ---------- .. [1] B. Haurwitz, "Insolation in Relation to Cloudiness and Cloud Density," Journal of Meteorology, vol. 2, pp. 154-166, 1945. .. [2] B. Haurwitz, "Insolation in Relation to Cloud Type," Journal of Meteorology, vol. 3, pp. 123-124, 1946. .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis", Sandia National Laboratories, SAND2012-2389, 2012.<|endoftext|>
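The Haurwitz record above reduces to a single expression, ghi = 1098 * cos(zenith) * exp(-0.059 / cos(zenith)) when the sun is above the horizon and 0 otherwise; a minimal sketch with an illustrative zenith series (the commented values are approximate):

import pandas as pd
from pvlib.clearsky import haurwitz

zenith = pd.Series([30.0, 60.0, 85.0, 95.0],
                   index=pd.date_range('2021-06-21 10:00', periods=4, freq='1h'))
ghi = haurwitz(zenith)['ghi']   # roughly 888, 488, 49 and 0 W/m^2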
cfa06ec53980286c741bfd8950b55192bd97df60ace15612654bdbb0a5ed3cf5
def simplified_solis(apparent_elevation, aod700=0.1, precipitable_water=1.0, pressure=101325.0, dni_extra=1364.0): '\n Calculate the clear sky GHI, DNI, and DHI according to the\n simplified Solis model.\n\n Reference [1]_ describes the accuracy of the model as being 15, 20,\n and 18 W/m^2 for the beam, global, and diffuse components. Reference\n [2]_ provides comparisons with other clear sky models.\n\n Parameters\n ----------\n apparent_elevation : numeric\n The apparent elevation of the sun above the horizon (deg).\n\n aod700 : numeric, default 0.1\n The aerosol optical depth at 700 nm (unitless).\n Algorithm derived for values between 0 and 0.45.\n\n precipitable_water : numeric, default 1.0\n The precipitable water of the atmosphere (cm).\n Algorithm derived for values between 0.2 and 10 cm.\n Values less than 0.2 will be assumed to be equal to 0.2.\n\n pressure : numeric, default 101325.0\n The atmospheric pressure (Pascals).\n Algorithm derived for altitudes between sea level and 7000 m,\n or 101325 and 41000 Pascals.\n\n dni_extra : numeric, default 1364.0\n Extraterrestrial irradiance. The units of ``dni_extra``\n determine the units of the output.\n\n Returns\n -------\n clearsky : DataFrame (if Series input) or OrderedDict of arrays\n DataFrame/OrderedDict contains the columns/keys\n ``\'dhi\', \'dni\', \'ghi\'``.\n\n References\n ----------\n .. [1] P. Ineichen, "A broadband simplified version of the\n Solis clear sky model," Solar Energy, 82, 758-762 (2008).\n\n .. [2] P. Ineichen, "Validation of models that estimate the clear\n sky global and beam solar irradiance," Solar Energy, 132,\n 332-344 (2016).\n ' p = pressure w = precipitable_water w = np.maximum(w, 0.2) i0p = _calc_i0p(dni_extra, w, aod700, p) taub = _calc_taub(w, aod700, p) b = _calc_b(w, aod700) taug = _calc_taug(w, aod700, p) g = _calc_g(w, aod700) taud = _calc_taud(w, aod700, p) d = _calc_d(aod700, p) sin_elev = np.maximum(1e-30, np.sin(np.radians(apparent_elevation))) dni = (i0p * np.exp(((- taub) / (sin_elev ** b)))) ghi = ((i0p * np.exp(((- taug) / (sin_elev ** g)))) * sin_elev) dhi = (i0p * np.exp(((- taud) / (sin_elev ** d)))) irrads = OrderedDict() irrads['ghi'] = ghi irrads['dni'] = dni irrads['dhi'] = dhi if isinstance(dni, pd.Series): irrads = pd.DataFrame.from_dict(irrads) return irrads
Calculate the clear sky GHI, DNI, and DHI according to the simplified Solis model. Reference [1]_ describes the accuracy of the model as being 15, 20, and 18 W/m^2 for the beam, global, and diffuse components. Reference [2]_ provides comparisons with other clear sky models. Parameters ---------- apparent_elevation : numeric The apparent elevation of the sun above the horizon (deg). aod700 : numeric, default 0.1 The aerosol optical depth at 700 nm (unitless). Algorithm derived for values between 0 and 0.45. precipitable_water : numeric, default 1.0 The precipitable water of the atmosphere (cm). Algorithm derived for values between 0.2 and 10 cm. Values less than 0.2 will be assumed to be equal to 0.2. pressure : numeric, default 101325.0 The atmospheric pressure (Pascals). Algorithm derived for altitudes between sea level and 7000 m, or 101325 and 41000 Pascals. dni_extra : numeric, default 1364.0 Extraterrestrial irradiance. The units of ``dni_extra`` determine the units of the output. Returns ------- clearsky : DataFrame (if Series input) or OrderedDict of arrays DataFrame/OrderedDict contains the columns/keys ``'dhi', 'dni', 'ghi'``. References ---------- .. [1] P. Ineichen, "A broadband simplified version of the Solis clear sky model," Solar Energy, 82, 758-762 (2008). .. [2] P. Ineichen, "Validation of models that estimate the clear sky global and beam solar irradiance," Solar Energy, 132, 332-344 (2016).
pvlib/clearsky.py
simplified_solis
Antoine-0/pvlib-python
695
python
def simplified_solis(apparent_elevation, aod700=0.1, precipitable_water=1.0, pressure=101325.0, dni_extra=1364.0): '\n Calculate the clear sky GHI, DNI, and DHI according to the\n simplified Solis model.\n\n Reference [1]_ describes the accuracy of the model as being 15, 20,\n and 18 W/m^2 for the beam, global, and diffuse components. Reference\n [2]_ provides comparisons with other clear sky models.\n\n Parameters\n ----------\n apparent_elevation : numeric\n The apparent elevation of the sun above the horizon (deg).\n\n aod700 : numeric, default 0.1\n The aerosol optical depth at 700 nm (unitless).\n Algorithm derived for values between 0 and 0.45.\n\n precipitable_water : numeric, default 1.0\n The precipitable water of the atmosphere (cm).\n Algorithm derived for values between 0.2 and 10 cm.\n Values less than 0.2 will be assumed to be equal to 0.2.\n\n pressure : numeric, default 101325.0\n The atmospheric pressure (Pascals).\n Algorithm derived for altitudes between sea level and 7000 m,\n or 101325 and 41000 Pascals.\n\n dni_extra : numeric, default 1364.0\n Extraterrestrial irradiance. The units of ``dni_extra``\n determine the units of the output.\n\n Returns\n -------\n clearsky : DataFrame (if Series input) or OrderedDict of arrays\n DataFrame/OrderedDict contains the columns/keys\n ``\'dhi\', \'dni\', \'ghi\'``.\n\n References\n ----------\n .. [1] P. Ineichen, "A broadband simplified version of the\n Solis clear sky model," Solar Energy, 82, 758-762 (2008).\n\n .. [2] P. Ineichen, "Validation of models that estimate the clear\n sky global and beam solar irradiance," Solar Energy, 132,\n 332-344 (2016).\n ' p = pressure w = precipitable_water w = np.maximum(w, 0.2) i0p = _calc_i0p(dni_extra, w, aod700, p) taub = _calc_taub(w, aod700, p) b = _calc_b(w, aod700) taug = _calc_taug(w, aod700, p) g = _calc_g(w, aod700) taud = _calc_taud(w, aod700, p) d = _calc_d(aod700, p) sin_elev = np.maximum(1e-30, np.sin(np.radians(apparent_elevation))) dni = (i0p * np.exp(((- taub) / (sin_elev ** b)))) ghi = ((i0p * np.exp(((- taug) / (sin_elev ** g)))) * sin_elev) dhi = (i0p * np.exp(((- taud) / (sin_elev ** d)))) irrads = OrderedDict() irrads['ghi'] = ghi irrads['dni'] = dni irrads['dhi'] = dhi if isinstance(dni, pd.Series): irrads = pd.DataFrame.from_dict(irrads) return irrads
def simplified_solis(apparent_elevation, aod700=0.1, precipitable_water=1.0, pressure=101325.0, dni_extra=1364.0): '\n Calculate the clear sky GHI, DNI, and DHI according to the\n simplified Solis model.\n\n Reference [1]_ describes the accuracy of the model as being 15, 20,\n and 18 W/m^2 for the beam, global, and diffuse components. Reference\n [2]_ provides comparisons with other clear sky models.\n\n Parameters\n ----------\n apparent_elevation : numeric\n The apparent elevation of the sun above the horizon (deg).\n\n aod700 : numeric, default 0.1\n The aerosol optical depth at 700 nm (unitless).\n Algorithm derived for values between 0 and 0.45.\n\n precipitable_water : numeric, default 1.0\n The precipitable water of the atmosphere (cm).\n Algorithm derived for values between 0.2 and 10 cm.\n Values less than 0.2 will be assumed to be equal to 0.2.\n\n pressure : numeric, default 101325.0\n The atmospheric pressure (Pascals).\n Algorithm derived for altitudes between sea level and 7000 m,\n or 101325 and 41000 Pascals.\n\n dni_extra : numeric, default 1364.0\n Extraterrestrial irradiance. The units of ``dni_extra``\n determine the units of the output.\n\n Returns\n -------\n clearsky : DataFrame (if Series input) or OrderedDict of arrays\n DataFrame/OrderedDict contains the columns/keys\n ``\'dhi\', \'dni\', \'ghi\'``.\n\n References\n ----------\n .. [1] P. Ineichen, "A broadband simplified version of the\n Solis clear sky model," Solar Energy, 82, 758-762 (2008).\n\n .. [2] P. Ineichen, "Validation of models that estimate the clear\n sky global and beam solar irradiance," Solar Energy, 132,\n 332-344 (2016).\n ' p = pressure w = precipitable_water w = np.maximum(w, 0.2) i0p = _calc_i0p(dni_extra, w, aod700, p) taub = _calc_taub(w, aod700, p) b = _calc_b(w, aod700) taug = _calc_taug(w, aod700, p) g = _calc_g(w, aod700) taud = _calc_taud(w, aod700, p) d = _calc_d(aod700, p) sin_elev = np.maximum(1e-30, np.sin(np.radians(apparent_elevation))) dni = (i0p * np.exp(((- taub) / (sin_elev ** b)))) ghi = ((i0p * np.exp(((- taug) / (sin_elev ** g)))) * sin_elev) dhi = (i0p * np.exp(((- taud) / (sin_elev ** d)))) irrads = OrderedDict() irrads['ghi'] = ghi irrads['dni'] = dni irrads['dhi'] = dhi if isinstance(dni, pd.Series): irrads = pd.DataFrame.from_dict(irrads) return irrads<|docstring|>Calculate the clear sky GHI, DNI, and DHI according to the simplified Solis model. Reference [1]_ describes the accuracy of the model as being 15, 20, and 18 W/m^2 for the beam, global, and diffuse components. Reference [2]_ provides comparisons with other clear sky models. Parameters ---------- apparent_elevation : numeric The apparent elevation of the sun above the horizon (deg). aod700 : numeric, default 0.1 The aerosol optical depth at 700 nm (unitless). Algorithm derived for values between 0 and 0.45. precipitable_water : numeric, default 1.0 The precipitable water of the atmosphere (cm). Algorithm derived for values between 0.2 and 10 cm. Values less than 0.2 will be assumed to be equal to 0.2. pressure : numeric, default 101325.0 The atmospheric pressure (Pascals). Algorithm derived for altitudes between sea level and 7000 m, or 101325 and 41000 Pascals. dni_extra : numeric, default 1364.0 Extraterrestrial irradiance. The units of ``dni_extra`` determine the units of the output. Returns ------- clearsky : DataFrame (if Series input) or OrderedDict of arrays DataFrame/OrderedDict contains the columns/keys ``'dhi', 'dni', 'ghi'``. References ---------- .. [1] P. Ineichen, "A broadband simplified version of the Solis clear sky model," Solar Energy, 82, 758-762 (2008). .. [2] P. Ineichen, "Validation of models that estimate the clear sky global and beam solar irradiance," Solar Energy, 132, 332-344 (2016).<|endoftext|>
fb97298b16fe0faa5fd31302097ee1d8ef9f74b4aa5b5a25c71fc00c865b2f48
def _calc_i0p(i0, w, aod700, p): 'Calculate the "enhanced extraterrestrial irradiance".' p0 = 101325.0 io0 = (1.08 * (w ** 0.0051)) i01 = (0.97 * (w ** 0.032)) i02 = (0.12 * (w ** 0.56)) i0p = (i0 * ((((i02 * (aod700 ** 2)) + (i01 * aod700)) + io0) + (0.071 * np.log((p / p0))))) return i0p
Calculate the "enhanced extraterrestrial irradiance".
pvlib/clearsky.py
_calc_i0p
Antoine-0/pvlib-python
695
python
def _calc_i0p(i0, w, aod700, p): p0 = 101325.0 io0 = (1.08 * (w ** 0.0051)) i01 = (0.97 * (w ** 0.032)) i02 = (0.12 * (w ** 0.56)) i0p = (i0 * ((((i02 * (aod700 ** 2)) + (i01 * aod700)) + io0) + (0.071 * np.log((p / p0))))) return i0p
def _calc_i0p(i0, w, aod700, p): p0 = 101325.0 io0 = (1.08 * (w ** 0.0051)) i01 = (0.97 * (w ** 0.032)) i02 = (0.12 * (w ** 0.56)) i0p = (i0 * ((((i02 * (aod700 ** 2)) + (i01 * aod700)) + io0) + (0.071 * np.log((p / p0))))) return i0p<|docstring|>Calculate the "enhanced extraterrestrial irradiance".<|endoftext|>
749607145f0742a24dcc2f8418f7d5e12d18b05eb87880d294c6dd42cf50dadc
def _calc_taub(w, aod700, p): 'Calculate the taub coefficient' p0 = 101325.0 tb1 = ((1.82 + (0.056 * np.log(w))) + (0.0071 * (np.log(w) ** 2))) tb0 = ((0.33 + (0.045 * np.log(w))) + (0.0096 * (np.log(w) ** 2))) tbp = ((0.0089 * w) + 0.13) taub = (((tb1 * aod700) + tb0) + (tbp * np.log((p / p0)))) return taub
Calculate the taub coefficient
pvlib/clearsky.py
_calc_taub
Antoine-0/pvlib-python
695
python
def _calc_taub(w, aod700, p): p0 = 101325.0 tb1 = ((1.82 + (0.056 * np.log(w))) + (0.0071 * (np.log(w) ** 2))) tb0 = ((0.33 + (0.045 * np.log(w))) + (0.0096 * (np.log(w) ** 2))) tbp = ((0.0089 * w) + 0.13) taub = (((tb1 * aod700) + tb0) + (tbp * np.log((p / p0)))) return taub
def _calc_taub(w, aod700, p): p0 = 101325.0 tb1 = ((1.82 + (0.056 * np.log(w))) + (0.0071 * (np.log(w) ** 2))) tb0 = ((0.33 + (0.045 * np.log(w))) + (0.0096 * (np.log(w) ** 2))) tbp = ((0.0089 * w) + 0.13) taub = (((tb1 * aod700) + tb0) + (tbp * np.log((p / p0)))) return taub<|docstring|>Calculate the taub coefficient<|endoftext|>
de82d5f189d3579ee91be6efef88f510e9639504003180a65ed7873966297925
def _calc_b(w, aod700): 'Calculate the b coefficient.' b1 = (((0.00925 * (aod700 ** 2)) + (0.0148 * aod700)) - 0.0172) b0 = ((((- 0.7565) * (aod700 ** 2)) + (0.5057 * aod700)) + 0.4557) b = ((b1 * np.log(w)) + b0) return b
Calculate the b coefficient.
pvlib/clearsky.py
_calc_b
Antoine-0/pvlib-python
695
python
def _calc_b(w, aod700): b1 = (((0.00925 * (aod700 ** 2)) + (0.0148 * aod700)) - 0.0172) b0 = ((((- 0.7565) * (aod700 ** 2)) + (0.5057 * aod700)) + 0.4557) b = ((b1 * np.log(w)) + b0) return b
def _calc_b(w, aod700): b1 = (((0.00925 * (aod700 ** 2)) + (0.0148 * aod700)) - 0.0172) b0 = ((((- 0.7565) * (aod700 ** 2)) + (0.5057 * aod700)) + 0.4557) b = ((b1 * np.log(w)) + b0) return b<|docstring|>Calculate the b coefficient.<|endoftext|>
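With _calc_i0p, _calc_taub and _calc_b available, the beam (DNI) term of the model can be reconstructed by hand, mirroring the expression inside simplified_solis. This is a sketch, not part of the corpus; it assumes these private module-level helpers remain importable from pvlib.clearsky.

import numpy as np
from pvlib.clearsky import _calc_i0p, _calc_taub, _calc_b  # private helpers; import is an assumption

w, aod700, p = 1.0, 0.1, 101325.0        # precipitable water (cm), AOD at 700 nm, pressure (Pa)
i0p = _calc_i0p(1364.0, w, aod700, p)    # "enhanced" extraterrestrial irradiance
taub = _calc_taub(w, aod700, p)          # beam optical depth
b = _calc_b(w, aod700)                   # beam elevation exponent
sin_elev = np.sin(np.radians(60.0))      # sun 60 degrees above the horizon
dni = i0p * np.exp(-taub / sin_elev**b)  # same expression used in simplified_solis
print(dni)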
7325d6b7ba8de0236dc1c6a2439168160ee1aac94bab1ac9c4b1d0e5baa31dfc
def _calc_taug(w, aod700, p): 'Calculate the taug coefficient' p0 = 101325.0 tg1 = ((1.24 + (0.047 * np.log(w))) + (0.0061 * (np.log(w) ** 2))) tg0 = ((0.27 + (0.043 * np.log(w))) + (0.009 * (np.log(w) ** 2))) tgp = ((0.0079 * w) + 0.1) taug = (((tg1 * aod700) + tg0) + (tgp * np.log((p / p0)))) return taug
Calculate the taug coefficient
pvlib/clearsky.py
_calc_taug
Antoine-0/pvlib-python
695
python
def _calc_taug(w, aod700, p): p0 = 101325.0 tg1 = ((1.24 + (0.047 * np.log(w))) + (0.0061 * (np.log(w) ** 2))) tg0 = ((0.27 + (0.043 * np.log(w))) + (0.009 * (np.log(w) ** 2))) tgp = ((0.0079 * w) + 0.1) taug = (((tg1 * aod700) + tg0) + (tgp * np.log((p / p0)))) return taug
def _calc_taug(w, aod700, p): p0 = 101325.0 tg1 = ((1.24 + (0.047 * np.log(w))) + (0.0061 * (np.log(w) ** 2))) tg0 = ((0.27 + (0.043 * np.log(w))) + (0.009 * (np.log(w) ** 2))) tgp = ((0.0079 * w) + 0.1) taug = (((tg1 * aod700) + tg0) + (tgp * np.log((p / p0)))) return taug<|docstring|>Calculate the taug coefficient<|endoftext|>
1aba7ae695f0658b0f47b8829a50d1e15301c9dbdb00277aff9d92ca9538468f
def _calc_g(w, aod700): 'Calculate the g coefficient.' g = (((((- 0.0147) * np.log(w)) - (0.3079 * (aod700 ** 2))) + (0.2846 * aod700)) + 0.3798) return g
Calculate the g coefficient.
pvlib/clearsky.py
_calc_g
Antoine-0/pvlib-python
695
python
def _calc_g(w, aod700): g = (((((- 0.0147) * np.log(w)) - (0.3079 * (aod700 ** 2))) + (0.2846 * aod700)) + 0.3798) return g
def _calc_g(w, aod700): g = (((((- 0.0147) * np.log(w)) - (0.3079 * (aod700 ** 2))) + (0.2846 * aod700)) + 0.3798) return g<|docstring|>Calculate the g coefficient.<|endoftext|>
94a13d10d2a732c22f6ca5f42feab0fe273f81291d1523787f7805a02fe84e30
def _calc_taud(w, aod700, p): 'Calculate the taud coefficient.' if (np.isscalar(w) and np.isscalar(aod700)): w = np.array([w]) aod700 = np.array([aod700]) elif np.isscalar(w): w = np.full_like(aod700, w) elif np.isscalar(aod700): aod700 = np.full_like(w, aod700) aod700_lt_0p05 = np.full_like(aod700, False, dtype='bool') np.less(aod700, 0.05, where=(~ np.isnan(aod700)), out=aod700_lt_0p05) aod700_mask = np.array([aod700_lt_0p05, (~ aod700_lt_0p05)], dtype=int) td4 = (((86 * w) - 13800), (((- 0.21) * w) + 11.6)) td3 = ((((- 3.11) * w) + 79.4), ((0.27 * w) - 20.7)) td2 = ((((- 0.23) * w) + 74.8), (((- 0.134) * w) + 15.5)) td1 = (((0.092 * w) - 8.86), ((0.0554 * w) - 5.71)) td0 = (((0.0042 * w) + 3.12), ((0.0057 * w) + 2.94)) tdp = (((- 0.83) * ((1 + aod700) ** (- 17.2))), ((- 0.71) * ((1 + aod700) ** (- 15.0)))) tds = (np.array([td0, td1, td2, td3, td4, tdp]) * aod700_mask).sum(axis=1) p0 = 101325.0 taud = ((((((tds[4] * (aod700 ** 4)) + (tds[3] * (aod700 ** 3))) + (tds[2] * (aod700 ** 2))) + (tds[1] * aod700)) + tds[0]) + (tds[5] * np.log((p / p0)))) if (len(taud) == 1): taud = taud[0] return taud
Calculate the taud coefficient.
pvlib/clearsky.py
_calc_taud
Antoine-0/pvlib-python
695
python
def _calc_taud(w, aod700, p): if (np.isscalar(w) and np.isscalar(aod700)): w = np.array([w]) aod700 = np.array([aod700]) elif np.isscalar(w): w = np.full_like(aod700, w) elif np.isscalar(aod700): aod700 = np.full_like(w, aod700) aod700_lt_0p05 = np.full_like(aod700, False, dtype='bool') np.less(aod700, 0.05, where=(~ np.isnan(aod700)), out=aod700_lt_0p05) aod700_mask = np.array([aod700_lt_0p05, (~ aod700_lt_0p05)], dtype=int) td4 = (((86 * w) - 13800), (((- 0.21) * w) + 11.6)) td3 = ((((- 3.11) * w) + 79.4), ((0.27 * w) - 20.7)) td2 = ((((- 0.23) * w) + 74.8), (((- 0.134) * w) + 15.5)) td1 = (((0.092 * w) - 8.86), ((0.0554 * w) - 5.71)) td0 = (((0.0042 * w) + 3.12), ((0.0057 * w) + 2.94)) tdp = (((- 0.83) * ((1 + aod700) ** (- 17.2))), ((- 0.71) * ((1 + aod700) ** (- 15.0)))) tds = (np.array([td0, td1, td2, td3, td4, tdp]) * aod700_mask).sum(axis=1) p0 = 101325.0 taud = ((((((tds[4] * (aod700 ** 4)) + (tds[3] * (aod700 ** 3))) + (tds[2] * (aod700 ** 2))) + (tds[1] * aod700)) + tds[0]) + (tds[5] * np.log((p / p0)))) if (len(taud) == 1): taud = taud[0] return taud
def _calc_taud(w, aod700, p): if (np.isscalar(w) and np.isscalar(aod700)): w = np.array([w]) aod700 = np.array([aod700]) elif np.isscalar(w): w = np.full_like(aod700, w) elif np.isscalar(aod700): aod700 = np.full_like(w, aod700) aod700_lt_0p05 = np.full_like(aod700, False, dtype='bool') np.less(aod700, 0.05, where=(~ np.isnan(aod700)), out=aod700_lt_0p05) aod700_mask = np.array([aod700_lt_0p05, (~ aod700_lt_0p05)], dtype=int) td4 = (((86 * w) - 13800), (((- 0.21) * w) + 11.6)) td3 = ((((- 3.11) * w) + 79.4), ((0.27 * w) - 20.7)) td2 = ((((- 0.23) * w) + 74.8), (((- 0.134) * w) + 15.5)) td1 = (((0.092 * w) - 8.86), ((0.0554 * w) - 5.71)) td0 = (((0.0042 * w) + 3.12), ((0.0057 * w) + 2.94)) tdp = (((- 0.83) * ((1 + aod700) ** (- 17.2))), ((- 0.71) * ((1 + aod700) ** (- 15.0)))) tds = (np.array([td0, td1, td2, td3, td4, tdp]) * aod700_mask).sum(axis=1) p0 = 101325.0 taud = ((((((tds[4] * (aod700 ** 4)) + (tds[3] * (aod700 ** 3))) + (tds[2] * (aod700 ** 2))) + (tds[1] * aod700)) + tds[0]) + (tds[5] * np.log((p / p0)))) if (len(taud) == 1): taud = taud[0] return taud<|docstring|>Calculate the taud coefficient.<|endoftext|>
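_calc_taud switches its polynomial coefficient set at aod700 = 0.05 and normalizes scalar inputs to length-1 arrays before applying the per-element mask. A quick check of both branches with hypothetical values (again assuming the private helper is importable from pvlib.clearsky):

import numpy as np
from pvlib.clearsky import _calc_taud  # private helper; import is an assumption

p = 101325.0
print(_calc_taud(1.0, 0.02, p))   # low-aerosol branch (aod700 < 0.05); scalar in, scalar out
print(_calc_taud(1.0, 0.20, p))   # high-aerosol branch
print(_calc_taud(np.array([1.0, 2.0]),
                 np.array([0.02, 0.20]), p))  # mixed array input exercises the element-wise mask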