content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k) | lang (1 class) |
---|---|---|---|---|---|---|---|---|
# MIT License
#
# Copyright (c) 2020 PANGAEA (https://www.pangaea.de/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import List
import jmespath
from fuji_server.helper.metadata_collector import MetaDataCollector
from fuji_server.helper.request_helper import RequestHelper, AcceptTypes
class MetaDataCollectorDatacite(MetaDataCollector):
exclude_conversion: List[str]
def __init__(self, mapping, pid_url=None, loggerinst=None):
super().__init__(logger=loggerinst, mapping=mapping)
self.pid_url = pid_url
self.exclude_conversion = ['creator', 'license', 'related_resources', 'access_level']
def parse_metadata(self):
source_name = None
dcite_metadata = {}
self.logger.info('FsF-F2-01M : Extract datacite metadata')
requestHelper = RequestHelper(self.pid_url, self.logger)
requestHelper.setAcceptType(AcceptTypes.datacite_json)
neg_source,ext_meta = requestHelper.content_negotiate('FsF-F2-01M')
if ext_meta:
try:
dcite_metadata = jmespath.search(self.metadata_mapping.value, ext_meta)
if dcite_metadata:
self.namespaces.append('http://datacite.org/schema/')
source_name = self.getEnumSourceNames().DATACITE_JSON.value
if dcite_metadata['creator'] is None:
first = dcite_metadata['creator_first']
last = dcite_metadata['creator_last']
# default type of creator is []
if isinstance(first, list) and isinstance(last, list):
if len(first) == len(last):
names = [i + " " + j for i, j in zip(first, last)]
dcite_metadata['creator'] = names
if dcite_metadata.get('related_resources'):
self.logger.info('FsF-I3-01M : {0} related resource(s) extracted from {1}'.format(
len(dcite_metadata['related_resources']), source_name))
temp_rels = []
for r in dcite_metadata['related_resources']:
filtered = {k: v for k, v in r.items() if v is not None}
temp_rels.append(filtered)
dcite_metadata['related_resources'] = temp_rels
else:
self.logger.info('FsF-I3-01M : No related resource(s) found in Datacite metadata')
# convert all values (list type) into string except 'creator','license','related_resources'
for key, value in dcite_metadata.items():
if key not in self.exclude_conversion and isinstance(value, list):
flat = ', '.join(map(str, value))
dcite_metadata[key] = flat
except Exception as e:
self.logger.exception('Failed to extract Datacite Json - {}'.format(e))
return source_name, dcite_metadata
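# --- Editor-added usage sketch (not part of the upstream file) ---
# parse_metadata() only needs a mapping object whose .value holds a jmespath
# expression (it is assumed the base class stores it as self.metadata_mapping),
# a PID URL and a logger; the expression, DOI and logger name below are
# illustrative placeholders, not values taken from the FUJI project.
#
#   import logging
#   from types import SimpleNamespace
#   demo_mapping = SimpleNamespace(value='{title: titles[0].title, creator: creators[*].name}')
#   collector = MetaDataCollectorDatacite(mapping=demo_mapping,
#                                         pid_url='https://doi.org/10.xxxx/example',
#                                         loggerinst=logging.getLogger('fuji-demo'))
#   source_name, metadata = collector.parse_metadata()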
| 51.3375 | 111 | 0.62917 | ["MIT"] | EOSC-synergy/FUJI_pipeline.sqaaas | fuji_server/helper/metadata_collector_datacite.py | 4,107 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# imageprocessor documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
# import imageprocessor
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = u'ImageProcessor'
copyright = u"2021, Rui Wang"
author = u"Rui Wang"
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
# version = imageprocessor.__version__
# The full version, including alpha/beta/rc tags.
# release = imageprocessor.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_sidebars = {
"**": ["about.html", "navigation.html", "searchbox.html"]
}
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'imageprocessordoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'imageprocessor.tex',
u'imageprocessor Documentation',
u'Rui Wang', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'imageprocessor',
u'imageprocessor Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'imageprocessor',
u'imageprocessor Documentation',
author,
'imageprocessor',
'One line description of project.',
'Miscellaneous'),
]
| 30.245283 | 78 | 0.671033 | ["MIT"] | UBC-MDS/524_group4 | docs/conf.py | 4,809 | Python |
# coding=utf-8
"""
PAT - the name of the current project.
main_portfolio_maker.py - the name of the new file which you specify in the New File
dialog box during the file creation.
Hossein - the login name of the current user.
8/8/18 - the current system date.
9:14 AM - the current system time.
PyCharm - the name of the IDE in which the file will be created.
"""
from portfolio_maker.subscriber import create_subscription
from observer import Observer
from price_fetcher.config import PROJECT_ID
from price_fetcher.publisher import list_topics
from price_fetcher.config import TICKERS
import time
import datetime
if __name__ == '__main__':
topics = list_topics(PROJECT_ID)
topics = [str(topic).split('/')[-1][:-2] for topic in topics if 'simulator' in str(topic)]
subscriptions = [create_subscription(PROJECT_ID, topic, 'live_writer_' + str(i))
for i, topic in enumerate(topics)]
observer = Observer(tickers=['AAPL'], start_date=datetime.date(2018, 10, 18))
observer.initiate()
for i in range(len(topics)):
observer.receive_messages(PROJECT_ID, 'live_writer_' + str(i))
while True:
# print('PRINTING!', observer.instruments)
time.sleep(60)
| 37.030303 | 94 | 0.719313 | ["Apache-2.0"] | pourmatin/Graph | portfolio_maker/main_portfolio_maker.py | 1,222 | Python |
import discord
import json
import CloudDB
import nqrng
from cloudant.result import Result
global CONFIG
client = discord.Client()
token = ""
#import config file
with open('config.json', 'r') as f:
getFile = json.load(f)
global CONFIG
CONFIG = getFile["services"]["discord"][0]
token = CONFIG["token"]
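# Editor-added note: an illustrative config.json layout, inferred only from the
# getFile["services"]["discord"][0]["token"] lookup above; any additional keys and
# the token value are placeholders, not taken from the project.
#
#   {
#     "services": {
#       "discord": [
#         { "token": "YOUR-DISCORD-BOT-TOKEN" }
#       ]
#     }
#   }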
#db connect
global my_database
my_database = CloudDB.connect_db()
# bot start
@client.event
async def on_ready():
print("Login")
print(client.user.name)
print(client.user.id)
print("================")
# bot get message
@client.event
async def on_message(message):
    # ignore messages that were sent by bots
if message.author.bot:
return None
if message.content.startswith('!Hi'):
channel = message.channel
await channel.send('Welcome!')
if message.content.startswith('!Qadjoke'):
channel = message.channel
arrResult = Result(my_database.all_docs, include_docs=True)
dbNum = my_database.doc_count()
num1 = nqrng.random_number() % dbNum
num2 = nqrng.random_number() % dbNum
num = (num1 * num2) % dbNum
result = arrResult[num][0]['doc']['Qdad']
print(f"{num} is randnum, result is {result}")
await channel.send(f'{result}')
client.run(token)
| 22.982143 | 67 | 0.648796 | ["Apache-2.0"] | rochisha0/quantum-ugly-duckling | quantum-ugly-duckling-main/discord_bot.py | 1,287 | Python |
from rest_framework.routers import DefaultRouter
from rest_framework_nested import routers
from kubeops_api import api
from django.urls import path
from django.conf.urls import url
from kubeops_api.apis import host
from kubeops_api.apis import item
from kubeops_api.apis import grade
app_name = "kubeops_api"
router = DefaultRouter()
router.register('clusters', api.ClusterViewSet, 'cluster')
router.register('packages', api.PackageViewSet, 'package')
router.register('credential', api.CredentialViewSet, 'credential')
router.register('host', host.HostViewSet, 'host')
router.register('backupStorage', api.BackupStorageViewSet, 'backupStorage')
router.register('backupStrategy', api.BackupStrategyViewSet, 'backupStrategy')
router.register('clusterBackup', api.ClusterBackupViewSet, 'clusterBackup')
router.register('items', item.ItemViewSet, 'item')
router.register('item/profiles', item.ItemUserViewSet, 'item-profiles')
cluster_router = routers.NestedDefaultRouter(router, r'clusters', lookup='cluster')
cluster_router.register(r'configs', api.ClusterConfigViewSet, 'cluster-config')
cluster_router.register(r'nodes', api.NodeViewSet, 'cluster-node')
cluster_router.register(r'roles', api.RoleViewSet, 'cluster-role')
cluster_router.register(r'executions', api.DeployExecutionViewSet, 'cluster-deploy-execution')
urlpatterns = [
path('cluster/<uuid:pk>/download/', api.DownloadView.as_view()),
path('cluster/<uuid:pk>/token/', api.GetClusterTokenView.as_view()),
path('cluster/<uuid:pk>/webkubectl/token/', api.WebKubeCtrlToken.as_view()),
path('cluster/<uuid:pk>/grade/', grade.GradeRetrieveAPIView.as_view()),
path('cluster/config', api.GetClusterConfigView.as_view()),
    path('version/', api.VersionView.as_view()),
path('backupStorage/check', api.CheckStorageView.as_view()),
path('backupStorage/getBuckets', api.GetBucketsView.as_view()),
path('clusterBackup/<uuid:project_id>/', api.ClusterBackupList.as_view()),
path('clusterBackup/<uuid:id>/delete/', api.ClusterBackupDelete.as_view()),
path('clusterBackup/restore/', api.ClusterBackupRestore.as_view()),
path('cluster/<project_name>/health/<namespace>/', api.ClusterHealthView.as_view()),
path('cluster/<project_name>/component/', api.ClusterComponentView.as_view()),
path('cluster/<project_name>/namespace/', api.ClusterNamespaceView.as_view()),
path('cluster/<project_name>/storage/', api.ClusterStorageView.as_view()),
path('cluster/<project_name>/event/', api.ClusterEventView.as_view()),
path('cluster/<project_name>/checkNodes/', api.CheckNodeView.as_view()),
path('cluster/<project_name>/syncNodeTime/', api.SyncHostTimeView.as_view()),
path('clusterHealthHistory/<project_id>/', api.ClusterHealthHistoryView.as_view()),
path('dashboard/<project_name>/<item_name>/', api.DashBoardView.as_view()),
path('resource/<item_name>/', item.ItemResourceView.as_view()),
path('resource/item/clusters/', item.ItemResourceClusterView.as_view()),
path('resource/<item_name>/<resource_type>/', item.ResourceView.as_view()),
path('resource/<item_name>/<resource_type>/<resource_id>/', item.ItemResourceDeleteView.as_view()),
url('settings', api.SettingView.as_view(), name='settings'),
] + router.urls + cluster_router.urls
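# Editor-added note (illustrative, not part of the upstream file): with the routers
# registered above, DRF generates the usual list/detail routes plus nested ones, e.g.
#   clusters/                      -> ClusterViewSet (list)
#   clusters/{pk}/                 -> ClusterViewSet (detail)
#   clusters/{cluster_pk}/nodes/   -> NodeViewSet via cluster_router
# The nested parameter name follows rest_framework_nested's lookup='cluster'
# convention and is stated here as an assumption rather than copied from the project.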
| 63.931034 | 117 | 0.682309 | ["Apache-2.0"] | jackwiy/KubeOperator | core/apps/kubeops_api/api_url.py | 3,708 | Python |
import tensorflow as tf
from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import LuongAttention, \
AttentionWrapper, AttentionWrapperState
class AttentionMode:
"""
Enumerator for the Luong style local attention modes.
- See [1]: Effective Approaches to Attention-based Neural Machine Translation,
http://arxiv.org/abs/1508.04025
"""
# local-m mode.
MONOTONIC = 'monotonic'
# local-p mode.
PREDICTIVE = 'predictive'
class AttentionScore:
"""
Enumerator for the three different content-based scoring functions for Luong style attention.
- See [1]: Effective Approaches to Attention-based Neural Machine Translation,
http://arxiv.org/abs/1508.04025
"""
DOT = 'dot'
GENERAL = 'general'
CONCAT = 'concat'
def _luong_local_compute_attention(attention_mechanism, cell_output, attention_state,
attention_layer):
"""Computes the attention and alignments for the Luong style local attention mechanism."""
alignments, next_attention_state = attention_mechanism(
cell_output, state=attention_state)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = tf.expand_dims(alignments, 1)
context_windows = []
padded_alignment_windows = []
window_start = attention_mechanism.window_start
window_stop = attention_mechanism.window_stop
pre_padding = attention_mechanism.window_pre_padding
post_padding = attention_mechanism.window_post_padding
full_pre_padding = attention_mechanism.full_seq_pre_padding
full_post_padding = attention_mechanism.full_seq_post_padding
for i in range(0, attention_mechanism.const_batch_size):
# Slice out the window from the memory.
value_window = attention_mechanism.values[i, window_start[i][0]:window_stop[i][0], :]
# Add zero padding to the slice in order to ensure the window size is (2D+1).
value_window_paddings = [
[pre_padding[i][0], post_padding[i][0]],
[0, 0]
]
value_window = tf.pad(value_window, value_window_paddings, 'CONSTANT')
# Shape information is lost after padding ;(.
value_window.set_shape((attention_mechanism.window_size,
attention_mechanism._num_units))
# Calculate the context vector for the current batch entry using only information from
        # the window.
context_window = tf.matmul(expanded_alignments[i], value_window)
context_windows.append(context_window)
if attention_mechanism.force_gaussian is True:
# Apply gaussian weighting of the window contents.
point_dist = tf.cast(tf.range(start=window_start[i][0],
limit=window_stop[i][0],
delta=1), dtype=tf.float32) - attention_mechanism.p[i][0]
gaussian_weights = tf.exp(-(point_dist ** 2) / 2 * (attention_mechanism.d / 2) ** 2)
__alignments = alignments[i] * gaussian_weights
else:
# Use the raw window contents.
__alignments = alignments[i]
# Add padding to the alignments to get from the window size 2D+1 up to the original
# memory length.
alignment_seq_paddings = [
[full_pre_padding[i][0], full_post_padding[i][0]],
]
__alignments = tf.pad(__alignments, alignment_seq_paddings, 'CONSTANT')
padded_alignment_windows.append(__alignments)
# Stack all context vectors into one tensor.
context = tf.stack(context_windows)
# Squeeze out the helper dimension used for calculating the context.
context = tf.squeeze(context, [1])
# Stack all alignment vectors into one tensor. This tensor gives alignments for each encoder
# step.
padded_alignment = tf.stack(padded_alignment_windows)
if attention_layer is not None:
attention = attention_layer(tf.concat([cell_output, context], 1))
else:
attention = context
return attention, padded_alignment, padded_alignment
class LocalLuongAttention(LuongAttention):
"""
Implements a Luong-style local attention mechanism.
This implementation supports both monotonic attention as well as predictive attention.
- See [1]: Effective Approaches to Attention-based Neural Machine Translation,
http://arxiv.org/abs/1508.04025
"""
def __init__(self, num_units,
memory,
const_batch_size,
memory_sequence_length=None,
scale=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="LocalLuongAttention",
d=10,
attention_mode=AttentionMode.MONOTONIC,
score_mode=AttentionScore.DOT,
force_gaussian=False
):
"""
Arguments:
num_units (int):
The depth of the attention mechanism. This controls the number of units in the
memory layer that processes the encoder states into the `keys`.
memory (tf.Tensor):
The memory to query; usually the output of an RNN encoder.
The shape is expected to be shape=(batch_size, encoder_max_time, ...)
const_batch_size (int):
The constant batch size to expect from every batch. Every batch is expected to
contain exactly `const_batch_size` samples.
memory_sequence_length:
(optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale (boolean):
Whether to scale the energy term.
probability_fn:
(optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value:
(optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype (tf.DType):
The data type for the memory layer of the attention mechanism.
name (string):
Name to use when creating ops.
d (int):
D parameter controlling the window size and gaussian distribution.
The window size is set to be `2D + 1`.
attention_mode (AttentionMode):
The attention mode to use. Can be either `MONOTONIC` or `PREDICTIVE`.
score_mode (AttentionScore):
The attention scoring function to use. Can either be `DOT`, `GENERAL` or `CONCAT`.
force_gaussian (boolean):
Force a gaussian distribution onto the scores in the attention window.
Defaults to False.
"""
super().__init__(num_units=num_units,
memory=memory,
memory_sequence_length=memory_sequence_length,
scale=scale,
probability_fn=probability_fn,
score_mask_value=score_mask_value,
dtype=dtype,
name=name)
# Initialize the decoding time counter.
        # This variable is updated by the `AdvancedAttentionWrapper`.
self.time = 0
# Calculate the attention window size.
self.d = d
self.window_size = 2 * self.d + 1
# Store the attention mode.
self.attention_mode = attention_mode
# Store the scoring function style to be used.
self.score_mode = score_mode
# The constant batch size to expect.
self.const_batch_size = const_batch_size
self.force_gaussian = force_gaussian
def __call__(self, query, state):
"""
Calculate the alignments and next_state for the current decoder output.
Arguments:
query (tf.Tensor):
Decoder cell outputs to compare to the keys (memory).
The shape is expected to be shape=(B, num_units) with B being the batch size
and `num_units` being the output size of the decoder_cell.
state (tf.Tensor):
In Luong attention the state is equal to the alignments. Therefore this will
contain the alignments from the previous decoding step.
Returns:
(alignments, next_state):
alignments (tf.Tensor):
The normalized attention scores for the attention window. The shape is
shape=(B, 2D+1), with B being the batch size and `2D+1` being the window size.
next_state (tf.Tensor):
In Luong attention this is equal to `alignments`.
"""
with tf.variable_scope(None, "local_luong_attention", [query]):
# Get the depth of the memory values.
num_units = self._keys.get_shape()[-1]
# Get the source sequence length from memory.
source_seq_length = tf.shape(self._keys)[1]
if self.attention_mode == AttentionMode.PREDICTIVE:
                # Predictive selection of the attention window position.
vp = tf.get_variable(name="local_v_p", shape=[num_units, 1], dtype=tf.float32)
wp = tf.get_variable(name="local_w_p", shape=[num_units, num_units],
dtype=tf.float32)
# shape => (B, num_units)
_intermediate_result = tf.transpose(tf.tensordot(wp, query, [0, 1]))
# shape => (B, 1)
_tmp = tf.transpose(tf.tensordot(vp, tf.tanh(_intermediate_result), [0, 1]))
# Derive p_t as described by Luong for the predictive local-p case.
self.p = tf.cast(source_seq_length, tf.float32) * tf.sigmoid(_tmp)
elif self.attention_mode == AttentionMode.MONOTONIC:
# Derive p_t as described by Luong for the predictive local-m case.
self.p = tf.tile(
[[self.time]],
tf.convert_to_tensor([self.batch_size, 1])
)
# Prevent the window from leaving the memory.
self.p = tf.maximum(self.p, self.d)
self.p = tf.minimum(self.p, source_seq_length - (self.d + 1))
self.p = tf.cast(self.p, dtype=tf.float32)
# Calculate the memory sequence index at which the window should start.
start_index = tf.floor(self.p) - self.d
start_index = tf.cast(start_index, dtype=tf.int32)
# Prevent the window from leaving the memory.
self.window_start = tf.maximum(0, start_index)
# Calculate the memory sequence index at which the window should stop.
stop_index = tf.floor(self.p) + self.d + 1
stop_index = tf.cast(stop_index, dtype=tf.int32)
# Prevent the window from leaving the memory.
self.window_stop = tf.minimum(source_seq_length, stop_index)
# Calculate how many padding frames should be added to the start of the window.
# This is used to get up to the total memory length again.
self.full_seq_pre_padding = tf.abs(start_index)
# Calculate how many padding frames should be added to the end of the window.
# This is used to get up to the total memory length again.
self.full_seq_post_padding = tf.abs(stop_index - source_seq_length)
# Calculate how many padding frames should be added to the start of the window.
# This is used to get the window up to 2D+1 frames.
self.window_pre_padding = tf.abs(self.window_start - start_index)
# Calculate how many padding frames should be added to the end of the window.
# This is used to get the window up to 2D+1 frames.
self.window_post_padding = tf.abs(self.window_stop - stop_index)
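            # Editor-added worked example (illustrative numbers): with d=10, monotonic
            # mode, time=3 and a 50-frame memory, p is clamped to max(3, 10) = 10, so
            # start_index = 0, stop_index = 21, window_start = 0, window_stop = 21,
            # window_pre_padding = window_post_padding = 0, full_seq_pre_padding = 0
            # and full_seq_post_padding = 29 (padding the 21-frame window back out to
            # the 50-frame memory length).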
# Slice the windows for every batch entry.
with tf.variable_scope(None, "window_extraction", [query]):
windows = []
# Iterate the batch entries.
for i in range(0, self.const_batch_size):
# Slice out the window from the processed memory.
__window = self._keys[i, self.window_start[i][0]:self.window_stop[i][0], :]
# Add zero padding to the slice in order to ensure the window size is (2D+1).
paddings = [
[self.window_pre_padding[i][0], self.window_post_padding[i][0]],
[0, 0]
]
__window = tf.pad(__window, paddings, 'CONSTANT')
# Collect the extracted windows for each batch entry.
windows.append(__window)
# Merge all extracted windows into one tensor.
window = tf.stack(windows)
            # Calculate the non-normalized attention score described by Luong as "dot".
if self.score_mode == AttentionScore.DOT:
score = _luong_dot_score(query, window, self._scale)
            # Calculate the non-normalized attention score described by Luong as "general".
elif self.score_mode == AttentionScore.GENERAL:
score = _luong_general_score(query, window)
            # Calculate the non-normalized attention score described by Luong as "concat".
elif self.score_mode == AttentionScore.CONCAT:
score = _luong_concat_score(query, window)
else:
score = None
raise Exception("An invalid attention scoring mode was supplied.")
# Normalize the scores.
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
def _luong_dot_score(query, keys, scale):
"""
Implements the Luong-style dot scoring function.
This attention has two forms. The first is standard Luong attention, as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, call this function with `scale=True`.
This implementation is derived from: `tensorflow.contrib.seq2seq.python.ops.attention_wrapper`
Arguments:
query (tf.Tensor):
Decoder cell outputs to compare to the keys (memory).
The shape is expected to be shape=(B, num_units) with B being the batch size
and `num_units` being the output size of the decoder_cell.
keys (tf.Tensor):
Processed memory (usually the encoder states processed by the memory_layer).
The shape is expected to be shape=(B, X, num_units) with B being the batch size
and `num_units` being the output size of the memory_layer. X may be the
maximal length of the encoder time domain or in the case of local attention the
window size.
scale (boolean):
Whether to apply a scale to the score function.
Returns:
score (tf.Tensor):
A tensor with shape=(B, X) containing the non-normalized score values.
Raises:
ValueError: If `key` and `query` depths do not match.
"""
depth = query.get_shape()[-1]
key_units = keys.get_shape()[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the keys' dimension (%s)?"
% (query, depth, keys, key_units, key_units))
dtype = query.dtype
query = tf.expand_dims(query, 1)
score = tf.matmul(query, keys, transpose_b=True)
score = tf.squeeze(score, [1])
if scale:
# Scalar used in weight scaling
g = tf.get_variable(
"attention_g", dtype=dtype,
initializer=tf.ones_initializer, shape=())
score = g * score
return score
def _luong_general_score(query, keys):
"""
Implements the Luong-style general scoring function.
- See [1]: Effective Approaches to Attention-based Neural Machine Translation,
http://arxiv.org/abs/1508.04025
Arguments:
query (tf.Tensor):
Decoder cell outputs to compare to the keys (memory).
The shape is expected to be shape=(B, num_units) with B being the batch size
and `num_units` being the output size of the decoder_cell.
keys (tf.Tensor):
Processed memory (usually the encoder states processed by the memory_layer).
The shape is expected to be shape=(B, X, num_units) with B being the batch size
and `num_units` being the output size of the memory_layer. X may be the
maximal length of the encoder time domain or in the case of local attention the
window size.
Returns:
score (tf.Tensor):
A tensor with shape=(B, X) containing the non-normalized score values.
"""
raise NotImplementedError('Luong style general mode attention scoring is not implemented yet!')
def _luong_concat_score(query, keys):
"""
Implements the Luong-style concat scoring function.
- See [1]: Effective Approaches to Attention-based Neural Machine Translation,
http://arxiv.org/abs/1508.04025
Arguments:
query (tf.Tensor):
Decoder cell outputs to compare to the keys (memory).
The shape is expected to be shape=(B, num_units) with B being the batch size
and `num_units` being the output size of the decoder_cell.
keys (tf.Tensor):
Processed memory (usually the encoder states processed by the memory_layer).
The shape is expected to be shape=(B, X, num_units) with B being the batch size
and `num_units` being the output size of the memory_layer. X may be the
maximal length of the encoder time domain or in the case of local attention the
window size.
Returns:
score (tf.Tensor):
A tensor with shape=(B, X) containing the non-normalized score values.
"""
raise NotImplementedError('Luong style concat mode attention scoring is not implemented yet!')
class AdvancedAttentionWrapper(AttentionWrapper):
"""
Wraps the standard AttentionWrapper class so that during decoding steps the decoding time
index is updated in the attention mechanism.
    This is a hack that enables Luong-style monotonic attention.
"""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None):
super().__init__(cell=cell,
attention_mechanism=attention_mechanism,
attention_layer_size=attention_layer_size,
alignment_history=alignment_history,
cell_input_fn=cell_input_fn,
output_attention=output_attention,
initial_cell_state=initial_cell_state,
name=name)
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
raise TypeError("Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." % type(state))
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
cell_output.shape[0].value or tf.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with tf.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)):
cell_output = tf.identity(
cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
# Note: This is the only modification hacked into the attention wrapper to support
# monotonic Luong attention.
attention_mechanism.time = state.time
attention, alignments, next_attention_state = _luong_local_compute_attention(
attention_mechanism, cell_output, previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None)
alignment_history = previous_alignment_history[i].write(
state.time, alignments) if self._alignment_history else ()
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
attention = tf.concat(all_attentions, 1)
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories))
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
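# --- Editor-added wiring sketch (not part of the upstream file) ---
# How the pieces defined above are intended to fit together, based only on the
# constructor signatures in this file; the unit sizes, batch size and decoder cell
# are illustrative assumptions.
#
#   attention_mechanism = LocalLuongAttention(num_units=256,
#                                             memory=encoder_outputs,  # (B, T_enc, units)
#                                             const_batch_size=32,
#                                             d=10,
#                                             attention_mode=AttentionMode.MONOTONIC,
#                                             score_mode=AttentionScore.DOT)
#   decoder_cell = tf.nn.rnn_cell.LSTMCell(256)
#   attention_cell = AdvancedAttentionWrapper(decoder_cell,
#                                             attention_mechanism,
#                                             attention_layer_size=256,
#                                             alignment_history=True)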
| 41.913265 | 99 | 0.62321 | ["MIT"] | yweweler/single-speaker-tts | tacotron/attention.py | 24,646 | Python |
import logging
import warnings
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from django.contrib.auth import get_user_model
l = logging.getLogger(__name__)
class OAuth2InputSerializer(serializers.Serializer):
provider = serializers.CharField(required=False)
code = serializers.CharField()
redirect_uri = serializers.CharField(required=False)
class OAuth1InputSerializer(serializers.Serializer):
provider = serializers.CharField(required=False)
oauth_token = serializers.CharField()
oauth_token_secret = serializers.CharField()
oauth_verifier = serializers.CharField()
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
exclude = ('is_staff', 'is_active', 'date_joined', 'password',
'last_login', 'user_permissions', 'groups', 'is_superuser',)
class TokenSerializer(serializers.Serializer):
token = serializers.SerializerMethodField()
def get_token(self, obj):
token, created = Token.objects.get_or_create(user=obj)
return token.key
class UserTokenSerializer(TokenSerializer, UserSerializer):
pass
class JWTSerializer(TokenSerializer):
def get_token(self, obj):
try:
from rest_framework_jwt.settings import api_settings
except ImportError:
warnings.warn('djangorestframework-jwt must be installed for JWT authentication',
ImportWarning)
raise
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(self.get_jwt_payload(obj))
token = jwt_encode_handler(payload)
return token
def get_jwt_payload(self, obj):
"""
Define here, what data shall be encoded in JWT.
By default, entire object will be encoded.
"""
return obj
class UserJWTSerializer(JWTSerializer, UserSerializer):
pass
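# --- Editor-added usage sketch (not part of the upstream file) ---
# How a view might serialize an authenticated user into a response body containing a
# DRF token; the user lookup below is illustrative.
#
#   user = get_user_model().objects.first()
#   payload = UserTokenSerializer(user).data    # user fields plus a 'token' key
#   jwt_payload = UserJWTSerializer(user).data  # same, but 'token' is a JWT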
| 27.066667 | 93 | 0.71133 | ["MIT"] | silverlogic/django-rest-social-auth | rest_social_auth/serializers.py | 2,030 | Python |
from __future__ import absolute_import, unicode_literals
import factory
from wagtail.wagtailcore.models import Site, Page
from wagtailsystemtext.models import SystemString
class PageFactory(factory.DjangoModelFactory):
class Meta:
model = Page
path = factory.Sequence(lambda x: '00010001{:04d}'.format(x+1))
depth = 3
numchild = 0
live = True
title = factory.Sequence(lambda x: 'page-title-{0}'.format(x))
class SiteFactory(factory.DjangoModelFactory):
class Meta:
model = Site
hostname = factory.Sequence(lambda x: 'host-{0}'.format(x))
site_name = factory.Sequence(lambda x: 'Site {0}'.format(x))
root_page = factory.SubFactory(PageFactory)
class SystemStringFactory(factory.DjangoModelFactory):
class Meta:
model = SystemString
identifier = factory.Sequence(lambda x: 'identifier_{0}'.format(x))
site = factory.SubFactory(SiteFactory)
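# --- Editor-added usage sketch (not part of the upstream file) ---
# Typical factory_boy usage inside a Django test; the identifier below is illustrative.
#
#   site = SiteFactory()
#   string = SystemStringFactory(site=site, identifier='header_title')
#   assert string.site == site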
| 24.421053 | 71 | 0.712284 | ["MIT"] | Frojd/wagtail-systemtext | tests/factories.py | 928 | Python |
# -*- coding: utf-8 -*-
"""Handle orders and pendingOrders endpoints."""
from .apirequest import APIRequest
from .decorators import dyndoc_insert, endpoint
from .responses.orders import responses
from abc import abstractmethod
class Orders(APIRequest):
"""Orders - abstract base class to handle the orders endpoints."""
ENDPOINT = ""
METHOD = "GET"
EXPECTED_STATUS = 0
@abstractmethod
@dyndoc_insert(responses)
def __init__(self, accountID, orderID=None):
"""Instantiate an Orders request.
Parameters
----------
accountID : string (required)
id of the account to perform the request on.
orderID : string
id of the order to perform the request for.
"""
endpoint = self.ENDPOINT.format(accountID=accountID, orderID=orderID)
super(Orders, self).__init__(endpoint, method=self.METHOD,
expected_status=self.EXPECTED_STATUS)
@endpoint("v3/accounts/{accountID}/orders", "POST", 201)
class OrderCreate(Orders):
"""Create an Order for an Account."""
HEADERS = {"Content-Type": "application/json"}
@dyndoc_insert(responses)
def __init__(self, accountID, data):
"""Instantiate an OrderCreate request.
Parameters
----------
accountID : string (required)
id of the account to perform the request on.
data : JSON (required)
json orderbody to send
Orderbody example::
{_v3_accounts_accountID_orders_create_body}
>>> import oandapyV20
>>> import oandapyV20.endpoints.orders as orders
>>> client = oandapyV20.API(access_token=...)
>>> r = orders.OrderCreate(accountID, data=data)
>>> client.request(r)
>>> print r.response
::
{_v3_accounts_accountID_orders_create_resp}
"""
super(OrderCreate, self).__init__(accountID)
self.data = data
@endpoint("v3/accounts/{accountID}/orders")
class OrderList(Orders):
"""Create an Order for an Account."""
@dyndoc_insert(responses)
def __init__(self, accountID, params=None):
"""Instantiate an OrderList request.
Parameters
----------
accountID : string (required)
id of the account to perform the request on.
params : dict
optional request query parameters, check developer.oanda.com
for details
Example::
>>> import oandapyV20
>>> import oandapyV20.endpoints.orders as orders
>>> client = oandapyV20.API(access_token=...)
>>> r = orders.OrderList(accountID)
>>> client.request(r)
>>> print r.response
Output::
{_v3_accounts_accountID_orders_list_resp}
"""
super(OrderList, self).__init__(accountID)
self.params = params
@endpoint("v3/accounts/{accountID}/pendingOrders")
class OrdersPending(Orders):
"""List all pending Orders in an Account."""
@dyndoc_insert(responses)
def __init__(self, accountID):
"""Instantiate an OrdersPending request.
Parameters
----------
accountID : string (required)
id of the account to perform the request on.
Example::
>>> import oandapyV20
>>> import oandapyV20.endpoints.orders as orders
>>> client = oandapyV20.API(access_token=...)
>>> r = orders.OrdersPending(accountID)
>>> client.request(r)
>>> print r.response
Output::
{_v3_accounts_accountID_orders_pending_resp}
"""
super(OrdersPending, self).__init__(accountID)
@endpoint("v3/accounts/{accountID}/orders/{orderID}")
class OrderDetails(Orders):
"""Get details for a single Order in an Account."""
@dyndoc_insert(responses)
def __init__(self, accountID, orderID):
"""Instantiate an OrderDetails request.
Parameters
----------
accountID : string (required)
id of the account to perform the request on.
orderID : string (required)
id of the order to perform the request on.
>>> import oandapyV20
>>> import oandapyV20.endpoints.orders as orders
>>> client = oandapyV20.API(access_token=...)
>>> r = orders.OrderDetails(accountID=..., orderID=...)
>>> client.request(r)
>>> print r.response
Output::
{_v3_accounts_accountID_order_details_resp}
"""
super(OrderDetails, self).__init__(accountID, orderID)
@endpoint("v3/accounts/{accountID}/orders/{orderID}", "PUT", 201)
class OrderReplace(Orders):
"""OrderReplace.
Replace an Order in an Account by simultaneously cancelling it and
    creating a replacement Order.
"""
HEADERS = {"Content-Type": "application/json"}
@dyndoc_insert(responses)
def __init__(self, accountID, orderID, data):
"""Instantiate an OrderReplace request.
Parameters
----------
accountID : string (required)
id of the account to perform the request on.
orderID : string (required)
id of the order to perform the request on.
data : JSON (required)
json orderbody to send
Orderbody example::
{_v3_accounts_accountID_order_replace_body}
>>> import oandapyV20
>>> import oandapyV20.endpoints.orders as orders
>>> client = oandapyV20.API(access_token=...)
>>> data = {_v3_accounts_accountID_order_replace_body}
>>> r = orders.OrderReplace(accountID=..., orderID=..., data=data)
>>> client.request(r)
>>> print r.response
Output::
{_v3_accounts_accountID_order_replace_resp}
"""
super(OrderReplace, self).__init__(accountID, orderID)
self.data = data
@endpoint("v3/accounts/{accountID}/orders/{orderID}/cancel", "PUT")
class OrderCancel(Orders):
"""Cancel a pending Order in an Account."""
@dyndoc_insert(responses)
def __init__(self, accountID, orderID):
"""Instantiate an OrdersCancel request.
Parameters
----------
accountID : string (required)
id of the account to perform the request on.
orderID : string (required)
            id of the order to perform the request on.
Example::
>>> import oandapyV20
>>> import oandapyV20.endpoints.orders as orders
>>> client = oandapyV20.API(access_token=...)
>>> r = orders.OrderCancel(accountID= ..., orderID=...)
>>> client.request(r)
>>> print r.response
Output::
{_v3_accounts_accountID_order_cancel_resp}
"""
super(OrderCancel, self).__init__(accountID, orderID)
@endpoint("v3/accounts/{accountID}/orders/{orderID}/clientExtensions", "PUT")
class OrderClientExtensions(Orders):
"""Update the Client Extensions for an Order in an Account.
.. warning::
Do not set, modify or delete clientExtensions if your account
is associated with MT4.
"""
HEADERS = {"Content-Type": "application/json"}
@dyndoc_insert(responses)
def __init__(self, accountID, orderID, data):
"""Instantiate an OrderCreate request.
Parameters
----------
accountID : string (required)
id of the account to perform the request on.
orderID : string (required)
id of the order to perform the request on.
data : JSON (required)
json orderbody to send
Orderbody example::
{_v3_accounts_accountID_order_clientextensions_body}
>>> import oandapyV20
>>> import oandapyV20.endpoints.orders as orders
>>> client = oandapyV20.API(access_token=...)
>>> r = orders.OrderClientExtensions(accountID, orderID, data=data)
>>> client.request(r)
>>> print r.response
::
{_v3_accounts_accountID_order_clientextensions_resp}
"""
super(OrderClientExtensions, self).__init__(accountID, orderID)
self.data = data
| 27.202658 | 77 | 0.611138 | ["MIT"] | Milad137/oanda-api-v20 | oandapyV20/endpoints/orders.py | 8,188 | Python |
from adafruit_circuitplayground.express import cpx
while True:
if cpx.shake(shake_threshold=20):
print("Shake detected!")
| 23.333333 | 51 | 0.714286 | ["MIT"] | jadudm/feather-isa | infra/libs-400rc2-20190512/examples/shake_simpletest.py | 140 | Python |
"""
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import yaml
import argparse
import numpy as np
import os
import sys
import time
import json
import pickle
import torch as th
import torch.nn as nn
import horovod.torch as hvd
#import pykaldi related modules
import kaldi.fstext as kaldi_fst
import kaldi.hmm as kaldi_hmm
import kaldi.matrix as kaldi_matrix
import kaldi.lat as kaldi_lat
import kaldi.decoder as kaldi_decoder
import kaldi.util as kaldi_util
from kaldi.asr import MappedLatticeFasterRecognizer
from kaldi.decoder import LatticeFasterDecoderOptions
from data import SpeechDataset, SeqDataloader
from models import LSTMStack, NnetAM
from ops import ops
from utils import utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-config")
parser.add_argument("-data", help="data yaml file")
parser.add_argument("-data_path", default='', type=str, help="path of data files")
parser.add_argument("-seed_model", help="the seed nerual network model")
parser.add_argument("-exp_dir", help="the directory to save the outputs")
parser.add_argument("-transform", help="feature transformation matrix or mvn statistics")
parser.add_argument("-criterion", type=str, choices=["mmi", "mpfe", "smbr"], help="set the sequence training crtierion")
parser.add_argument("-trans_model", help="the HMM transistion model, used for lattice generation")
parser.add_argument("-prior_path", help="the prior for decoder, usually named as final.occs in kaldi setup")
parser.add_argument("-den_dir", help="the decoding graph directory to find HCLG and words.txt files")
parser.add_argument("-lr", type=float, help="set the learning rate")
parser.add_argument("-ce_ratio", default=0.1, type=float, help="the ratio for ce regularization")
parser.add_argument("-momentum", default=0, type=float, help="set the momentum")
parser.add_argument("-batch_size", default=32, type=int, help="Override the batch size in the config")
parser.add_argument("-data_loader_threads", default=0, type=int, help="number of workers for data loading")
parser.add_argument("-max_grad_norm", default=5, type=float, help="max_grad_norm for gradient clipping")
parser.add_argument("-sweep_size", default=100, type=float, help="process n hours of data per sweep (default:60)")
parser.add_argument("-num_epochs", default=1, type=int, help="number of training epochs (default:1)")
parser.add_argument('-print_freq', default=10, type=int, metavar='N', help='print frequency (default: 10)')
parser.add_argument('-save_freq', default=1000, type=int, metavar='N', help='save model frequency (default: 1000)')
args = parser.parse_args()
with open(args.config) as f:
config = yaml.safe_load(f)
config['data_path'] = args.data_path
config["sweep_size"] = args.sweep_size
print("pytorch version:{}".format(th.__version__))
with open(args.data) as f:
data = yaml.safe_load(f)
config["source_paths"] = [j for i, j in data['clean_source'].items()]
print("Experiment starts with config {}".format(json.dumps(config, sort_keys=True, indent=4)))
# Initialize Horovod
hvd.init()
th.cuda.set_device(hvd.local_rank())
print("Run experiments with world size {}".format(hvd.size()))
dataset = SpeechDataset(config)
transform=None
if args.transform is not None and os.path.isfile(args.transform):
with open(args.transform, 'rb') as f:
transform = pickle.load(f)
dataset.transform = transform
train_dataloader = SeqDataloader(dataset,
batch_size=args.batch_size,
num_workers = args.data_loader_threads,
distributed=True,
test_only=False)
print("Data loader set up successfully!")
print("Number of minibatches: {}".format(len(train_dataloader)))
if not os.path.isdir(args.exp_dir):
os.makedirs(args.exp_dir)
    # create model
model_config = config["model_config"]
lstm = LSTMStack(model_config["feat_dim"], model_config["hidden_size"], model_config["num_layers"], model_config["dropout"], True)
model = NnetAM(lstm, model_config["hidden_size"]*2, model_config["label_size"])
model.cuda()
# setup the optimizer
optimizer = th.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
# Broadcast parameters and opterimizer state from rank 0 to all other processes.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Add Horovod Distributed Optimizer
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
if os.path.isfile(args.seed_model):
checkpoint = th.load(args.seed_model)
state_dict = checkpoint['model']
model.load_state_dict(state_dict)
print("=> loaded checkpoint '{}' ".format(args.seed_model))
else:
        sys.stderr.write('ERROR: The model file %s does not exist!\n'%(args.seed_model))
sys.exit(0)
HCLG = args.den_dir + "/HCLG.fst"
words_txt = args.den_dir + "/words.txt"
silence_phones = args.den_dir + "/phones/silence.csl"
if not os.path.isfile(HCLG):
sys.stderr.write('ERROR: The HCLG file %s does not exist!\n'%(HCLG))
sys.exit(0)
if not os.path.isfile(words_txt):
sys.stderr.write('ERROR: The words.txt file %s does not exist!\n'%(words_txt))
sys.exit(0)
if not os.path.isfile(silence_phones):
sys.stderr.write('ERROR: The silence phone file %s does not exist!\n'%(silence_phones))
sys.exit(0)
with open(silence_phones) as f:
silence_ids = [int(i) for i in f.readline().strip().split(':')]
f.close()
if os.path.isfile(args.trans_model):
trans_model = kaldi_hmm.TransitionModel()
with kaldi_util.io.xopen(args.trans_model) as ki:
trans_model.read(ki.stream(), ki.binary)
else:
sys.stderr.write('ERROR: The trans_model %s does not exist!\n'%(args.trans_model))
sys.exit(0)
# now we can setup the decoder
decoder_opts = LatticeFasterDecoderOptions()
decoder_opts.beam = config["decoder_config"]["beam"]
decoder_opts.lattice_beam = config["decoder_config"]["lattice_beam"]
decoder_opts.max_active = config["decoder_config"]["max_active"]
acoustic_scale = config["decoder_config"]["acoustic_scale"]
decoder_opts.determinize_lattice = False #To produce raw state-level lattice instead of compact lattice
asr_decoder = MappedLatticeFasterRecognizer.from_files(
args.trans_model, HCLG, words_txt,
acoustic_scale=acoustic_scale, decoder_opts=decoder_opts)
prior = kaldi_util.io.read_matrix(args.prior_path).numpy()
log_prior = th.tensor(np.log(prior[0]/np.sum(prior[0])), dtype=th.float)
model.train()
for epoch in range(args.num_epochs):
run_train_epoch(model, optimizer,
log_prior.cuda(),
train_dataloader,
epoch,
asr_decoder,
trans_model,
silence_ids,
args)
# save model
if hvd.rank() == 0:
checkpoint={}
checkpoint['model']=model.state_dict()
checkpoint['optimizer']=optimizer.state_dict()
checkpoint['epoch']=epoch
output_file=args.exp_dir + '/model.se.'+ str(epoch) +'.tar'
th.save(checkpoint, output_file)
def run_train_epoch(model, optimizer, log_prior, dataloader, epoch, asr_decoder, trans_model, silence_ids, args):
batch_time = utils.AverageMeter('Time', ':6.3f')
losses = utils.AverageMeter('Loss', ':.4e')
grad_norm = utils.AverageMeter('grad_norm', ':.4e')
progress = utils.ProgressMeter(len(dataloader), batch_time, losses, grad_norm,
prefix="Epoch: [{}]".format(epoch))
ce_criterion = nn.CrossEntropyLoss(ignore_index=-100, reduction='sum')
if args.criterion == "mmi":
se_criterion = ops.MMIFunction.apply
else:
se_criterion = ops.sMBRFunction.apply
end = time.time()
for i, batch in enumerate(dataloader, 0):
feat = batch["x"]
label = batch["y"] #pdf-ids for ce loss
num_frs = batch["num_frs"]
utt_ids = batch["utt_ids"]
aux = batch["aux"] #trans_ids for se loss
x = feat.to(th.float32)
y = label.long()
x = x.cuda()
y = y.cuda()
prediction = model(x)
ce_loss = ce_criterion(prediction.view(-1, prediction.shape[2]), y.view(-1))
se_loss = 0.0
for j in range(len(num_frs)):
log_like_j=prediction[j,:,:]
log_like_j= log_like_j[:num_frs[j],:]
log_like_j = log_like_j - log_prior
#trans_id = label[j, :num_frs[j], 0].tolist()
trans_id = th.from_numpy(aux[j][0][0].astype(int)).tolist()
# print(len(trans_id), num_frs[j])
if args.criterion == "mmi":
se_loss += se_criterion(log_like_j, asr_decoder, trans_model, trans_id)
else:
se_loss += se_criterion(log_like_j, asr_decoder, trans_model, trans_id, args.criterion, silence_ids)
loss = se_loss.cuda() + args.ce_ratio * ce_loss
optimizer.zero_grad()
loss.backward()
        # Gradient clipping: clip the global gradient norm to args.max_grad_norm (default 5.0)
norm = nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
grad_norm.update(norm)
# update loss
tot_frs = np.array(num_frs).sum()
losses.update(loss.item()/tot_frs)
# measure elapsed time
batch_time.update(time.time() - end)
# save model
if hvd.rank() == 0 and i % args.save_freq == 0:
checkpoint={}
checkpoint['model']=model.state_dict()
checkpoint['optimizer']=optimizer.state_dict()
output_file=args.exp_dir + '/model.se.'+ str(i) +'.tar'
th.save(checkpoint, output_file)
if hvd.rank() == 0 and i % args.print_freq == 0:
progress.print(i)
if __name__ == '__main__':
main()
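# --- Editor-added launch sketch (not part of the upstream file) ---
# A possible multi-GPU invocation via Horovod; every path and hyper-parameter below is
# a placeholder, not a value taken from the pykaldi2 recipes.
#
#   horovodrun -np 4 python train_se.py \
#       -config config.yaml -data data.yaml -data_path /path/to/feats \
#       -seed_model exp/ce/model.tar -trans_model exp/tri/final.mdl \
#       -prior_path exp/tri/final.occs -den_dir exp/graph -exp_dir exp/se \
#       -criterion mmi -lr 1e-5 -batch_size 4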
| 46.067376 | 158 | 0.583404 | ["MIT"] | gaochangfeng/pykaldi2 | bin/train_se.py | 12,991 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Ftrl optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(XLATestCase):
def initVariableAndGradient(self, dtype):
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.02, 0.04], dtype=dtype)
return var0, var1, grads0, grads1
def equivAdagradTest_FtrlPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = ftrl.FtrlOptimizer(
3.0,
learning_rate_power=-0.5, # using Adagrad learning rate
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
# Run Ftrl for a few steps
for _ in range(steps):
ftrl_update.run()
return var0.eval(), var1.eval()
def equivAdagradTest_AdagradPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
adagrad_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
# Run Adagrad for a few steps
for _ in range(steps):
adagrad_update.run()
return var0.eval(), var1.eval()
def equivGradientDescentTest_FtrlPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = ftrl.FtrlOptimizer(
3.0,
learning_rate_power=-0.0, # using Fixed learning rate
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
# Run Ftrl for a few steps
for _ in range(steps):
ftrl_update.run()
return var0.eval(), var1.eval()
def equivGradientDescentTest_GradientDescentPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = gradient_descent.GradientDescentOptimizer(3.0, name="sgd")
sgd_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
# Run GradientDescent for a few steps
for _ in range(steps):
sgd_update.run()
return var0.eval(), var1.eval()
def testFtrlwithoutRegularization(self):
for dtype in self.float_types:
with self.test_session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
# Run 3 steps FTRL
for _ in range(3):
ftrl_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]), var1.eval())
def testFtrlwithoutRegularization2(self):
for dtype in self.float_types:
with self.test_session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
# Run 3 steps FTRL
for _ in range(3):
ftrl_update.run()
# Validate updated params
self.assertAllClose(
np.array([-2.55607247, -3.98729396]), var0.eval(), 1e-5, 1e-5)
self.assertAllClose(
np.array([-0.28232238, -0.56096673]), var1.eval(), 1e-5, 1e-5)
def testFtrlWithL1(self):
for dtype in self.float_types:
with self.test_session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
# Run 10 steps FTRL
for _ in range(10):
ftrl_update.run()
# Validate updated params
self.assertAllClose(np.array([-7.66718769, -10.91273689]), var0.eval())
self.assertAllClose(np.array([-0.93460727, -1.86147261]), var1.eval())
def testFtrlWithL1_L2(self):
for dtype in self.float_types:
with self.test_session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
# Run 10 steps FTRL
for _ in range(10):
ftrl_update.run()
# Validate updated params
self.assertAllClose(np.array([-0.24059935, -0.46829352]), var0.eval())
self.assertAllClose(np.array([-0.02406147, -0.04830509]), var1.eval())
def testFtrlWithL1_L2_L2Shrinkage(self):
"""Test the new FTRL op with support for l2 shrinkage.
    The addition of this parameter, which places a constant pressure on weights
    towards the origin, causes the gradient descent trajectory to differ. The
    weights will tend to have smaller magnitudes with this parameter set.
"""
for dtype in self.float_types:
with self.test_session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
# Run 10 steps FTRL
for _ in range(10):
ftrl_update.run()
# Validate updated params
self.assertAllClose(np.array([-0.21931979, -0.40642974]), var0.eval())
self.assertAllClose(np.array([-0.0282721, -0.07188385]), var1.eval())
  # When variables are initialized with zero, FTRL-Proximal has two properties:
  # 1. Without L1 & L2 but with a fixed learning rate, FTRL-Proximal is
  #    identical to GradientDescent.
  # 2. Without L1 & L2 but with an adaptive learning rate, FTRL-Proximal is
  #    identical to Adagrad.
  # So, based on these two properties, we test whether our implementation of
  # FTRL-Proximal performs the same updates as Adagrad or GradientDescent.
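  # Sketch of why the parameters above give those equivalences (assumed from the
  # optimizer arguments used in the helpers, not an exact restatement of the op):
  # the per-coordinate step size in FTRL-Proximal is roughly
  #   lr_t = learning_rate * accumulator_t ** learning_rate_power,
  # so learning_rate_power = -0.5 gives lr / sqrt(accumulator) (Adagrad-like),
  # while learning_rate_power = 0.0 gives a constant lr (GradientDescent-like).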
def testEquivAdagradwithoutRegularization(self):
steps = 5
for dtype in self.float_types:
with self.test_session(), self.test_scope():
val0, val1 = self.equivAdagradTest_FtrlPart(steps, dtype)
with self.test_session(), self.test_scope():
val2, val3 = self.equivAdagradTest_AdagradPart(steps, dtype)
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
def testEquivGradientDescentwithoutRegularization(self):
steps = 5
for dtype in self.float_types:
with self.test_session(), self.test_scope():
val0, val1 = self.equivGradientDescentTest_FtrlPart(steps, dtype)
with self.test_session(), self.test_scope():
val2, val3 = self.equivGradientDescentTest_GradientDescentPart(
steps, dtype)
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
if __name__ == "__main__":
test.main()
| 41.602787 | 80 | 0.673283 | [ "Apache-2.0" ] | 18802459097/tensorflow | tensorflow/compiler/tests/ftrl_test.py | 11,940 | Python |
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import external_process
from neutron.agent.linux import keepalived
from neutron.openstack.common import log as logging
from neutron.tests.functional import base as functional_base
from neutron.tests.unit.agent.linux import test_keepalived
LOG = logging.getLogger(__name__)
class KeepalivedManagerTestCase(functional_base.BaseSudoTestCase,
test_keepalived.KeepalivedConfBaseMixin):
def setUp(self):
super(KeepalivedManagerTestCase, self).setUp()
self.check_sudo_enabled()
self._configure()
def _configure(self):
cfg.CONF.set_override('debug', True)
config.setup_logging()
config.register_root_helper(cfg.CONF)
cfg.CONF.set_override('root_helper', self.root_helper, group='AGENT')
def test_keepalived_spawn(self):
expected_config = self._get_config()
manager = keepalived.KeepalivedManager('router1', expected_config,
conf_path=cfg.CONF.state_path,
root_helper=self.root_helper)
self.addCleanup(manager.disable)
manager.spawn()
process = external_process.ProcessManager(
cfg.CONF,
'router1',
self.root_helper,
namespace=None,
pids_path=cfg.CONF.state_path)
self.assertTrue(process.active)
config_path = manager._get_full_config_file_path('keepalived.conf')
with open(config_path, 'r') as config_file:
config_contents = config_file.read()
self.assertEqual(expected_config.get_config_str(), config_contents)
| 38.95082 | 78 | 0.683502 | [ "Apache-2.0" ] | JackyGao2016/OpenStack-ML2 | neutron/tests/functional/agent/linux/test_keepalived.py | 2,376 | Python |
from pyquilted.quilted.section import Section
class Work(Section):
"""The work section in a quilted resume
The work object is a complex section. It contains blocks of jobs
and optionally a list of slugs. As a section it mixes in the
sectionable functionality.
"""
def __init__(self, blocks=None, slugs=None, icon=None):
self.label = 'Work'
self.icon = icon or 'fa-briefcase'
self.blocks = blocks or []
self.compact = False
def add_job(self, job):
self.blocks.append(vars(job))
def add_slugs(self, slugs):
self.slugs = slugs
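# A minimal usage sketch (hypothetical values, assuming the classes defined below):
#   work = Work()
#   work.add_job(Job(dates='2018-2020', location='Remote',
#                    company='Acme', title='Engineer', slugs=['built things']))
#   work.blocks                                  # -> list of job dicts for templating
#   work.add_slugs(Slugs(slugs=['volunteer work']))  # optional extra slugs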
class Job:
"""The job block in the work section"""
def __init__(self, dates=None, location=None, company=None, title=None,
slugs=None, previously=None, **kwargs):
self.dates = dates
self.location = location
self.company = company
self.title = title
self.slugs = slugs
self.history = History(previously=previously).to_dict()
class Slugs():
"""The additional list of slugs in the work section"""
def __init__(self, slugs=None):
self.blocks = slugs
class History():
def __init__(self, previously=None):
self.previously = previously
def to_dict(self):
if self.previously:
return vars(self)
return None
| 27.12 | 75 | 0.627581 | [ "MIT" ] | cocoroutine/pyquilted | pyquilted/quilted/work.py | 1,356 | Python |
# Generated by Django 2.2 on 2019-05-02 16:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openbook_communities', '0021_auto_20190502_1754'),
]
operations = [
migrations.AddIndex(
model_name='communitymembership',
index=models.Index(fields=['community', 'user'], name='openbook_co_communi_59b23c_idx'),
),
]
| 23.944444 | 100 | 0.651972 | [ "MIT" ] | OkunaOrg/okuna-api | openbook_communities/migrations/0022_auto_20190502_1804.py | 431 | Python |
"""Collect and parse kobo forms."""
from datetime import datetime, timedelta, timezone
from os import getenv
from typing import Dict, List
from dateutil.parser import parse as dtparser
from flask import request
import requests
from werkzeug.exceptions import BadRequest, InternalServerError, NotFound
def get_kobo_params():
"""Collect and validate request parameters and environment variables."""
kobo_username = getenv('KOBO_USERNAME')
if kobo_username is None:
raise InternalServerError('Missing backend parameter: KOBO_USERNAME')
kobo_pw = getenv('KOBO_PW')
if kobo_pw is None:
raise InternalServerError('Missing backend parameter: KOBO_PW')
form_name = request.args.get('formName')
if form_name is None:
raise BadRequest('Missing query parameter: formName')
datetime_field = request.args.get('datetimeField')
if datetime_field is None:
raise BadRequest('Missing parameter datetimeField')
geom_field = request.args.get('geomField')
if geom_field is None:
raise BadRequest('Missing parameter: geomField')
filters = {}
filters_params = request.args.get('filters', None)
if filters_params is not None:
filters = dict([f.split('=') for f in filters_params.split(',')])
form_fields = dict(name=form_name,
datetime=datetime_field,
geom=geom_field,
filters=filters)
auth = (kobo_username, kobo_pw)
return auth, form_fields
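# Illustrative request shape handled above (hypothetical values):
#   GET ...?formName=households&datetimeField=_submission_time
#          &geomField=location&filters=region=Oromia,status=approved
# The 'filters' string is parsed into {'region': 'Oromia', 'status': 'approved'}
# and applied when filtering responses further below.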
def parse_form_field(value: str, field_type: str):
"""Parse strings into type according to field_type provided."""
if field_type == 'decimal':
return float(value)
elif field_type == 'integer':
return int(value)
elif field_type in ('datetime', 'date'):
return dtparser(value).astimezone(timezone.utc)
elif field_type == 'geopoint':
lat, lon, _, _ = value.split(' ')
return {'lat': float(lat), 'lon': float(lon)}
else:
return value
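# Examples of the parsing above (values are hypothetical):
#   parse_form_field('12.5', 'decimal')            -> 12.5
#   parse_form_field('3', 'integer')               -> 3
#   parse_form_field('9.03 38.74 0 0', 'geopoint') -> {'lat': 9.03, 'lon': 38.74}
# Any other field type is returned unchanged.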
def parse_form_response(form_dict: Dict[str, str], form_fields: Dict[str, str], labels: List[str]):
"""Transform a Kobo form dictionary into a format that is used by the frontend."""
form_data = {k: parse_form_field(form_dict.get(k), v) for k, v in labels.items()
if k not in (form_fields.get('geom'), form_fields.get('datetime'))}
datetime_field = form_fields.get('datetime')
datetime_value = parse_form_field(form_dict.get(datetime_field), labels.get(datetime_field))
geom_field = form_fields.get('geom')
latlon_dict = parse_form_field(form_dict.get(geom_field), labels.get(geom_field))
status = form_dict.get('_validation_status').get('label', None)
form_data = {**form_data, **latlon_dict, 'date': datetime_value, 'status': status}
return form_data
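# The resulting dict (sketch; keys depend on the form's own survey fields) looks like:
#   {'<survey fields...>': ..., 'lat': 9.03, 'lon': 38.74,
#    'date': datetime(..., tzinfo=utc), 'status': 'Approved'}
# 'date' is converted to an ISO date string later, in get_form_responses.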
def parse_datetime_params():
"""Transform into datetime objects used for filtering form responses."""
begin_datetime_str = request.args.get('beginDateTime', '2000-01-01')
begin_datetime = dtparser(begin_datetime_str).replace(tzinfo=timezone.utc)
end_datetime_str = request.args.get('endDateTime')
if end_datetime_str is not None:
end_datetime = dtparser(end_datetime_str)
else:
# 10 years.
end_datetime = datetime.now() + timedelta(days=365 * 10)
end_datetime = end_datetime.replace(tzinfo=timezone.utc)
    # The parsed datetimes default hours, minutes, and seconds to 00.
# This check is done in case the begin and end datetime values are the same.
if end_datetime == begin_datetime:
end_datetime = end_datetime + timedelta(days=1)
if begin_datetime > end_datetime:
raise BadRequest('beginDateTime value must be lower than endDateTime')
return begin_datetime, end_datetime
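# Example of the edge case handled above: beginDateTime=2021-06-01 with
# endDateTime=2021-06-01 both parse to midnight UTC, so the end is pushed to
# 2021-06-02 to keep the interval non-empty.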
def get_responses_from_kobo(auth, form_name):
"""
    Query the Kobo API to collect all the metadata related to a form.
    Also retrieve the form responses for parsing and filtering.
"""
form_url = request.args.get('koboUrl')
if form_url is None:
raise BadRequest('Missing parameter koboUrl')
resp = requests.get(form_url, auth=auth)
resp.raise_for_status()
kobo_user_metadata = resp.json()
# Find form and get results.
forms_iterator = (d for d in kobo_user_metadata.get('results') if d.get('name') == form_name)
form_metadata = next(forms_iterator, None)
if form_metadata is None:
raise NotFound('Form not found')
# Additional request to get label mappings.
resp = requests.get(form_metadata.get('url'), auth=auth)
resp.raise_for_status()
form_metadata = resp.json()
# Get form fields and field type used for parsing.
form_labels = {f.get('$autoname'): f.get('type') for f
in form_metadata.get('content').get('survey')}
# Get all form responses using metadata 'data' key
resp = requests.get(form_metadata.get('data'), auth=auth)
resp.raise_for_status()
form_responses = resp.json().get('results')
return form_responses, form_labels
def get_form_responses(begin_datetime, end_datetime):
"""Get all form responses using Kobo api."""
auth, form_fields = get_kobo_params()
form_responses, form_labels = get_responses_from_kobo(auth, form_fields.get('name'))
forms = [parse_form_response(f, form_fields, form_labels) for f in form_responses]
filtered_forms = []
for form in forms:
date_value = form.get('date')
conditions = [form.get(k) == v for k, v in form_fields.get('filters').items()]
conditions.append(begin_datetime <= date_value)
conditions.append(date_value < end_datetime)
        if not all(conditions):
continue
filtered_forms.append(form)
sorted_forms = sorted(filtered_forms, key=lambda x: x.get('date'))
# Transform date into string.
sorted_forms = [{**f, 'date': f.get('date').date().isoformat()} for f in sorted_forms]
return sorted_forms
| 34.136364 | 99 | 0.687916 | [ "MIT" ] | tdlinh2712/prism-frontend | api-flask/app/kobo.py | 6,008 | Python |
import logging
import ldap
import six
from collections import Mapping, Iterable
from ldap import modlist
from nodeconductor.structure import ServiceBackend, ServiceBackendError
logger = logging.getLogger(__name__)
class LDAPBackendError(ServiceBackendError):
pass
class UnauthorizedError(LDAPBackendError):
pass
class LDAPBackend(ServiceBackend):
"""
Interface to LDAP API.
https://www.python-ldap.org/doc/html/
"""
def __init__(self, settings):
self.settings = settings
self.user_base_dn = settings.options.get('user_base_dn', '')
self.client = self._get_client()
def _get_client(self):
username = ','.join(['uid=%s' % self.settings.username, self.user_base_dn])
try:
client = ldap.initialize(self.settings.backend_url)
client.simple_bind_s(username, self.settings.password)
except ldap.LDAPError as e:
six.reraise(UnauthorizedError, e)
return client
    def ping(self, raise_exception=False):
        tries_count = 3
        for _ in range(tries_count):
            try:
                self.client.search_s(self.user_base_dn, ldap.SCOPE_SUBTREE)
            except ldap.LDAPError as e:
                if raise_exception:
                    six.reraise(LDAPBackendError, e)
            else:
                # The search succeeded, so the LDAP server is reachable.
                return True
        return False
def sync(self):
pass
def create_ldap_user(self, ldap_user):
dn = ('uid=%s,' % ldap_user.name) + self.user_base_dn
        # python-ldap raises a TypeError if unicode strings are used.
data = modlist.addModlist(self._unicode_to_string(ldap_user.attributes))
try:
self.client.add_s(dn, data)
except ldap.LDAPError as e:
six.reraise(LDAPBackendError, e)
else:
ldap_user.backend_id = dn
ldap_user.save(update_fields=['backend_id'])
def delete_ldap_user(self, ldap_user):
dn = ldap_user.backend_id
try:
# XXX: Change to disabling user instead
self.client.delete_s(dn)
except ldap.LDAPError as e:
six.reraise(LDAPBackendError, e)
def _unicode_to_string(self, data):
# http://stackoverflow.com/a/1254499/4591416
if isinstance(data, basestring):
return str(data)
elif isinstance(data, Mapping):
return dict(map(self._unicode_to_string, data.iteritems()))
elif isinstance(data, Iterable):
return type(data)(map(self._unicode_to_string, data))
else:
return data
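    # Example of the conversion above (illustrative values): a nested unicode
    # payload such as {u'uid': u'jdoe', u'objectClass': [u'inetOrgPerson']}
    # becomes {'uid': 'jdoe', 'objectClass': ['inetOrgPerson']}, which is what
    # modlist.addModlist() in create_ldap_user expects on Python 2.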
| 28.56044 | 83 | 0.629088 | [ "MIT" ] | opennode/nodeconductor-ldap | src/nodeconductor_ldap/backend.py | 2,599 | Python |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import flow_action_list
class flow_id(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-openflow-operational - based on the path /openflow-state/flow-id. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__flow_id','__priority','__status','__in_port','__in_vlan','__source_mac','__destination_mac','__ether_type','__ip_protocol','__ip_protocol_source_port','__ip_protocol_destination_port','__source_ip','__destination_ip','__source_ipv6','__destination_ipv6','__instructions','__action_data','__meter_id','__vlan_upbits','__nw_tos','__source_ip_mask','__destination_ip_mask','__total_packets','__total_bytes','__flow_action_list',)
_yang_name = 'flow-id'
_rest_name = 'flow-id'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__ether_type = YANGDynClass(base=unicode, is_leaf=True, yang_name="ether-type", rest_name="ether-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__destination_ipv6 = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ipv6", rest_name="destination-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__total_bytes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False)
self.__ip_protocol_destination_port = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-destination-port", rest_name="ip-protocol-destination-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
self.__action_data = YANGDynClass(base=unicode, is_leaf=True, yang_name="action-data", rest_name="action-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__ip_protocol_source_port = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-source-port", rest_name="ip-protocol-source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
self.__priority = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
self.__total_packets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-packets", rest_name="total-packets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False)
self.__source_ipv6 = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ipv6", rest_name="source-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__flow_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
self.__destination_mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-mac", rest_name="destination-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__in_port = YANGDynClass(base=unicode, is_leaf=True, yang_name="in-port", rest_name="in-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__status = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-flow-pending-modify': {'value': 3}, u'dcm-flow-programmed': {'value': 4}, u'dcm-flow-pending-add': {'value': 1}, u'dcm-flow-pending-delete': {'value': 2}, u'dcm-flow-not-programmed': {'value': 0}},), is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='flow-status', is_config=False)
self.__destination_ip = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip", rest_name="destination-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__ip_protocol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol", rest_name="ip-protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
self.__flow_action_list = YANGDynClass(base=YANGListType("action_idx",flow_action_list.flow_action_list, yang_name="flow-action-list", rest_name="flow-action-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action-idx', extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}), is_container='list', yang_name="flow-action-list", rest_name="flow-action-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='list', is_config=False)
self.__source_mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-mac", rest_name="source-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__nw_tos = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="nw-tos", rest_name="nw-tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
self.__meter_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="meter-id", rest_name="meter-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
self.__instructions = YANGDynClass(base=unicode, is_leaf=True, yang_name="instructions", rest_name="instructions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__in_vlan = YANGDynClass(base=unicode, is_leaf=True, yang_name="in-vlan", rest_name="in-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__source_ip = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip", rest_name="source-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__source_ip_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip-mask", rest_name="source-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__destination_ip_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip-mask", rest_name="destination-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
self.__vlan_upbits = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vlan-upbits", rest_name="vlan-upbits", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'openflow-state', u'flow-id']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'openflow-state', u'flow-id']
def _get_flow_id(self):
"""
Getter method for flow_id, mapped from YANG variable /openflow_state/flow_id/flow_id (uint32)
YANG Description: Flow ID
"""
return self.__flow_id
def _set_flow_id(self, v, load=False):
"""
Setter method for flow_id, mapped from YANG variable /openflow_state/flow_id/flow_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_flow_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flow_id() directly.
YANG Description: Flow ID
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """flow_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""",
})
self.__flow_id = t
if hasattr(self, '_set'):
self._set()
def _unset_flow_id(self):
self.__flow_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
def _get_priority(self):
"""
Getter method for priority, mapped from YANG variable /openflow_state/flow_id/priority (uint32)
YANG Description: Priority
"""
return self.__priority
def _set_priority(self, v, load=False):
"""
Setter method for priority, mapped from YANG variable /openflow_state/flow_id/priority (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly.
YANG Description: Priority
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priority must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""",
})
self.__priority = t
if hasattr(self, '_set'):
self._set()
def _unset_priority(self):
self.__priority = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
def _get_status(self):
"""
Getter method for status, mapped from YANG variable /openflow_state/flow_id/status (flow-status)
YANG Description: Status
"""
return self.__status
def _set_status(self, v, load=False):
"""
Setter method for status, mapped from YANG variable /openflow_state/flow_id/status (flow-status)
If this variable is read-only (config: false) in the
source YANG file, then _set_status is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_status() directly.
YANG Description: Status
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-flow-pending-modify': {'value': 3}, u'dcm-flow-programmed': {'value': 4}, u'dcm-flow-pending-add': {'value': 1}, u'dcm-flow-pending-delete': {'value': 2}, u'dcm-flow-not-programmed': {'value': 0}},), is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='flow-status', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """status must be of a type compatible with flow-status""",
'defined-type': "brocade-openflow-operational:flow-status",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-flow-pending-modify': {'value': 3}, u'dcm-flow-programmed': {'value': 4}, u'dcm-flow-pending-add': {'value': 1}, u'dcm-flow-pending-delete': {'value': 2}, u'dcm-flow-not-programmed': {'value': 0}},), is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='flow-status', is_config=False)""",
})
self.__status = t
if hasattr(self, '_set'):
self._set()
def _unset_status(self):
self.__status = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-flow-pending-modify': {'value': 3}, u'dcm-flow-programmed': {'value': 4}, u'dcm-flow-pending-add': {'value': 1}, u'dcm-flow-pending-delete': {'value': 2}, u'dcm-flow-not-programmed': {'value': 0}},), is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='flow-status', is_config=False)
def _get_in_port(self):
"""
Getter method for in_port, mapped from YANG variable /openflow_state/flow_id/in_port (string)
YANG Description: In Port
"""
return self.__in_port
def _set_in_port(self, v, load=False):
"""
Setter method for in_port, mapped from YANG variable /openflow_state/flow_id/in_port (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_port() directly.
YANG Description: In Port
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="in-port", rest_name="in-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """in_port must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="in-port", rest_name="in-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__in_port = t
if hasattr(self, '_set'):
self._set()
def _unset_in_port(self):
self.__in_port = YANGDynClass(base=unicode, is_leaf=True, yang_name="in-port", rest_name="in-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_in_vlan(self):
"""
Getter method for in_vlan, mapped from YANG variable /openflow_state/flow_id/in_vlan (string)
YANG Description: In Vlan
"""
return self.__in_vlan
def _set_in_vlan(self, v, load=False):
"""
Setter method for in_vlan, mapped from YANG variable /openflow_state/flow_id/in_vlan (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_in_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_in_vlan() directly.
YANG Description: In Vlan
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="in-vlan", rest_name="in-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """in_vlan must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="in-vlan", rest_name="in-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__in_vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_in_vlan(self):
self.__in_vlan = YANGDynClass(base=unicode, is_leaf=True, yang_name="in-vlan", rest_name="in-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_source_mac(self):
"""
Getter method for source_mac, mapped from YANG variable /openflow_state/flow_id/source_mac (string)
YANG Description: Source Mac
"""
return self.__source_mac
def _set_source_mac(self, v, load=False):
"""
Setter method for source_mac, mapped from YANG variable /openflow_state/flow_id/source_mac (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_source_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_source_mac() directly.
YANG Description: Source Mac
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="source-mac", rest_name="source-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """source_mac must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="source-mac", rest_name="source-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__source_mac = t
if hasattr(self, '_set'):
self._set()
def _unset_source_mac(self):
self.__source_mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-mac", rest_name="source-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_destination_mac(self):
"""
Getter method for destination_mac, mapped from YANG variable /openflow_state/flow_id/destination_mac (string)
YANG Description: Destination Mac
"""
return self.__destination_mac
def _set_destination_mac(self, v, load=False):
"""
Setter method for destination_mac, mapped from YANG variable /openflow_state/flow_id/destination_mac (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_destination_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_destination_mac() directly.
YANG Description: Destination Mac
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="destination-mac", rest_name="destination-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """destination_mac must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-mac", rest_name="destination-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__destination_mac = t
if hasattr(self, '_set'):
self._set()
def _unset_destination_mac(self):
self.__destination_mac = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-mac", rest_name="destination-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_ether_type(self):
"""
Getter method for ether_type, mapped from YANG variable /openflow_state/flow_id/ether_type (string)
YANG Description: Ether type
"""
return self.__ether_type
def _set_ether_type(self, v, load=False):
"""
Setter method for ether_type, mapped from YANG variable /openflow_state/flow_id/ether_type (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_ether_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ether_type() directly.
YANG Description: Ether type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="ether-type", rest_name="ether-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ether_type must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="ether-type", rest_name="ether-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__ether_type = t
if hasattr(self, '_set'):
self._set()
def _unset_ether_type(self):
self.__ether_type = YANGDynClass(base=unicode, is_leaf=True, yang_name="ether-type", rest_name="ether-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_ip_protocol(self):
"""
Getter method for ip_protocol, mapped from YANG variable /openflow_state/flow_id/ip_protocol (uint32)
YANG Description: IP Protocol
"""
return self.__ip_protocol
def _set_ip_protocol(self, v, load=False):
"""
Setter method for ip_protocol, mapped from YANG variable /openflow_state/flow_id/ip_protocol (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_protocol is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_protocol() directly.
YANG Description: IP Protocol
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol", rest_name="ip-protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip_protocol must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol", rest_name="ip-protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""",
})
self.__ip_protocol = t
if hasattr(self, '_set'):
self._set()
def _unset_ip_protocol(self):
self.__ip_protocol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol", rest_name="ip-protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
def _get_ip_protocol_source_port(self):
"""
Getter method for ip_protocol_source_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_source_port (uint32)
YANG Description: IP Protocol Source Port
"""
return self.__ip_protocol_source_port
def _set_ip_protocol_source_port(self, v, load=False):
"""
Setter method for ip_protocol_source_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_source_port (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_protocol_source_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_protocol_source_port() directly.
YANG Description: IP Protocol Source Port
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-source-port", rest_name="ip-protocol-source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip_protocol_source_port must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-source-port", rest_name="ip-protocol-source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""",
})
self.__ip_protocol_source_port = t
if hasattr(self, '_set'):
self._set()
def _unset_ip_protocol_source_port(self):
self.__ip_protocol_source_port = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-source-port", rest_name="ip-protocol-source-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
def _get_ip_protocol_destination_port(self):
"""
Getter method for ip_protocol_destination_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_destination_port (uint32)
YANG Description: IP Protocol Destination Port
"""
return self.__ip_protocol_destination_port
def _set_ip_protocol_destination_port(self, v, load=False):
"""
Setter method for ip_protocol_destination_port, mapped from YANG variable /openflow_state/flow_id/ip_protocol_destination_port (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_protocol_destination_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_protocol_destination_port() directly.
YANG Description: IP Protocol Destination Port
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-destination-port", rest_name="ip-protocol-destination-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip_protocol_destination_port must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-destination-port", rest_name="ip-protocol-destination-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""",
})
self.__ip_protocol_destination_port = t
if hasattr(self, '_set'):
self._set()
def _unset_ip_protocol_destination_port(self):
self.__ip_protocol_destination_port = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip-protocol-destination-port", rest_name="ip-protocol-destination-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
def _get_source_ip(self):
"""
Getter method for source_ip, mapped from YANG variable /openflow_state/flow_id/source_ip (string)
YANG Description: Source IPv4
"""
return self.__source_ip
def _set_source_ip(self, v, load=False):
"""
Setter method for source_ip, mapped from YANG variable /openflow_state/flow_id/source_ip (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_source_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_source_ip() directly.
YANG Description: Source IPv4
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="source-ip", rest_name="source-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """source_ip must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip", rest_name="source-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__source_ip = t
if hasattr(self, '_set'):
self._set()
def _unset_source_ip(self):
self.__source_ip = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip", rest_name="source-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_destination_ip(self):
"""
Getter method for destination_ip, mapped from YANG variable /openflow_state/flow_id/destination_ip (string)
YANG Description: Destination IPv4
"""
return self.__destination_ip
def _set_destination_ip(self, v, load=False):
"""
Setter method for destination_ip, mapped from YANG variable /openflow_state/flow_id/destination_ip (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_destination_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_destination_ip() directly.
YANG Description: Destination IPv4
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="destination-ip", rest_name="destination-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """destination_ip must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip", rest_name="destination-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__destination_ip = t
if hasattr(self, '_set'):
self._set()
def _unset_destination_ip(self):
self.__destination_ip = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip", rest_name="destination-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_source_ipv6(self):
"""
Getter method for source_ipv6, mapped from YANG variable /openflow_state/flow_id/source_ipv6 (string)
YANG Description: Source IPv6 Address
"""
return self.__source_ipv6
def _set_source_ipv6(self, v, load=False):
"""
Setter method for source_ipv6, mapped from YANG variable /openflow_state/flow_id/source_ipv6 (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_source_ipv6 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_source_ipv6() directly.
YANG Description: Source IPv6 Address
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="source-ipv6", rest_name="source-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """source_ipv6 must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ipv6", rest_name="source-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__source_ipv6 = t
if hasattr(self, '_set'):
self._set()
def _unset_source_ipv6(self):
self.__source_ipv6 = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ipv6", rest_name="source-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_destination_ipv6(self):
"""
Getter method for destination_ipv6, mapped from YANG variable /openflow_state/flow_id/destination_ipv6 (string)
YANG Description: Destination IPv6 Address
"""
return self.__destination_ipv6
def _set_destination_ipv6(self, v, load=False):
"""
Setter method for destination_ipv6, mapped from YANG variable /openflow_state/flow_id/destination_ipv6 (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_destination_ipv6 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_destination_ipv6() directly.
YANG Description: Destination IPv6 Address
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="destination-ipv6", rest_name="destination-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """destination_ipv6 must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ipv6", rest_name="destination-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__destination_ipv6 = t
if hasattr(self, '_set'):
self._set()
def _unset_destination_ipv6(self):
self.__destination_ipv6 = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ipv6", rest_name="destination-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_instructions(self):
"""
Getter method for instructions, mapped from YANG variable /openflow_state/flow_id/instructions (string)
YANG Description: Instructions
"""
return self.__instructions
def _set_instructions(self, v, load=False):
"""
Setter method for instructions, mapped from YANG variable /openflow_state/flow_id/instructions (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_instructions is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_instructions() directly.
YANG Description: Instructions
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="instructions", rest_name="instructions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """instructions must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="instructions", rest_name="instructions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__instructions = t
if hasattr(self, '_set'):
self._set()
def _unset_instructions(self):
self.__instructions = YANGDynClass(base=unicode, is_leaf=True, yang_name="instructions", rest_name="instructions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_action_data(self):
"""
Getter method for action_data, mapped from YANG variable /openflow_state/flow_id/action_data (string)
YANG Description: Action
"""
return self.__action_data
def _set_action_data(self, v, load=False):
"""
Setter method for action_data, mapped from YANG variable /openflow_state/flow_id/action_data (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_action_data is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_action_data() directly.
YANG Description: Action
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="action-data", rest_name="action-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """action_data must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="action-data", rest_name="action-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__action_data = t
if hasattr(self, '_set'):
self._set()
def _unset_action_data(self):
self.__action_data = YANGDynClass(base=unicode, is_leaf=True, yang_name="action-data", rest_name="action-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_meter_id(self):
"""
Getter method for meter_id, mapped from YANG variable /openflow_state/flow_id/meter_id (uint32)
YANG Description: Meter id
"""
return self.__meter_id
def _set_meter_id(self, v, load=False):
"""
Setter method for meter_id, mapped from YANG variable /openflow_state/flow_id/meter_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_meter_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_meter_id() directly.
YANG Description: Meter id
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="meter-id", rest_name="meter-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """meter_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="meter-id", rest_name="meter-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""",
})
self.__meter_id = t
if hasattr(self, '_set'):
self._set()
def _unset_meter_id(self):
self.__meter_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="meter-id", rest_name="meter-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
def _get_vlan_upbits(self):
"""
Getter method for vlan_upbits, mapped from YANG variable /openflow_state/flow_id/vlan_upbits (uint32)
YANG Description: Vlan Priority
"""
return self.__vlan_upbits
def _set_vlan_upbits(self, v, load=False):
"""
Setter method for vlan_upbits, mapped from YANG variable /openflow_state/flow_id/vlan_upbits (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_upbits is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_upbits() directly.
YANG Description: Vlan Priority
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vlan-upbits", rest_name="vlan-upbits", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlan_upbits must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vlan-upbits", rest_name="vlan-upbits", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""",
})
self.__vlan_upbits = t
if hasattr(self, '_set'):
self._set()
def _unset_vlan_upbits(self):
self.__vlan_upbits = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vlan-upbits", rest_name="vlan-upbits", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
def _get_nw_tos(self):
"""
Getter method for nw_tos, mapped from YANG variable /openflow_state/flow_id/nw_tos (uint32)
YANG Description: IP DSCP
"""
return self.__nw_tos
def _set_nw_tos(self, v, load=False):
"""
Setter method for nw_tos, mapped from YANG variable /openflow_state/flow_id/nw_tos (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_nw_tos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nw_tos() directly.
YANG Description: IP DSCP
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="nw-tos", rest_name="nw-tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """nw_tos must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="nw-tos", rest_name="nw-tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)""",
})
self.__nw_tos = t
if hasattr(self, '_set'):
self._set()
def _unset_nw_tos(self):
self.__nw_tos = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="nw-tos", rest_name="nw-tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
def _get_source_ip_mask(self):
"""
Getter method for source_ip_mask, mapped from YANG variable /openflow_state/flow_id/source_ip_mask (string)
YANG Description: Source IPv4 Mask
"""
return self.__source_ip_mask
def _set_source_ip_mask(self, v, load=False):
"""
Setter method for source_ip_mask, mapped from YANG variable /openflow_state/flow_id/source_ip_mask (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_source_ip_mask is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_source_ip_mask() directly.
YANG Description: Source IPv4 Mask
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="source-ip-mask", rest_name="source-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """source_ip_mask must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip-mask", rest_name="source-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__source_ip_mask = t
if hasattr(self, '_set'):
self._set()
def _unset_source_ip_mask(self):
self.__source_ip_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="source-ip-mask", rest_name="source-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_destination_ip_mask(self):
"""
Getter method for destination_ip_mask, mapped from YANG variable /openflow_state/flow_id/destination_ip_mask (string)
YANG Description: Destination IPv4 Mask
"""
return self.__destination_ip_mask
def _set_destination_ip_mask(self, v, load=False):
"""
Setter method for destination_ip_mask, mapped from YANG variable /openflow_state/flow_id/destination_ip_mask (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_destination_ip_mask is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_destination_ip_mask() directly.
YANG Description: Destination IPv4 Mask
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="destination-ip-mask", rest_name="destination-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """destination_ip_mask must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip-mask", rest_name="destination-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
})
self.__destination_ip_mask = t
if hasattr(self, '_set'):
self._set()
def _unset_destination_ip_mask(self):
self.__destination_ip_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-ip-mask", rest_name="destination-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_total_packets(self):
"""
Getter method for total_packets, mapped from YANG variable /openflow_state/flow_id/total_packets (uint64)
YANG Description: Total Packets
"""
return self.__total_packets
def _set_total_packets(self, v, load=False):
"""
Setter method for total_packets, mapped from YANG variable /openflow_state/flow_id/total_packets (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_total_packets is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_total_packets() directly.
YANG Description: Total Packets
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-packets", rest_name="total-packets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """total_packets must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-packets", rest_name="total-packets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False)""",
})
self.__total_packets = t
if hasattr(self, '_set'):
self._set()
def _unset_total_packets(self):
self.__total_packets = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-packets", rest_name="total-packets", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False)
def _get_total_bytes(self):
"""
Getter method for total_bytes, mapped from YANG variable /openflow_state/flow_id/total_bytes (uint64)
YANG Description: Total Bytes
"""
return self.__total_bytes
def _set_total_bytes(self, v, load=False):
"""
Setter method for total_bytes, mapped from YANG variable /openflow_state/flow_id/total_bytes (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_total_bytes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_total_bytes() directly.
YANG Description: Total Bytes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """total_bytes must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False)""",
})
self.__total_bytes = t
if hasattr(self, '_set'):
self._set()
def _unset_total_bytes(self):
self.__total_bytes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="total-bytes", rest_name="total-bytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint64', is_config=False)
def _get_flow_action_list(self):
"""
Getter method for flow_action_list, mapped from YANG variable /openflow_state/flow_id/flow_action_list (list)
YANG Description: Details of an action
"""
return self.__flow_action_list
def _set_flow_action_list(self, v, load=False):
"""
Setter method for flow_action_list, mapped from YANG variable /openflow_state/flow_id/flow_action_list (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_flow_action_list is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flow_action_list() directly.
YANG Description: Details of an action
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("action_idx",flow_action_list.flow_action_list, yang_name="flow-action-list", rest_name="flow-action-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action-idx', extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}), is_container='list', yang_name="flow-action-list", rest_name="flow-action-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """flow_action_list must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("action_idx",flow_action_list.flow_action_list, yang_name="flow-action-list", rest_name="flow-action-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action-idx', extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}), is_container='list', yang_name="flow-action-list", rest_name="flow-action-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='list', is_config=False)""",
})
self.__flow_action_list = t
if hasattr(self, '_set'):
self._set()
def _unset_flow_action_list(self):
self.__flow_action_list = YANGDynClass(base=YANGListType("action_idx",flow_action_list.flow_action_list, yang_name="flow-action-list", rest_name="flow-action-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action-idx', extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}), is_container='list', yang_name="flow-action-list", rest_name="flow-action-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-action', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='list', is_config=False)
flow_id = __builtin__.property(_get_flow_id)
priority = __builtin__.property(_get_priority)
status = __builtin__.property(_get_status)
in_port = __builtin__.property(_get_in_port)
in_vlan = __builtin__.property(_get_in_vlan)
source_mac = __builtin__.property(_get_source_mac)
destination_mac = __builtin__.property(_get_destination_mac)
ether_type = __builtin__.property(_get_ether_type)
ip_protocol = __builtin__.property(_get_ip_protocol)
ip_protocol_source_port = __builtin__.property(_get_ip_protocol_source_port)
ip_protocol_destination_port = __builtin__.property(_get_ip_protocol_destination_port)
source_ip = __builtin__.property(_get_source_ip)
destination_ip = __builtin__.property(_get_destination_ip)
source_ipv6 = __builtin__.property(_get_source_ipv6)
destination_ipv6 = __builtin__.property(_get_destination_ipv6)
instructions = __builtin__.property(_get_instructions)
action_data = __builtin__.property(_get_action_data)
meter_id = __builtin__.property(_get_meter_id)
vlan_upbits = __builtin__.property(_get_vlan_upbits)
nw_tos = __builtin__.property(_get_nw_tos)
source_ip_mask = __builtin__.property(_get_source_ip_mask)
destination_ip_mask = __builtin__.property(_get_destination_ip_mask)
total_packets = __builtin__.property(_get_total_packets)
total_bytes = __builtin__.property(_get_total_bytes)
flow_action_list = __builtin__.property(_get_flow_action_list)
_pyangbind_elements = {'flow_id': flow_id, 'priority': priority, 'status': status, 'in_port': in_port, 'in_vlan': in_vlan, 'source_mac': source_mac, 'destination_mac': destination_mac, 'ether_type': ether_type, 'ip_protocol': ip_protocol, 'ip_protocol_source_port': ip_protocol_source_port, 'ip_protocol_destination_port': ip_protocol_destination_port, 'source_ip': source_ip, 'destination_ip': destination_ip, 'source_ipv6': source_ipv6, 'destination_ipv6': destination_ipv6, 'instructions': instructions, 'action_data': action_data, 'meter_id': meter_id, 'vlan_upbits': vlan_upbits, 'nw_tos': nw_tos, 'source_ip_mask': source_ip_mask, 'destination_ip_mask': destination_ip_mask, 'total_packets': total_packets, 'total_bytes': total_bytes, 'flow_action_list': flow_action_list, }
| 70.298872 | 812 | 0.747012 | [
"Apache-2.0"
] | extremenetworks/pybind | pybind/slxos/v17r_1_01a/openflow_state/flow_id/__init__.py | 74,798 | Python |
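A minimal usage sketch for the generated flow_id bindings above, not taken from the original package. The import path and the flow_id class name are inferred from the record's repository path, and, like the generated code itself (unicode, long, __builtin__), the sketch assumes Python 2 with pyangbind installed.
from pybind.slxos.v17r_1_01a.openflow_state.flow_id import flow_id
flow = flow_id()
# The attributes listed at the bottom of the class are read-only properties;
# operational (config: false) leaves are populated through the private _set_*
# methods, exactly as the setter docstrings describe.
flow._set_source_ip(u"10.0.0.1")
flow._set_destination_ip(u"10.0.0.2")
flow._set_total_packets(1024)
assert flow.source_ip == u"10.0.0.1"
assert flow.total_packets == 1024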
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.16.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class WikiCommit(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'author': 'CommitUser',
'commiter': 'CommitUser',
'message': 'str',
'sha': 'str'
}
attribute_map = {
'author': 'author',
'commiter': 'commiter',
'message': 'message',
'sha': 'sha'
}
def __init__(self, author=None, commiter=None, message=None, sha=None): # noqa: E501
"""WikiCommit - a model defined in Swagger""" # noqa: E501
self._author = None
self._commiter = None
self._message = None
self._sha = None
self.discriminator = None
if author is not None:
self.author = author
if commiter is not None:
self.commiter = commiter
if message is not None:
self.message = message
if sha is not None:
self.sha = sha
@property
def author(self):
"""Gets the author of this WikiCommit. # noqa: E501
:return: The author of this WikiCommit. # noqa: E501
:rtype: CommitUser
"""
return self._author
@author.setter
def author(self, author):
"""Sets the author of this WikiCommit.
:param author: The author of this WikiCommit. # noqa: E501
:type: CommitUser
"""
self._author = author
@property
def commiter(self):
"""Gets the commiter of this WikiCommit. # noqa: E501
:return: The commiter of this WikiCommit. # noqa: E501
:rtype: CommitUser
"""
return self._commiter
@commiter.setter
def commiter(self, commiter):
"""Sets the commiter of this WikiCommit.
:param commiter: The commiter of this WikiCommit. # noqa: E501
:type: CommitUser
"""
self._commiter = commiter
@property
def message(self):
"""Gets the message of this WikiCommit. # noqa: E501
:return: The message of this WikiCommit. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this WikiCommit.
:param message: The message of this WikiCommit. # noqa: E501
:type: str
"""
self._message = message
@property
def sha(self):
"""Gets the sha of this WikiCommit. # noqa: E501
:return: The sha of this WikiCommit. # noqa: E501
:rtype: str
"""
return self._sha
@sha.setter
def sha(self, sha):
"""Sets the sha of this WikiCommit.
:param sha: The sha of this WikiCommit. # noqa: E501
:type: str
"""
self._sha = sha
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WikiCommit, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WikiCommit):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.666667 | 89 | 0.546485 | [
"MIT"
] | r7l/python-gitea-api | gitea_api/models/wiki_commit.py | 4,851 | Python |
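A short usage sketch for the generated WikiCommit model above, not part of the original file. The import path follows the swagger-codegen package layout implied by the record's path, the commit values are invented, and the 'commiter' spelling is kept because it comes from the upstream Gitea API definition.
from gitea_api.models.wiki_commit import WikiCommit
commit = WikiCommit(message="Update Home page", sha="0a1b2c3d")
as_dict = commit.to_dict()
# author and commiter were not supplied, so they remain None in the dict.
assert as_dict == {"author": None, "commiter": None,
                   "message": "Update Home page", "sha": "0a1b2c3d"}
print(commit)  # __repr__ pretty-prints the same dict via to_str()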
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['DataCollectionRule']
class DataCollectionRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
data_collection_rule_name: Optional[pulumi.Input[str]] = None,
data_flows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]]] = None,
data_sources: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
destinations: Optional[pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Definition of ARM tracked top level resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] data_collection_rule_name: The name of the data collection rule. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DataFlowArgs']]]] data_flows: The specification of data flows.
:param pulumi.Input[pulumi.InputType['DataCollectionRuleDataSourcesArgs']] data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
:param pulumi.Input[str] description: Description of the data collection rule.
:param pulumi.Input[pulumi.InputType['DataCollectionRuleDestinationsArgs']] destinations: The specification of destinations.
:param pulumi.Input[str] location: The geo-location where the resource lives.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['data_collection_rule_name'] = data_collection_rule_name
if data_flows is None and not opts.urn:
raise TypeError("Missing required property 'data_flows'")
__props__['data_flows'] = data_flows
__props__['data_sources'] = data_sources
__props__['description'] = description
if destinations is None and not opts.urn:
raise TypeError("Missing required property 'destinations'")
__props__['destinations'] = destinations
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/v20191101preview:DataCollectionRule"), pulumi.Alias(type_="azure-native:insights:DataCollectionRule"), pulumi.Alias(type_="azure-nextgen:insights:DataCollectionRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataCollectionRule, __self__).__init__(
'azure-native:insights/v20191101preview:DataCollectionRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataCollectionRule':
"""
Get an existing DataCollectionRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["data_flows"] = None
__props__["data_sources"] = None
__props__["description"] = None
__props__["destinations"] = None
__props__["etag"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
return DataCollectionRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dataFlows")
def data_flows(self) -> pulumi.Output[Sequence['outputs.DataFlowResponse']]:
"""
The specification of data flows.
"""
return pulumi.get(self, "data_flows")
@property
@pulumi.getter(name="dataSources")
def data_sources(self) -> pulumi.Output[Optional['outputs.DataCollectionRuleResponseDataSources']]:
"""
The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
"""
return pulumi.get(self, "data_sources")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the data collection rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def destinations(self) -> pulumi.Output['outputs.DataCollectionRuleResponseDestinations']:
"""
The specification of destinations.
"""
return pulumi.get(self, "destinations")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Resource entity tag (ETag).
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The resource provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 42.292683 | 264 | 0.647059 | [
"Apache-2.0"
] | pulumi-bot/pulumi-azure-native | sdk/python/pulumi_azure_native/insights/v20191101preview/data_collection_rule.py | 8,670 | Python |
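A hedged sketch of how the DataCollectionRule resource above might be declared inside a Pulumi program, not an official provider example. The module path matches the record, but the stream name, the workspace ID placeholder, and the plain-dict key casing are illustrative assumptions; the typed DataFlowArgs / DataCollectionRuleDestinationsArgs inputs imported at the top of the file could be used instead of dicts.
import pulumi
from pulumi_azure_native.insights.v20191101preview import DataCollectionRule
# data_flows, destinations and resource_group_name are the required inputs;
# everything else is optional, as the constructor checks above enforce.
rule = DataCollectionRule(
    "exampleRule",
    resource_group_name="my-rg",
    location="westus2",
    data_flows=[{
        "streams": ["Microsoft-Syslog"],
        "destinations": ["centralWorkspace"],
    }],
    destinations={
        "logAnalytics": [{
            "name": "centralWorkspace",
            "workspaceResourceId": "<log-analytics-workspace-resource-id>",
        }],
    },
)
pulumi.export("ruleName", rule.name)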
#
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
#!/usr/bin/python
#
# This script sets up a render server machine
#
import sys
import os
import subprocess
import shutil
import glob
# NOTE - if you change machinimaPath, you need to update BuildDirs()
machinimaPath = 'c:\\Multiverse\\Machinima\\'
treePath = 'c:\\Multiverse\\tree\\'
toolsToCopy = [ 'processJob.py', 'sendJob.py', 'mencoder.exe', 'machinima.sshkey.ppk', 'pscp.exe', 'MP4Box.exe' ]
toolSourceDir = os.path.join(treePath, 'Tools\\Machinima')
srcMediaDir = os.path.join(treePath, 'Media')
# map our internal world name to the public world name
# used when copying assets from our source tree to the production media area
worldNameMap = {
'nyts' : 'times_square',
'friendworld' : 'friendworld2'
}
# requires a full path name including drive spec
# returns a list of directory components
def SplitDirs(path):
# split off the drive spec
drive, path = os.path.splitdrive(path)
# remove ending slash if it is present
if path.endswith('\\'):
path, empty = os.path.split(path)
pathList = []
while path is not None:
path, element = os.path.split(path)
if len(element) == 0:
pathList.append(path)
path = None
else:
pathList.append(element)
pathList.append(drive)
pathList.reverse()
return pathList
def BuildPath(pathList, numDirs):
path = os.path.join(pathList[0], pathList[1])
for i in range(2,numDirs):
path = os.path.join(path, pathList[i])
return path
# make sure all the directories in the path exist
def MakePath(fullpath):
pathList = SplitDirs(fullpath)
for i in range(3, len(pathList) + 1):
path = BuildPath(pathList, i)
if not os.path.exists(path):
print 'Making %s' % path
os.mkdir(path)
def MakeDirIfNeeded(path):
if not os.path.exists(path):
os.mkdir(path, 0777)
def BuildDirs():
MakePath(machinimaPath)
MakeDirIfNeeded(os.path.join(machinimaPath, 'Tools'))
MakeDirIfNeeded(os.path.join(machinimaPath, 'Media'))
MakeDirIfNeeded(os.path.join(machinimaPath, 'Jobs'))
def CopyTools():
destPath = os.path.join(machinimaPath, 'Tools')
for tool in toolsToCopy:
srcPath = os.path.join(toolSourceDir, tool)
shutil.copy(srcPath, destPath)
def CopyTree(src, dst):
"""Because shutil.copytree() fails if a directory already exists"""
srcLen = len(src)
for path, dirs, files in os.walk(src):
if '.svn' in path:
continue
dstPath = path[srcLen:]
for dir in dirs:
if '.svn' in dir:
continue
dstDir = os.path.join(dst, dstPath, dir)
MakePath(dstDir)
for file in files:
srcPath = os.path.join(path, file)
subPath = srcPath[srcLen:]
dstPath = os.path.join(dst, subPath)
shutil.copy2(srcPath, dstPath)
def CopyMedia():
sceneDirs = glob.glob(os.path.join(srcMediaDir, '*\\Machinima\\*\\'))
for sceneDir in sceneDirs:
dirList = SplitDirs(sceneDir)
numElements = len(dirList)
scene = dirList[numElements-1]
world = worldNameMap[dirList[numElements-3]]
destMediaDir = os.path.join(machinimaPath, 'Media\\')
destDir = os.path.join(os.path.join(destMediaDir, world), scene)
CopyTree(sceneDir, destDir)
saMediaSrc = os.path.join(srcMediaDir, 'standalone\\')
saMediaDest = os.path.join(destMediaDir, 'standalone\\')
CopyTree(saMediaSrc, saMediaDest)
BuildDirs()
CopyTools()
CopyMedia()
| 32.724138 | 113 | 0.676923 | [
"MIT"
] | AustralianDisabilityLimited/MultiversePlatform | tools/Machinima/setupRenderHost.py | 4,745 | Python |
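A small illustration of what the path helpers in the setup script above produce; it is meant to run alongside the script (same Python 2, Windows environment) and the example paths are arbitrary.
pathList = SplitDirs('c:\\Multiverse\\Machinima\\Jobs')
print pathList  # ['c:', '\\', 'Multiverse', 'Machinima', 'Jobs']
# MakePath() walks such a list with BuildPath() and creates each missing
# directory level in turn, which is how BuildDirs() guarantees the tree exists.
MakePath('c:\\Multiverse\\Machinima\\Jobs\\render01')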
# INSTRUCTIONS
# Translate the text and write it between the quotation marks
# EXAMPLE: original -> "This text is in english: value {0}"
# translation -> "Aquest text està en anglès: valor {0}"
# If you see something like {0} or {1}, keep it in the translated sentence
# Pay special attention to elements like ":", etc.
lang_2_9_2 = {
"Reload log": "",
"Do not show the clock on secondary monitors": "",
"Disable clock taskbar background color (make clock transparent)": "",
"Open the welcome wizard": "",
" (ALPHA STAGE, MAY NOT WORK)": "",
"Welcome to ElevenClock": "",
"Skip": "",
"Start": "",
"Next": "",
"Finish": "",
}
lang_2_9 = lang_2_9_2 | {
"Task Manager": "",
"Change date and time": "",
"Notification settings": "",
"Updates, icon tray, language": "",
"Hide extended options from the clock right-click menu (needs a restart to be aplied)": "",
"Fullscreen behaviour, clock position, 1st monitor clock, other miscellanious settings": "",
'Add the "Show Desktop" button on the left corner of every clock': '',
'You might need to set a custom background color for this to work. More info <a href="{0}" style="color:DodgerBlue">HERE</a>': '',
"Clock's font, font size, font color and background, text alignment": "",
"Date format, Time format, seconds,weekday, weeknumber, regional settings": "",
"Testing features and error-fixing tools": "",
"Language pack author(s), help translating ElevenClock": "",
"Info, report a bug, submit a feature request, donate, about": "",
"Log, debugging information": "",
}
lang_2_8 = lang_2_9 | {
"Force the clock to be at the top of the screen": "",
"Show the clock on the primary screen": "",
"Use a custom font color": "",
"Use a custom background color": "",
"Align the clock text to the center": "",
"Select custom color": "",
"Hide the clock when a program occupies all screens": "",
}
lang2_7_bis = lang_2_8 | {
"Use a custom font": "",
"Use a custom font size": "",
"Enable hide when multi-monitor fullscreen apps are running": "",
"<b>{0}</b> needs to be enabled to change this setting": "",
"<b>{0}</b> needs to be disabled to change this setting": "",
}
lang2_7 = lang2_7_bis | {
" (This feature has been disabled because it should work by default. If it is not, please report a bug)": "",
"ElevenClock's language": ""
}
lang2_6 = lang2_7 | {
"About Qt6 (PySide6)": "",
"About": "",
"Alternative non-SSL update server (This might help with SSL errors)": "",
"Fixes and other experimental features: (Use ONLY if something is not working)": "",
"Show week number on the clock": "",
}
lang2_5 = lang2_6 | {
"Hide the clock when RDP Client or Citrix Workspace are running": "",
"Clock Appearance:": "",
"Force the clock to have black text": "",
" - It is required that the Dark Text checkbox is disabled": "",
"Debbugging information:": "",
"Open ElevenClock's log": "",
}
lang2_4 = lang2_5 | {
# Added text in version 2.4
"Show the clock on the primary screen (Useful if clock is set on the left)": "",
"Show weekday on the clock" :"",
}
lang2_3 = lang2_4 | {
#Context menu
"ElevenClock Settings" :"Instellingen ElevenClock", # Also settings title
"Reload Clocks" :"Herlaad Klokken",
"ElevenClock v{0}" :"ElevenClock v{0}",
"Restart ElevenClock" :"ElevenClock opnieuw opstarten",
"Hide ElevenClock" :"Verberg ElevenClock",
"Quit ElevenClock" :"Afsluiten ElevenClock",
#General settings section
"General Settings:" :"Algemene Instellingen:",
"Automatically check for updates" :"Controlleer automatisch voor updates",
"Automatically install available updates" :"Installeer automatisch beschikbare updates",
"Enable really silent updates" :"Schakel hele stille updates in",
"Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)" :"Omzeil update provider autenticatie check (NIET AANBEVOLEN, GEBRUIK OP EIGEN RISICO)",
"Show ElevenClock on system tray" :"Laat ElevenClock in systeemvak zien",
"Alternative clock alignment (may not work)" :"Alternatieve klok uitlijning (werkt mogelijk niet)",
"Change startup behaviour" :"Verander automatisch starten gedrag",
"Change" :"Verander",
"<b>Update to the latest version!</b>" :"<b>Update naar de nieuwste versie!</b>",
"Install update" :"Installeer update",
#Clock settings
"Clock Settings:" :"Klok instellingen:",
"Hide the clock in fullscreen mode" :"Verberg de klok in volledigscherm applicaties",
"Hide the clock when RDP client is active" :"Verberg de klok wanneer RDP client actief is",
"Force the clock to be at the bottom of the screen" :"Forceer de klok om onderaan het scherm te staan",
"Show the clock when the taskbar is set to hide automatically" :"Klok weergeven als de taakbalk is ingesteld om automatisch te verbergen",
"Fix the hyphen/dash showing over the month" :"Corrigeer het koppelteken/streepje dat gedurende de maand wordt weergegeven",
"Force the clock to have white text" :"Forceer de klok om witte tekst te hebben",
"Show the clock at the left of the screen" :"Toon de klok aan de linkerkant van het scherm ",
#Date & time settings
"Date & Time Settings:" :"Datum & Tijd instellingen:",
"Show seconds on the clock" :"Toon seconden op de klok",
"Show date on the clock" :"Toon de datum op de klok",
"Show time on the clock" :"Toon de tijd op de klok",
"Change date and time format (Regional settings)" :"Datum en Tijd aanpassen (regionale instellingen)",
"Regional settings" :"Regionale instellingen",
#About the language pack
"About the language pack:" :"Over het taalpakket:",
"Translated to English by martinet101" :"Vertaald naar het Nederlands door Bugs", # Here, make sute to give you some credits: Translated to LANGUAGE by USER/NAME/PSEUDONYM/etc.
"Translate ElevenClock to your language" :"Vertaal ElevenClock naar jou taal",
"Get started" :"Begin",
#About ElevenClock
"About ElevenClock version {0}:" :"Over ElevenClock versie {0}:",
"View ElevenClock's homepage" :"Website van ElevenClock",
"Open" :"Open",
"Report an issue/request a feature" :"Rapporteer een probleem/vraag een feature aan",
"Report" :"Rapporteer",
"Support the dev: Give me a coffee☕" :"Steun de ontwikkelaar: Geef mij een kopje koffie☕",
"Open page" :"Open pagina",
"Icons by Icons8" :"Iconen door Icons8", # Here, the word "Icons8" should not be translated
"Webpage" :"Webpagina",
"Close settings" :"Instellingen sluiten",
"Close" :"Sluiten",
}
lang = lang2_3
| 54.630137 | 187 | 0.566825 | [
"Apache-2.0"
] | IPriam/ElevenClock | elevenclock/lang/lang_nl.py | 7,982 | Python |
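A tiny, self-contained illustration (not from ElevenClock itself) of how the version-layered dictionaries above compose with the dict union operator (Python 3.9+): each layer adds the strings introduced by one release, an empty value means "not translated yet", and a consumer can fall back to the English key in that case.
base = {"Close": "Sluiten", "Open the welcome wizard": ""}  # "" = untranslated
extra = {"Skip": "", "Start": ""}
merged = base | extra  # the right-hand operand wins on duplicate keys
def tr(key, table=merged):
    # Fall back to the English key when no translation has been provided.
    return table.get(key) or key
assert tr("Close") == "Sluiten"
assert tr("Skip") == "Skip"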
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from functools import partial
import pytest
import torch
from sklearn.metrics import mean_tweedie_deviance
from torch import Tensor
from tests.helpers import seed_all
from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester
from torchmetrics.functional.regression.tweedie_deviance import tweedie_deviance_score
from torchmetrics.regression.tweedie_deviance import TweedieDevianceScore
seed_all(42)
Input = namedtuple("Input", ["preds", "targets"])
_single_target_inputs1 = Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
targets=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_single_target_inputs2 = Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
targets=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs = Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 5),
targets=torch.rand(NUM_BATCHES, BATCH_SIZE, 5),
)
def _sk_deviance(preds: Tensor, targets: Tensor, power: float):
sk_preds = preds.view(-1).numpy()
sk_target = targets.view(-1).numpy()
return mean_tweedie_deviance(sk_target, sk_preds, power=power)
@pytest.mark.parametrize("power", [-0.5, 0, 1, 1.5, 2, 3])
@pytest.mark.parametrize(
"preds, targets",
[
(_single_target_inputs1.preds, _single_target_inputs1.targets),
(_single_target_inputs2.preds, _single_target_inputs2.targets),
(_multi_target_inputs.preds, _multi_target_inputs.targets),
],
)
class TestDevianceScore(MetricTester):
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("dist_sync_on_step", [True, False])
def test_deviance_scores_class(self, ddp, dist_sync_on_step, preds, targets, power):
self.run_class_metric_test(
ddp,
preds,
targets,
TweedieDevianceScore,
partial(_sk_deviance, power=power),
dist_sync_on_step,
metric_args=dict(power=power),
)
def test_deviance_scores_functional(self, preds, targets, power):
self.run_functional_metric_test(
preds,
targets,
tweedie_deviance_score,
partial(_sk_deviance, power=power),
metric_args=dict(power=power),
)
    def test_tweedie_deviance_differentiability(self, preds, targets, power):
self.run_differentiability_test(
preds, targets, metric_module=TweedieDevianceScore, metric_functional=tweedie_deviance_score
)
# Tweedie Deviance Score half + cpu does not work due to missing support in torch.log
@pytest.mark.xfail(reason="TweedieDevianceScore metric does not support cpu + half precision")
    def test_tweedie_deviance_half_cpu(self, preds, targets, power):
metric_args = {"power": power}
self.run_precision_test_cpu(
preds,
targets,
metric_module=TweedieDevianceScore,
metric_functional=tweedie_deviance_score,
metric_args=metric_args,
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
    def test_tweedie_deviance_half_gpu(self, preds, targets, power):
metric_args = {"power": power}
self.run_precision_test_gpu(
preds,
targets,
metric_module=TweedieDevianceScore,
metric_functional=tweedie_deviance_score,
metric_args=metric_args,
)
def test_error_on_different_shape(metric_class=TweedieDevianceScore):
metric = metric_class()
with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
metric(torch.randn(100), torch.randn(50))
def test_error_on_invalid_inputs(metric_class=TweedieDevianceScore):
with pytest.raises(ValueError, match="Deviance Score is not defined for power=0.5."):
metric_class(power=0.5)
metric = metric_class(power=1)
with pytest.raises(
ValueError, match="For power=1, 'preds' has to be strictly positive and 'targets' cannot be negative."
):
metric(torch.tensor([-1.0, 2.0, 3.0]), torch.rand(3))
with pytest.raises(
ValueError, match="For power=1, 'preds' has to be strictly positive and 'targets' cannot be negative."
):
metric(torch.rand(3), torch.tensor([-1.0, 2.0, 3.0]))
metric = metric_class(power=2)
with pytest.raises(ValueError, match="For power=2, both 'preds' and 'targets' have to be strictly positive."):
metric(torch.tensor([-1.0, 2.0, 3.0]), torch.rand(3))
with pytest.raises(ValueError, match="For power=2, both 'preds' and 'targets' have to be strictly positive."):
metric(torch.rand(3), torch.tensor([-1.0, 2.0, 3.0]))
def test_corner_case_for_power_at_1(metric_class=TweedieDevianceScore):
    """Test that the corner case for power=1.0 produces a valid result."""
    metric = metric_class()
targets = torch.tensor([0, 1, 0, 1])
preds = torch.tensor([0.1, 0.1, 0.1, 0.1])
val = metric(preds, targets)
assert val != 0.0
assert not torch.isnan(val)
| 37.364238 | 114 | 0.701347 | [
"Apache-2.0"
] | Abdelrhman-Hosny/metrics | tests/regression/test_tweedie_deviance.py | 5,642 | Python |
{
'name': 'Chapter 06, Recipe 4 code',
'summary': 'Update values of recordset records',
'depends': ['my_module'], # from Chapter 4
}
| 24.333333 | 52 | 0.609589 | [
"MIT"
] | PacktPublishing/Odoo-11-Development-Coobook-Second-Edition | Chapter06/ch06_r05/some_model_ch06r05/__manifest__.py | 146 | Python |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Publich API of batch inference."""
from typing import Tuple, TypeVar, Union
import apache_beam as beam
import tensorflow as tf
from tfx_bsl.beam import run_inference
from tfx_bsl.public.proto import model_spec_pb2
from tensorflow_serving.apis import prediction_log_pb2
_K = TypeVar('_K')
_INPUT_TYPE = Union[tf.train.Example, tf.train.SequenceExample, bytes]
_OUTPUT_TYPE = prediction_log_pb2.PredictionLog
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[_INPUT_TYPE, Tuple[_K, _INPUT_TYPE]])
@beam.typehints.with_output_types(Union[_OUTPUT_TYPE, Tuple[_K, _OUTPUT_TYPE]])
def RunInference( # pylint: disable=invalid-name
examples: beam.pvalue.PCollection,
inference_spec_type: model_spec_pb2.InferenceSpecType
) -> beam.pvalue.PCollection:
"""Run inference with a model.
There are two types of inference you can perform using this PTransform:
1. In-process inference from a SavedModel instance. Used when
`saved_model_spec` field is set in `inference_spec_type`.
2. Remote inference by using a service endpoint. Used when
`ai_platform_prediction_model_spec` field is set in
`inference_spec_type`.
TODO(b/131873699): Add support for the following features:
1. tf.train.SequenceExample as Input for RemotePredict.
2. beam.Shared() initialization via Fingerprint for models CSE.
3. Models as SideInput.
4. TPU models.
Args:
examples: A PCollection containing examples of the following possible kinds,
each with their corresponding return type.
- PCollection[Example] -> PCollection[PredictionLog]
* Works with Classify, Regress, MultiInference, Predict and
RemotePredict.
- PCollection[SequenceExample] -> PCollection[PredictionLog]
* Works with Predict and (serialized) RemotePredict.
- PCollection[bytes] -> PCollection[PredictionLog]
* For serialized Example: Works with Classify, Regress,
MultiInference, Predict and RemotePredict.
* For everything else: Works with Predict and RemotePredict.
- PCollection[Tuple[K, Example]] -> PCollection[
Tuple[K, PredictionLog]]
* Works with Classify, Regress, MultiInference, Predict and
RemotePredict.
- PCollection[Tuple[K, SequenceExample]] -> PCollection[
Tuple[K, PredictionLog]]
* Works with Predict and (serialized) RemotePredict.
- PCollection[Tuple[K, bytes]] -> PCollection[
Tuple[K, PredictionLog]]
* For serialized Example: Works with Classify, Regress,
MultiInference, Predict and RemotePredict.
* For everything else: Works with Predict and RemotePredict.
inference_spec_type: Model inference endpoint.
Returns:
A PCollection (possibly keyed) containing prediction logs.
"""
return (
examples |
'RunInferenceImpl' >> run_inference.RunInferenceImpl(inference_spec_type))
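# --- Illustrative usage sketch (not part of the original module) ---
# Wires RunInference into a Beam pipeline for local in-process inference.
# The SavedModel path and the TFRecord file patterns below are hypothetical
# placeholders, not values taken from the original source.
def _example_pipeline(saved_model_path='/tmp/my_saved_model'):  # pragma: no cover
  inference_spec_type = model_spec_pb2.InferenceSpecType(
      saved_model_spec=model_spec_pb2.SavedModelSpec(model_path=saved_model_path))
  with beam.Pipeline() as p:
    _ = (
        p
        | 'ReadExamples' >> beam.io.ReadFromTFRecord(
            '/tmp/examples*.tfrecord',
            coder=beam.coders.ProtoCoder(tf.train.Example))
        | 'RunInference' >> RunInference(inference_spec_type)
        | 'WritePredictionLogs' >> beam.io.WriteToTFRecord(
            '/tmp/prediction_logs',
            coder=beam.coders.ProtoCoder(prediction_log_pb2.PredictionLog)))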
| 41.304348 | 80 | 0.683947 | [
"Apache-2.0"
] | RossKohler/tfx-bsl | tfx_bsl/public/beam/run_inference.py | 3,800 | Python |
import warnings
from time import sleep
from pyspedas import time_double
from pytplot import get_data, store_data, options
import numpy as np
try:
from hapiclient import hapi as load_hapi
except ImportError:
    print('hapiclient not found; install with: "pip install hapiclient"')
def hapi(trange=None, server=None, dataset=None, parameters='', suffix='',
catalog=False):
"""
Loads data from a HAPI server into pytplot variables
Parameters
-----------
trange: list of str or list of float
Time range to load the data for
server: str
HAPI server to load the data from
dataset: str
HAPI dataset to load
parameters: str or list of str
Parameters in the dataset to load; default
is to load them all
suffix: str
Suffix to append to the tplot variables
catalog: bool
If True, returns the server's catalog of datasets
Returns
-------
List of tplot variables created.
"""
if server is None:
print('Error, no server specified; example servers include:')
print('- https://cdaweb.gsfc.nasa.gov/hapi')
print('- https://pds-ppi.igpp.ucla.edu/hapi')
print('- http://planet.physics.uiowa.edu/das/das2Server/hapi')
print('- https://iswa.gsfc.nasa.gov/IswaSystemWebApp/hapi')
print('- http://lasp.colorado.edu/lisird/hapi')
return
if catalog:
catalog = load_hapi(server)
items = []
if 'catalog' in catalog.keys():
items = catalog['catalog']
print('Available datasets: ')
for item in items:
if 'title' in item.keys():
print(item['id'] + ': ' + item['title'])
else:
print(item['id'])
return
if dataset is None:
print('Error, no dataset specified; please see the catalog for a list of available data sets.')
return
if trange is None:
print('Error, no trange specified')
return
if isinstance(parameters, list):
parameters = ','.join(parameters)
opts = {'logging': False}
data, hapi_metadata = load_hapi(server, dataset, parameters, trange[0], trange[1], **opts)
out_vars = []
# loop through the parameters in this dataset
params = hapi_metadata['parameters']
for param in params[1:]:
spec = False
param_name = param.get('name')
print('Loading ' + param_name)
# load the data only for this parameter
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ResourceWarning)
data, hapi_metadata = load_hapi(server, dataset, param_name, trange[0], trange[1], **opts)
        except Exception:
            print('Error loading HAPI parameter: ' + param_name)
            continue
timestamps = [datapoint[0] for datapoint in data]
unixtimes = [time_double(timestamp.decode('utf-8')) for timestamp in timestamps]
param_type = hapi_metadata['parameters'][1].get('type')
if param_type is None:
param_type = 'double'
data_size = hapi_metadata['parameters'][1].get('size')
if data_size is None:
single_line = True
try:
if param_type == 'double':
single_line = isinstance(data[0][1], np.float64)
elif param_type == 'integer':
single_line = isinstance(data[0][1], np.int32)
            except IndexError:
                print('Error determining the data type for parameter: ' + param_name)
                continue
if single_line:
data_out = np.zeros((len(data)))
else:
try:
data_out = np.zeros((len(data), len(data[0][1])))
            except TypeError:
                print('Error determining the data shape for parameter: ' + param_name)
                continue
for idx, datapoint in enumerate(data):
if single_line:
data_out[idx] = datapoint[1]
else:
data_out[idx, :] = datapoint[1]
data_out = data_out.squeeze()
# check for fill values
fill_value = hapi_metadata['parameters'][1].get('fill')
if fill_value is not None:
if param_type == 'double':
fill_value = float(fill_value)
data_out[data_out == fill_value] = np.nan
elif param_type == 'integer':
# NaN is only floating point, so we replace integer fill
# values with 0 instead of NaN
fill_value = int(fill_value)
data_out[data_out == fill_value] = 0
bins = param.get('bins')
if bins is not None:
centers = bins[0].get('centers')
if centers is not None:
spec = True
data_table = {'x': unixtimes, 'y': data_out}
if spec:
data_table['v'] = centers
saved = store_data(param_name + suffix, data=data_table)
metadata = get_data(param_name + suffix, metadata=True)
metadata['HAPI'] = hapi_metadata
if spec:
options(param_name + suffix, 'spec', True)
if saved:
out_vars.append(param_name + suffix)
# wait for a second before going to the next variable
# to avoid hitting the server too quickly
sleep(1)
return out_vars | 30.26257 | 106 | 0.564335 | [
"MIT"
] | pulupa/pyspedas | pyspedas/hapi/hapi.py | 5,417 | Python |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 21:37:26 2021
@author: brian
"""
import os
os.chdir('C:/Users/brian/Desktop/All/UWEC/DS785_Capstone/Project')
import brawl_data as bd
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from statsmodels.stats.proportion import proportion_confint
all_win_rates = bd.sql_get_results('dbname=BrawlStars user=postgres password=PG!3%7(', '', '', '', 0, 0, my_id = '',
custom_query = "SELECT mode, map, brawler, wins, matches_played FROM population_aggs_high;")
all_win_rates['win_rate'] = all_win_rates['wins']/all_win_rates['matches_played']
all_win_rates = all_win_rates.loc[all_win_rates['matches_played']>10,:]
win_rate_extremes = all_win_rates.groupby(['mode', 'map']).win_rate.agg(['min', 'max'])
win_rate_extremes = win_rate_extremes.reset_index()
win_rate_extremes['win_rate_differential'] = win_rate_extremes['max'] - win_rate_extremes['min']
win_rate_extremes = win_rate_extremes.sort_values(by = 'win_rate_differential')
win_rate_extremes.columns = ['Mode', 'Map', 'Minimum Brawler Win Rate', 'Maximum Brawler Win Rate', 'Win Rate Differential']
sns.set_style("darkgrid")
sns.scatterplot(data=win_rate_extremes,
x='Minimum Brawler Win Rate',
y='Maximum Brawler Win Rate',
hue='Win Rate Differential',
palette=sns.cubehelix_palette(start=2, rot=0, dark=.2, light=.8, as_cmap=True))
plt.title('Win Rates Differences for Brawlers Across Each Map-Mode')
sns.violinplot(x=win_rate_extremes['Win Rate Differential'])
plt.title('Differences Between Maximum and Minimum Win Rates for Brawlers Across Each Map-Mode')
for_example = all_win_rates.loc[all_win_rates['map'] == 'Split', :].sort_values('win_rate', ascending = False)
for_example = for_example.loc[:,['map', 'mode', 'brawler', 'win_rate']]
for_example = pd.concat([for_example.head(5),for_example.tail(5)])
for_example_2 = pd.concat([win_rate_extremes.head(5),win_rate_extremes.tail(5)])
for_example_2 = for_example_2.sort_values('Win Rate Differential', ascending=False)
example = bd.get_recommendation('dbname=BrawlStars user=postgres password=PG!3%7(', 'records', '#2G080980', 'brawlBall', 'Sneaky Fields', 0, 4)
example = pd.concat([example.head(5),example.tail(5)])
my_recs = bd.get_all_recommendations('dbname=BrawlStars user=postgres password=PG!3%7(', 'records', '#8VUPQ2PP', my_trophy_min = 500)
map_weaknesses = bd.get_map_weaknesses('dbname=BrawlStars user=postgres password=PG!3%7(', 'records')
map_weaknesses.head(10)
all_individual_history = bd.sql_get_results('dbname=BrawlStars user=postgres password=PG!3%7(', '', '', '', 0, 0, my_id = '',
custom_query = "SELECT * FROM individual_aggs_high UNION ALL SELECT * FROM individual_aggs_mid UNION ALL SELECT * FROM individual_aggs_low;")
all_population_history = bd.sql_get_results('dbname=BrawlStars user=postgres password=PG!3%7(', '', '', '', 0, 0, my_id = '',
custom_query = "SELECT * FROM population_aggs_high UNION ALL SELECT * FROM population_aggs_mid UNION ALL SELECT * FROM population_aggs_low;")
#Calculate win rate confidence intervals
all_individual_history['win_rate'] = all_individual_history['wins'] / all_individual_history['matches_played']
all_individual_history['ci.lower'],all_individual_history['ci.upper'] = zip(*all_individual_history.apply(lambda row : proportion_confint(count = row['wins'], nobs = row['matches_played'], alpha = .1, method = 'agresti_coull'), axis = 1))
all_population_history['win_rate'] = all_population_history['wins'] / all_population_history['matches_played']
all_individual_history = all_population_history.merge(all_individual_history, how = 'left', left_on = ['mode', 'map', 'brawler'], right_on = ['mode', 'map', 'brawler'])
#Compare population to individual history and inform recommendations
better = (all_individual_history['win_rate_x'] < all_individual_history['ci.lower']) & (all_individual_history['matches_played_y'] >= 5)
worse = (all_individual_history['win_rate_x'] > all_individual_history['ci.upper']) & (all_individual_history['matches_played_y'] >= 5)
sum(better) + sum(worse) | 61.426471 | 238 | 0.741681 | [
"MIT"
] | brianjstroh/BrawlStars | Capstone_Tables&Figures_Results_Graphs.py | 4,177 | Python |
# -*- coding: utf-8 -*-
"""
hyper/tls
~~~~~~~~~
Contains the TLS/SSL logic for use in hyper.
"""
import os.path as path
import six
from .common.exceptions import MissingCertFile
from .compat import ignore_missing, ssl
NPN_PROTOCOL = 'h2'
H2_NPN_PROTOCOLS = [NPN_PROTOCOL, 'h2-16', 'h2-15', 'h2-14']
SUPPORTED_NPN_PROTOCOLS = H2_NPN_PROTOCOLS + ['http/1.1']
H2C_PROTOCOL = 'h2c'
# We have a singleton SSLContext object. There's no reason to be creating one
# per connection.
_context = None
# Work out where our certificates are.
cert_loc = path.join(path.dirname(__file__), 'certs.pem')
def wrap_socket(sock, server_hostname, ssl_context=None, force_proto=None):
"""
A vastly simplified SSL wrapping function. We'll probably extend this to
do more things later.
"""
global _context
if ssl_context:
# if an SSLContext is provided then use it instead of default context
_ssl_context = ssl_context
else:
# create the singleton SSLContext we use
if _context is None: # pragma: no cover
_context = init_context()
_ssl_context = _context
# the spec requires SNI support
ssl_sock = _ssl_context.wrap_socket(sock, server_hostname=server_hostname)
# Setting SSLContext.check_hostname to True only verifies that the
# post-handshake servername matches that of the certificate. We also need
# to check that it matches the requested one.
if _ssl_context.check_hostname: # pragma: no cover
try:
ssl.match_hostname(ssl_sock.getpeercert(), server_hostname)
except AttributeError:
ssl.verify_hostname(ssl_sock, server_hostname) # pyopenssl
# Allow for the protocol to be forced externally.
proto = force_proto
# ALPN is newer, so we prefer it over NPN. The odds of us getting
# different answers is pretty low, but let's be sure.
with ignore_missing():
if proto is None:
proto = ssl_sock.selected_alpn_protocol()
with ignore_missing():
if proto is None:
proto = ssl_sock.selected_npn_protocol()
return (ssl_sock, proto)
def init_context(cert_path=None, cert=None, cert_password=None):
"""
Create a new ``SSLContext`` that is correctly set up for an HTTP/2
connection. This SSL context object can be customized and passed as a
parameter to the :class:`HTTPConnection <hyper.HTTPConnection>` class.
Provide your own certificate file in case you don’t want to use hyper’s
default certificate. The path to the certificate can be absolute or
relative to your working directory.
:param cert_path: (optional) The path to the certificate file of
“certification authority” (CA) certificates
:param cert: (optional) if string, path to ssl client cert file (.pem).
If tuple, ('cert', 'key') pair.
The certfile string must be the path to a single file in PEM format
containing the certificate as well as any number of CA certificates
needed to establish the certificate’s authenticity. The keyfile string,
if present, must point to a file containing the private key in.
Otherwise the private key will be taken from certfile as well.
:param cert_password: (optional) The password argument may be a function to
call to get the password for decrypting the private key. It will only
be called if the private key is encrypted and a password is necessary.
It will be called with no arguments, and it should return a string,
bytes, or bytearray. If the return value is a string it will be
encoded as UTF-8 before using it to decrypt the key. Alternatively a
string, bytes, or bytearray value may be supplied directly as the
password argument. It will be ignored if the private key is not
encrypted and no password is needed.
:returns: An ``SSLContext`` correctly set up for HTTP/2.
"""
cafile = cert_path or cert_loc
if not cafile or not path.exists(cafile):
err_msg = ("No certificate found at " + str(cafile) + ". Either " +
"ensure the default cert.pem file is included in the " +
"distribution or provide a custom certificate when " +
"creating the connection.")
raise MissingCertFile(err_msg)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.set_default_verify_paths()
context.load_verify_locations(cafile=cafile)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
with ignore_missing():
context.set_npn_protocols(SUPPORTED_NPN_PROTOCOLS)
with ignore_missing():
context.set_alpn_protocols(SUPPORTED_NPN_PROTOCOLS)
# required by the spec
context.options |= ssl.OP_NO_COMPRESSION
if cert is not None:
if not isinstance(cert, six.string_types):
context.load_cert_chain(cert[0], cert[1], cert_password)
else:
context.load_cert_chain(cert, password=cert_password)
return context
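# --- Illustrative usage sketch (not part of the original module) ---
# Shows how init_context() and wrap_socket() fit together; the host below is a
# placeholder and error handling is intentionally minimal.
def _example_h2_handshake(host='http2.example.com', port=443):  # pragma: no cover
    import socket
    ctx = init_context()
    raw_sock = socket.create_connection((host, port))
    ssl_sock, proto = wrap_socket(raw_sock, server_hostname=host, ssl_context=ctx)
    try:
        # proto is e.g. 'h2' when the server negotiated HTTP/2 via ALPN/NPN.
        return proto
    finally:
        ssl_sock.close()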
| 38.59542 | 79 | 0.69462 | [
"MIT"
] | qtacore/hyper | hyper/tls.py | 5,066 | Python |
'''This module implements concrete agent controllers for the rollout worker'''
import numpy as np
import os
import random
import rospkg
import rospy
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState, SpawnModel
from markov.agent_ctrl.constants import ConfigParams, BOT_CAR_Z, OBSTACLE_Z
from markov.track_geom.constants import SET_MODEL_STATE, SPAWN_SDF_MODEL, SPAWN_URDF_MODEL, ObstacleDimensions
from markov.track_geom.track_data import TrackData
from markov.agent_ctrl.agent_ctrl_interface import AgentCtrlInterface
from markov.rospy_wrappers import ServiceProxyWrapper
from markov import utils
from markov.reset.constants import AgentInfo
from markov.domain_randomizations.randomizer_manager import RandomizerManager
from markov.domain_randomizations.visual.model_visual_randomizer import ModelVisualRandomizer
from markov.domain_randomizations.constants import ModelRandomizerType
class ObstaclesCtrl(AgentCtrlInterface):
def __init__(self):
# Read ros parameters
        # OBJECT_POSITIONS will override NUMBER_OF_OBSTACLES and RANDOMIZE_OBSTACLE_LOCATIONS
self.object_locations = rospy.get_param("OBJECT_POSITIONS", [])
self.num_obstacles = int(rospy.get_param("NUMBER_OF_OBSTACLES", 0)) \
if not self.object_locations else len(self.object_locations)
self.min_obstacle_dist = float(rospy.get_param("MIN_DISTANCE_BETWEEN_OBSTACLES", 2.0))
self.randomize = utils.str2bool(rospy.get_param("RANDOMIZE_OBSTACLE_LOCATIONS", False))
self.use_bot_car = utils.str2bool(rospy.get_param("IS_OBSTACLE_BOT_CAR", False))
self.obstacle_names = ["obstacle_{}".format(i) for i in range(self.num_obstacles)]
self.obstacle_dimensions = ObstacleDimensions.BOT_CAR_DIMENSION if self.use_bot_car \
else ObstacleDimensions.BOX_OBSTACLE_DIMENSION
# track data
self.track_data = TrackData.get_instance()
# Wait for ros services
rospy.wait_for_service(SET_MODEL_STATE)
rospy.wait_for_service(SPAWN_SDF_MODEL)
rospy.wait_for_service(SPAWN_URDF_MODEL)
self.set_model_state = ServiceProxyWrapper(SET_MODEL_STATE, SetModelState)
self.spawn_sdf_model = ServiceProxyWrapper(SPAWN_SDF_MODEL, SpawnModel)
self.spawn_urdf_model = ServiceProxyWrapper(SPAWN_URDF_MODEL, SpawnModel)
# Load the obstacle sdf/urdf
obstacle_model_folder = "bot_car" if self.use_bot_car else "box_obstacle"
rospack = rospkg.RosPack()
deepracer_path = rospack.get_path("deepracer_simulation_environment")
obstacle_sdf_path = os.path.join(deepracer_path, "models", obstacle_model_folder, "model.sdf")
with open(obstacle_sdf_path, "r") as fp:
self.obstacle_sdf = fp.read()
# Set obstacle poses and spawn the obstacles
self.obstacle_poses = self._compute_obstacle_poses()
self._spawn_obstacles()
self._configure_randomizer()
def _configure_randomizer(self):
'''configure domain randomizer
'''
        for obstacle_name in self.obstacle_names:
            RandomizerManager.get_instance().add(ModelVisualRandomizer(model_name=obstacle_name,
                                                                       model_randomizer_type=ModelRandomizerType.MODEL))
def _compute_obstacle_poses(self):
obstacle_dists = []
obstacle_lanes = []
lane_choices = (self.track_data.inner_lane, self.track_data.outer_lane)
# use fix obstacle locations
if self.object_locations:
for object_location in self.object_locations:
# index 0 is obstacle_ndist and index 1 is obstacle_lane
object_location = object_location.split(",")
obstacle_dists.append(float(object_location[0]) * \
self.track_data.center_line.length)
# Inner lane is 1, outer lane is -1. If True, use outer lane
obstacle_lanes.append(lane_choices[int(object_location[1]) == -1])
else:
# Start with equally spaced
obstacle_start_dist = self.min_obstacle_dist
obstacle_end_dist = self.track_data.center_line.length - 1.0
obstacle_dists = np.linspace(obstacle_start_dist, obstacle_end_dist, self.num_obstacles)
# Perturb to achieve randomness
if self.randomize:
i_obstacle = list(range(self.num_obstacles))
random.shuffle(i_obstacle)
for i in i_obstacle:
lo = obstacle_start_dist if (i == 0) \
else obstacle_dists[i-1] + self.min_obstacle_dist
hi = obstacle_end_dist if (i == self.num_obstacles-1) \
else obstacle_dists[i+1] - self.min_obstacle_dist
if lo < hi:
obstacle_dists[i] = random.uniform(lo, hi)
# Select a random lane for each obstacle
for _ in obstacle_dists:
use_outer_lane = random.choice((False, True))
obstacle_lanes.append(lane_choices[use_outer_lane])
else:
# Alternate between lanes for each obstacle
use_outer_lane = False
for _ in obstacle_dists:
obstacle_lanes.append(lane_choices[use_outer_lane])
use_outer_lane = not use_outer_lane
# Compute the obstacle poses
obstacle_poses = []
for obstacle_dist, obstacle_lane in zip(obstacle_dists, obstacle_lanes):
obstacle_pose = obstacle_lane.interpolate_pose(
obstacle_lane.project(self.track_data.center_line.interpolate(obstacle_dist)))
if self.use_bot_car:
obstacle_pose.position.z = BOT_CAR_Z
else:
obstacle_pose.position.z = OBSTACLE_Z
obstacle_poses.append(obstacle_pose)
# Return the poses
return obstacle_poses
def _spawn_obstacles(self):
for obstacle_name, obstacle_pose in zip(self.obstacle_names, self.obstacle_poses):
self.spawn_sdf_model(obstacle_name, self.obstacle_sdf, '/{}'.format(obstacle_name),
obstacle_pose, '')
self.track_data.initialize_object(obstacle_name, obstacle_pose,
self.obstacle_dimensions)
def _reset_obstacles(self):
for obstacle_name, obstacle_pose in zip(self.obstacle_names, self.obstacle_poses):
obstacle_state = ModelState()
obstacle_state.model_name = obstacle_name
obstacle_state.pose = obstacle_pose
obstacle_state.twist.linear.x = 0
obstacle_state.twist.linear.y = 0
obstacle_state.twist.linear.z = 0
obstacle_state.twist.angular.x = 0
obstacle_state.twist.angular.y = 0
obstacle_state.twist.angular.z = 0
self.set_model_state(obstacle_state)
self.track_data.reset_object(obstacle_name, obstacle_pose)
@property
def action_space(self):
return None
def reset_agent(self):
self.obstacle_poses = self._compute_obstacle_poses()
self._reset_obstacles()
def send_action(self, action):
pass
def update_agent(self, action):
return {}
def judge_action(self, agents_info_map):
for agent_name, agent_info in agents_info_map.items():
# check racecar crash with a obstacle
crashed_object_name = agent_info[AgentInfo.CRASHED_OBJECT_NAME.value] \
if AgentInfo.CRASHED_OBJECT_NAME.value in agent_info else ''
# only trainable racecar agent has 'obstacle' as possible crashed object
if 'obstacle' in crashed_object_name:
self._reset_obstacles()
break
return None, None, None
def finish_episode(self):
pass
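# --- Illustrative sketch (not part of the original module) ---
# Standalone version of the placement logic in _compute_obstacle_poses: obstacles
# start equally spaced along the track and are then randomly perturbed while a
# minimum spacing is preserved. The track length and counts below are made up.
def _example_obstacle_spacing(track_length=20.0, num_obstacles=4, min_dist=2.0):
    start_dist, end_dist = min_dist, track_length - 1.0
    dists = np.linspace(start_dist, end_dist, num_obstacles)
    order = list(range(num_obstacles))
    random.shuffle(order)
    for i in order:
        lo = start_dist if i == 0 else dists[i - 1] + min_dist
        hi = end_dist if i == num_obstacles - 1 else dists[i + 1] - min_dist
        if lo < hi:
            dists[i] = random.uniform(lo, hi)
    return dists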
| 47.335294 | 120 | 0.663601 | [
"Apache-2.0"
] | LastRemote/amazon-sagemaker-examples | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/agent_ctrl/obstacles_agent_ctrl.py | 8,047 | Python |
import torch
import torch.nn as nn
class CustomModel(nn.Module):
def __init__(self):
super().__init__()
self.stage1_block = nn.Sequential(
nn.Conv2d(
in_channels=3,
out_channels=64,
kernel_size=3,
stride=1,
padding=1
),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=3,
stride=1,
padding=1
),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=3,
stride=1,
padding=1
),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(
in_channels=64,
out_channels=32,
kernel_size=3,
stride=1,
padding=1
),
nn.ReLU(),
nn.Conv2d(
in_channels=32,
out_channels=1,
kernel_size=3,
stride=1,
padding=1
),
nn.ReLU(),
nn.Flatten()
)
def forward(self,x):
x = self.stage1_block(x)
return x
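# --- Illustrative usage sketch (not part of the original module) ---
# Runs a dummy batch through the network. The 64x64 input size is an assumption;
# any spatial size works because the head is fully convolutional, and the output
# is one value per spatial location, flattened to (batch, H*W).
if __name__ == "__main__":
    model = CustomModel()
    dummy = torch.randn(2, 3, 64, 64)
    out = model(dummy)
    print(out.shape)  # torch.Size([2, 4096]) for a 64x64 input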
| 21.294118 | 42 | 0.380525 | [
"MIT"
] | olaals/masteroppgave | machine-learning/ml-projects/locally-connected/models/models/model_bn.py | 1,448 | Python |
# Generated by Django 2.1.5 on 2019-03-16 16:41
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
("people", "0012_auto_20190316_1641"),
("services", "0023_key_points_heading_not_required"),
]
operations = [
migrations.AddField(
model_name="servicepage",
name="contact_reasons",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="people.ContactReasonsList",
),
),
migrations.AlterField(
model_name="servicepage",
name="heading_for_key_points",
field=wagtail.core.fields.RichTextField(),
),
]
| 27.181818 | 61 | 0.58194 | [
"MIT"
] | elviva404/wagtail-torchbox | tbx/services/migrations/0024_auto_20190316_1641.py | 897 | Python |
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
import boto3
import json
import logging
import os
import re
from ipaddr import IPNetwork
from ybops.utils import get_or_create, get_and_cleanup
from ybops.common.exceptions import YBOpsRuntimeError
from ybops.cloud.common.utils import request_retry_decorator
RESOURCE_PREFIX_FORMAT = "yb-{}"
IGW_CIDR = "0.0.0.0/0"
SUBNET_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT
IGW_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-igw"
ROUTE_TABLE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-rt"
SG_YUGABYTE_PREFIX_FORMAT = RESOURCE_PREFIX_FORMAT + "-sg"
PEER_CONN_FORMAT = "yb-peer-conn-{}-to-{}"
class AwsBootstrapRegion():
def __init__(self, region, metadata, region_cidrs):
self.region = region
self.metadata = metadata
self.region_cidrs = region_cidrs
self.client = get_client(self.region)
# Outputs.
self.vpc = None
self.igw = None
self.peer_vpc = None
self.sg_yugabyte = None
self.subnets = []
self.route_table = None
def bootstrap(self):
self.setup_vpc()
self.setup_igw()
self.setup_subnets()
self.setup_yugabyte_sg()
self.setup_rt()
def setup_vpc(self):
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(self.region)
vpc = create_vpc(client=self.client, tag_name=vpc_region_tag,
cidr=get_region_cidr(self.metadata, self.region))
vpc.wait_until_available()
self.vpc = vpc
def setup_igw(self):
igw_tag = IGW_PREFIX_FORMAT.format(self.region)
igw = create_igw(client=self.client, tag_name=igw_tag, vpc=self.vpc)
self.igw = igw
def setup_subnets(self):
zones = get_zones(self.region)
subnets = {}
for zone_index, zone in enumerate(sorted(zones.keys())):
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
zone_cidr = self.metadata["zone_cidr_format"].format(
get_cidr_prefix(self.metadata, self.region), (zone_index + 1) * 16)
subnet = create_subnet(self.client, self.vpc, zone, zone_cidr, vpc_zone_tag)
subnets[zone] = subnet
self.subnets = subnets
def setup_yugabyte_sg(self):
sg_group_name = get_yb_sg_name(self.region)
rules = list(self.metadata["sg_rules"])
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
sg = create_security_group(client=self.client, group_name=sg_group_name,
description="YugaByte SG", vpc=self.vpc,
rules=rules)
self.sg_yugabyte = sg
def setup_rt(self):
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(self.region)
route_table = create_route_table(client=self.client, tag_name=route_table_tag,
vpc=self.vpc)
        # TODO: handle private/public case at some point, also NAT.
add_route_to_rt(route_table, IGW_CIDR, "GatewayId", self.igw.id)
current_associated_subnet_ids = [assoc.subnet_id for assoc in route_table.associations]
missing_ids = [subnet.id for subnet in self.subnets.values()
if subnet.id not in current_associated_subnet_ids]
for subnet_id in missing_ids:
route_table.associate_with_subnet(SubnetId=subnet_id)
self.route_table = route_table
def add_sg_ingress_to_sg(self, incoming_sg, target_sg):
current_sg_ids = set([pair["GroupId"]
for perm in target_sg.ip_permissions
for pair in perm["UserIdGroupPairs"]])
if incoming_sg.id not in current_sg_ids:
target_sg.authorize_ingress(
IpPermissions=[{
"IpProtocol": "-1",
"UserIdGroupPairs": [{"GroupId": incoming_sg.id}]}])
def add_route_to_rt(route_table, cidr, target_type, target_id):
kwargs = {target_type: target_id}
route = get_route_by_cidr(route_table, cidr)
if route is None:
route_table.create_route(DestinationCidrBlock=cidr, **kwargs)
elif getattr(route, dumb_camel_to_snake(target_type)) != target_id:
route.replace(**kwargs)
def add_cidr_to_rules(rules, cidr):
rule_block = {
"ip_protocol": "-1",
"from_port": 0,
"to_port": 65535,
"cidr_ip": cidr
}
rules.append(rule_block)
def get_cidr_prefix(metadata, region):
return metadata["regions"][region]["cidr_prefix"]
def get_region_cidr(metadata, region):
return metadata["region_cidr_format"].format(get_cidr_prefix(metadata, region))
def get_region_cidrs(metadata):
return dict([(r, get_region_cidr(metadata, r)) for r in metadata["regions"].keys()])
def dumb_camel_to_snake(s):
return re.sub("([A-Z])", "_\\1", s).lower()[1:]
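# Illustrative behaviour of the helper above (examples added for clarity):
#   dumb_camel_to_snake("GatewayId") -> "gateway_id"
#   dumb_camel_to_snake("VpcPeeringConnectionId") -> "vpc_peering_connection_id"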
class YbVpcComponents:
def __init__(self):
self.region = None
self.vpc = None
self.sg_yugabyte = None
self.customer_sgs = None
self.route_table = None
self.subnets = None
@staticmethod
def from_pieces(region, vpc_id, sg_id, rt_id, az_to_subnet_ids):
c = YbVpcComponents()
c.region = region
client = get_client(region)
c.vpc = client.Vpc(vpc_id)
c.sg_yugabyte = client.SecurityGroup(sg_id)
c.route_table = client.RouteTable(rt_id)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
@staticmethod
def from_user_json(region, per_region_meta):
c = YbVpcComponents()
c.region = region
client = get_client(region)
vpc_id = per_region_meta.get("vpcId")
if vpc_id:
c.vpc = client.Vpc(vpc_id)
else:
c.vpc = get_vpc(client, RESOURCE_PREFIX_FORMAT.format(region))
sg_ids = per_region_meta.get("customSecurityGroupId")
if sg_ids:
c.customer_sgs = [client.SecurityGroup(sg_id) for sg_id in sg_ids.split(",")]
else:
c.sg_yugabyte = get_security_group(
client, SG_YUGABYTE_PREFIX_FORMAT.format(region), c.vpc)
if not vpc_id:
c.route_table = get_route_table(client, ROUTE_TABLE_PREFIX_FORMAT.format(region))
az_to_subnet_ids = {}
if vpc_id:
az_to_subnet_ids = per_region_meta.get("azToSubnetIds", {})
else:
az_to_subnet_ids = get_zones(region)
c.subnets = {az: client.Subnet(subnet_id)
for az, subnet_id in az_to_subnet_ids.iteritems()}
return c
def as_json(self):
sgs = self.customer_sgs if self.customer_sgs else [self.sg_yugabyte]
return vpc_components_as_json(self.vpc, sgs, self.subnets)
class AwsBootstrapClient():
def __init__(self, metadata, host_vpc_id, host_vpc_region):
self.metadata = metadata
self.host_vpc_id = host_vpc_id
self.host_vpc_region = host_vpc_region
self.region_cidrs = get_region_cidrs(self.metadata)
# Validation.
self._validate_cidr_overlap()
def _validate_cidr_overlap(self):
region_networks = [IPNetwork(cidr) for cidr in self.region_cidrs.values()]
all_networks = region_networks
for i in xrange(len(all_networks)):
for j in xrange(i + 1, len(all_networks)):
left = all_networks[i]
right = all_networks[j]
if left.overlaps(right):
raise YBOpsRuntimeError(
"IP blocks in the CIDRs overlap: {} - {}".format(left, right))
def bootstrap_individual_region(self, region):
if region is None:
raise YBOpsRuntimeError("Must provider region to bootstrap!")
client = AwsBootstrapRegion(region, self.metadata, self.region_cidrs)
client.bootstrap()
return YbVpcComponents.from_pieces(
region, client.vpc.id, client.sg_yugabyte.id, client.route_table.id,
{az: s.id for az, s in client.subnets.iteritems()})
def cross_link_regions(self, components):
# Do the cross linking, adding CIDR entries to RTs and SGs, as well as doing vpc peerings.
region_and_vpc_tuples = [(r, c.vpc) for r, c in components.iteritems()]
host_vpc = None
if self.host_vpc_id and self.host_vpc_region:
host_vpc = get_client(self.host_vpc_region).Vpc(self.host_vpc_id)
region_and_vpc_tuples.append((self.host_vpc_region, host_vpc))
# Setup VPC peerings.
for i in xrange(len(region_and_vpc_tuples) - 1):
i_region, i_vpc = region_and_vpc_tuples[i]
for j in xrange(i + 1, len(region_and_vpc_tuples)):
j_region, j_vpc = region_and_vpc_tuples[j]
peering = create_vpc_peering(
# i is the host, j is the target.
client=get_client(i_region), vpc=j_vpc, host_vpc=i_vpc, target_region=j_region)
if len(peering) != 1:
raise YBOpsRuntimeError(
"Expecting one peering connection, got {}".format(peer_conn))
peering = peering[0]
# Add route i -> j.
add_route_to_rt(components[i_region].route_table, j_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
# Add route j -> i.
# Note: If we have a host_vpc, it is the last in the list, and it doesn't have an
# associated component, so we special case it.
if host_vpc is None or j != len(region_and_vpc_tuples) - 1:
add_route_to_rt(components[j_region].route_table, i_vpc.cidr_block,
"VpcPeeringConnectionId", peering.id)
else:
# TODO: should ideally filter to the RT that is relevant, but we do not really
# know the subnets which matter from this host_vpc...
for rt in list(host_vpc.route_tables.all()):
add_route_to_rt(rt, i_vpc.cidr_block, "VpcPeeringConnectionId", peering.id)
# Setup SG entries for all the CIDRs.
all_cidrs = [vpc.cidr_block for r, vpc in region_and_vpc_tuples]
rules = []
# Add CIDRs from all the VPCs, including the host.
for cidr in all_cidrs:
add_cidr_to_rules(rules, cidr)
# Add CIDRs from any custom networks we have internally, primarily the OpenVPN in AWS.
# TODO(bogdan): custom CIDR entries
for cidr in self.metadata.get("custom_network_whitelisted_ip_cidrs", []):
add_cidr_to_rules(rules, cidr)
for region, component in components.iteritems():
sg = component.sg_yugabyte
ip_perms = sg.ip_permissions
for rule in rules:
found = False
for perm in ip_perms:
if perm.get("FromPort") == rule["from_port"] and \
perm.get("ToPort") == rule["to_port"] and \
perm.get("IpProtocol") == rule["ip_protocol"] and \
len([True for r in perm.get("IpRanges", [])
if r.get("CidrIp") == rule["cidr_ip"]]) > 0:
# This rule matches this permission, so no need to add it.
found = True
break
if not found:
try:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
if "InvalidPermission.Duplicate" not in str(e):
raise YBOpsRuntimeError(
"Authorize Security Group Ingress failed: {}".format(repr(e)))
def aws_exception_handler(e):
"""AWS specific exception handler.
Args:
e: the exception that was raised by the underlying API call that just failed.
Returns:
True if this exception can be retried, False otherwise.
"""
return "Request limit exceeded" in str(e)
def aws_request_limit_retry(fn):
"""A decorator for retrying an AWS operation after exceeding request limit. Does retries with
randomized jitter. Ideally, we should reconfigure boto3 to do the right kind of retries
internally, but as of May 2017 there does not seem to be a good way of doing that.
Initially not adding this decorator to all functions in this module. This should be done
gradually as we encounter rate limiting errors.
Relevant boto issues:
https://github.com/boto/boto3/issues/770
https://github.com/boto/botocore/issues/882
"""
return request_retry_decorator(fn, aws_exception_handler)
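# Illustrative usage sketch (assumed, not from the original source): wrapping a
# throttling-prone boto3 call so "Request limit exceeded" errors are retried.
@aws_request_limit_retry
def _example_describe_instances(region):  # pragma: no cover
    return boto3.client("ec2", region_name=region).describe_instances()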
def get_client(region):
"""Method to get boto3 ec2 resource for given region
Args:
region (str): Region name
Returns:
boto3 resource
"""
return boto3.resource("ec2", region_name=region)
def get_clients(regions):
"""Method to get boto3 clients for given region or all the regions if none specified.
Args:
regions (list): List of regions to return clients for
Returns:
clients(obj): Map of region to boto3 resource
"""
return {region: get_client(region) for region in regions}
def get_available_regions(metadata):
return metadata["regions"].keys()
def get_spot_pricing(region, zone, instance_type):
client = boto3.client('ec2', region_name=region)
prod_desc = ['Linux/UNIX (Amazon VPC)']
spot_price = client.describe_spot_price_history(InstanceTypes=[instance_type],
MaxResults=1,
ProductDescriptions=prod_desc,
AvailabilityZone=zone)
if len(spot_price['SpotPriceHistory']) == 0:
raise YBOpsRuntimeError('Invalid instance type {} for zone {}'.format(instance_type, zone))
return spot_price['SpotPriceHistory'][0]['SpotPrice']
def get_zones(region, dest_vpc_id=None):
"""Method to fetch zones for given region or all the regions if none specified.
Args:
region (str): Name of region to get zones of.
Returns:
zones (obj): Map of zone -> subnet
"""
filters = get_filters("state", "available")
client = boto3.client("ec2", region_name=region)
zones = client.describe_availability_zones(Filters=filters).get("AvailabilityZones", [])
new_client = get_client(region)
zone_mapping = {}
for z in zones:
zone_name = z["ZoneName"]
zone_tag = SUBNET_PREFIX_FORMAT.format(zone_name)
region_vpc = None
if dest_vpc_id:
region_vpc = new_client.Vpc(dest_vpc_id)
else:
region_vpc = get_vpc(new_client, RESOURCE_PREFIX_FORMAT.format(region))
subnet = next(iter(fetch_subnets(region_vpc, zone_tag)), None)
if subnet is None:
subnet = next(iter([s for s in region_vpc.subnets.all()
if s.availability_zone == zone_name]), None)
zone_mapping[zone_name] = subnet.id if subnet is not None else None
return zone_mapping
def get_vpc(client, tag_name, **kwargs):
"""Method to fetch vpc based on the tag_name.
Args:
client (boto client): Boto Client for the region to query.
tag_name (str): VPC tag name.
Returns:
VPC obj: VPC object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.vpcs.filter(Filters=filters)), None)
def fetch_subnets(vpc, tag_name):
"""Method to fetch subnets based on the tag_name.
Args:
vpc (vpc obj): VPC object to search for subnets
tag_name (str): subnet tag name.
Returns:
subnets (list): list of aws subnets for given vpc.
"""
filters = get_tag_filter(tag_name)
return vpc.subnets.filter(Filters=filters)
def create_subnet(client, vpc, zone, cidr, tag_name):
"""Method to create subnet based on cidr and tag name.
Args:
client (boto client): Region specific boto client
vpc (VPC object): VPC object to create subnet.
zone (str): Availability zone name
cidr (str): CIDR string
tag_name (str): Tag name for subnet.
Returns:
subnet: Newly created subnet object.
"""
subnet = next((s for s in fetch_subnets(vpc, tag_name) if s.cidr_block == cidr), None)
if subnet is None:
subnet = vpc.create_subnet(CidrBlock=cidr, AvailabilityZone=zone)
# TODO: no direct waiter on subnet just yet, it seems...
client.meta.client.get_waiter("subnet_available").wait(SubnetIds=[subnet.id])
tag_resource_name(client, subnet.id, tag_name)
return subnet
def get_security_group(client, group_name, vpc, **kwargs):
"""Method to fetch security group based on the group_name.
Args:
client (boto client): Region specific boto client
group_name (str): Security Group name
vpc (VPC object): The VPC in which to check for the SG
Returns:
SecurityGroup: Matching security group.
"""
filters = get_filters("group-name", group_name) + get_filters("vpc-id", vpc.id)
return next(iter(client.security_groups.filter(Filters=filters)), None)
@get_or_create(get_security_group)
def create_security_group(client, group_name, vpc, description, rules):
"""Method to create a security group based on the group_name and authorize ingress with
the rules provided.
Args:
client (boto client): Region specific boto client
group_name (str): security group name
description (str): description of the security group
vpc (VPC Object): VPC object to create the security group
rules (dict): List of rules to add to security group.
"""
sg = vpc.create_security_group(GroupName=group_name, Description=description)
try:
for rule in rules:
sg.authorize_ingress(IpProtocol=rule["ip_protocol"],
CidrIp=rule["cidr_ip"],
FromPort=rule["from_port"],
ToPort=rule["to_port"])
except Exception as e:
logging.error("Authorize Security Group Ingress failed: {}".format(e))
sg.delete()
raise YBOpsRuntimeError("Security Group creation failed.")
return sg
def get_igw(client, tag_name, **kwargs):
"""Method to fetch Internet Gateway based on tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): Internet Gateway tag name.
Returns:
internet_gateway: internet gateway object.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.internet_gateways.filter(Filters=filters)), None)
@get_or_create(get_igw)
def create_igw(client, tag_name, vpc):
"""Method to create Internet Gateway based on tag_name in given VPC. If the gateway
already exists, it would return that object. If the object doesn't have a tag, we
would tag it accordingly.
Args:
client (boto client): Region specific boto client
tag_name (str): Tag name for internet gateway.
vpc (VPC object): VPC object to create Internet Gateway
Returns:
internet gateway: newly internet gateway object.
"""
# Query to make sure the region doesn't have any IGW already attached.
existing_igw = next(iter(vpc.internet_gateways.all()), None)
if existing_igw is not None:
# If we have existing igw for the region, lets just tag it with yb-XX-igw
tag_resource_name(client, existing_igw.id, tag_name)
return existing_igw
# If we don't have a internet gateway, lets create one and attach it to vpc
igw = client.create_internet_gateway()
tag_resource_name(client, igw.id, tag_name)
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
return igw
def get_route_table(client, tag_name, **kwargs):
"""Method to fetch route table based on tag_name
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name to search for.
Returns:
RouteTable (obj): Matching route table object or None.
"""
filters = get_tag_filter(tag_name)
return next(iter(client.route_tables.filter(Filters=filters)), None)
@get_or_create(get_route_table)
def create_route_table(client, tag_name, vpc):
"""Method to create route table based on tag_name in given VPC. It will first
query for the tag name to see if the route table already exists or if one is already
attached to the VPC, if so it will return that route table.
Args:
client (boto client): Region specific boto client
tag_name (str): Route table tag name
vpc (vpc object): VPC object to create the route table against
Returns:
RouteTable (obj): newly created RouteTable object.
"""
# Check to see if there is a route table attached to VPC, if so, we can just tag it
existing_route_table = next(iter(vpc.route_tables.all()), None)
if existing_route_table is not None:
tag_resource_name(client, existing_route_table.id, tag_name)
return existing_route_table
# If no route table exists, we can create one and tag it.
route_table = vpc.create_route_table()
tag_resource_name(client, route_table.id, tag_name)
return route_table
@get_and_cleanup(get_security_group)
def cleanup_security_group(sg, **kwargs):
"""Method to cleanup security group for the matching group_name.
Args:
sg: Instance of security group matching the group_name.
"""
sg.delete()
@get_and_cleanup(get_igw)
def cleanup_igw(igw, **kwargs):
"""Method to cleanup Internet Gateway matching the tag name. And also remove any vpc
that is attached to the Internet Gateway.
Args:
igw: Instance of Internet Gateway matching tag_name.
"""
for vpc in igw.attachments:
igw.detach_from_vpc(VpcId=vpc['VpcId'])
igw.delete()
@get_and_cleanup(get_route_table)
def cleanup_route_table(rt, **kwargs):
"""Method to cleanup the Route Table matching the tag name.
Args:
rt: Instance of Route Table matching tag_name.
"""
rt.delete()
def get_route_by_cidr(route_table, cidr):
"""Method to check if given CIDR already attached to route table.
Args:
RouteTable (obj): Route Table object.
cidr (str): CIDR string to check in route table.
Returns:
Route: the route for this CIDR or None if not found
"""
return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
@get_or_create(get_vpc)
def create_vpc(client, tag_name, cidr):
"""Method to create vpc based on the cidr and tag with tag_name.
Args:
client (boto client): Region specific boto client
tag_name (str): VPC tag name
cidr (str): CIDR string.
Returns:
VPC(Object): Newly created VPC object.
"""
vpc = client.create_vpc(CidrBlock=cidr)
vpc.modify_attribute(EnableDnsHostnames={'Value': True})
tag_resource_name(client, vpc.id, tag_name)
return vpc
def set_yb_sg_and_fetch_vpc(metadata, region, dest_vpc_id):
"""Method to bootstrap vpc and security group, and enable vpc peering
with the host_instance vpc.
Args:
metadata (obj): Cloud metadata object with cidr prefix and other metadata.
region (str): Region name to create the vpc in.
dest_vpc_id (str): Id of the VPC that yugabyte machines will reside in.
Returns:
vpc_info (json): return vpc, subnet and security group as json.
"""
client = get_client(region)
dest_vpc = client.Vpc(dest_vpc_id)
subnets = {subnet.availability_zone: subnet for subnet in dest_vpc.subnets.all()}
sg_group_name = get_yb_sg_name(region)
rules = metadata["sg_rules"]
for r in rules:
r.update({"cidr_ip": IGW_CIDR})
add_cidr_to_rules(rules, dest_vpc.cidr_block)
sgs = [create_security_group(client=client, group_name=sg_group_name, vpc=dest_vpc,
description="YugaByte SG", rules=rules)]
return vpc_components_as_json(dest_vpc, sgs, subnets)
def query_vpc(region):
"""Method to query VPC against given region and respective subnets.
Args:
region (str): Region name to query the VPC.
Returns:
vpc and subnet info (obj): Object with region and zone subnet id.
"""
per_vpc_info = {}
# Fetch all available AZs, as we want to group subnets by AZ.
raw_client = boto3.client("ec2", region_name=region)
zones = [z["ZoneName"]
for z in raw_client.describe_availability_zones(
Filters=get_filters("state", "available")).get("AvailabilityZones", [])]
# Default to empty lists, in case some zones do not have subnets, so we can use this as a query
# for all available AZs in this region.
subnets_by_zone = {z: [] for z in zones}
# Fetch SGs and group them by VPC ID.
client = get_client(region)
per_vpc_sgs = {}
sgs = client.security_groups.all()
for sg in sgs:
sg_list = per_vpc_sgs.setdefault(sg.vpc_id, [])
sg_list.append({
"sg_id": sg.group_id,
# Note: Name tag is not mandatory or always present but group_name is!
"sg_name": sg.group_name
})
# Fetch all available VPCs so we can group by VPC ID.
region_vpcs = client.vpcs.all()
for vpc in region_vpcs:
# Filter for available subnets and group by AZ.
subnets = vpc.subnets.filter(Filters=get_filters("state", "available"))
for s in subnets:
subnets_for_this_az = subnets_by_zone.setdefault(s.availability_zone, [])
subnets_for_this_az.append({
"subnet_id": s.subnet_id,
"name": _get_name_from_tags(s.tags),
"public": s.map_public_ip_on_launch
})
vpc_info = {
"subnets_by_zone": subnets_by_zone,
# In case we somehow did not find any SGs, default to empty list.
"security_groups": per_vpc_sgs.get(vpc.id, [])
}
per_vpc_info[vpc.id] = vpc_info
region_json = {
"per_vpc_info": per_vpc_info
}
return region_json
def _get_name_from_tags(tags):
for t in tags if tags else []:
if t.get("Key") == "Name":
return t.get("Value", None)
return None
def vpc_components_as_json(vpc, sgs, subnets):
"""Method takes VPC, Security Group and Subnets and returns a json data format with ids.
Args:
vpc (VPC Object): Region specific VPC object
sgs (List of Security Group Object): Region specific Security Group object
subnets (subnet object map): Map of Subnet objects keyed of zone.
    Returns:
json (str): A Json string for yugaware to consume with necessary ids.
"""
result = {}
result["vpc_id"] = vpc.id
result["security_group"] = [{"id": sg.group_id, "name": sg.group_name} for sg in sgs]
result["zones"] = {}
for zone, subnet in subnets.iteritems():
result["zones"][zone] = subnet.id
return result
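# Illustrative shape of the structure returned above (all values are placeholders):
#   {"vpc_id": "vpc-0123456789abcdef0",
#    "security_group": [{"id": "sg-0123456789abcdef0", "name": "yb-us-west-2-sg"}],
#    "zones": {"us-west-2a": "subnet-0123456789abcdef0"}}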
def delete_vpc(region, host_vpc_id=None, host_vpc_region=None):
"""Method to delete VPC, Subnet, Internet Gateway, Route Table and VPC peering.
Args:
region (str): Region name to query the VPC.
"""
vpc_region_tag = RESOURCE_PREFIX_FORMAT.format(region)
client = get_client(region)
region_vpc = get_vpc(client, vpc_region_tag)
if region_vpc is None:
raise YBOpsRuntimeError("VPC not setup.")
zones = get_zones(region)
# Remove the yugabyte SG first.
sg_group_name = get_yb_sg_name(region)
cleanup_security_group(client=client, group_name=sg_group_name, vpc=region_vpc)
# Cleanup the subnets.
for zone, subnet_id in zones.iteritems():
vpc_zone_tag = SUBNET_PREFIX_FORMAT.format(zone)
if subnet_id is not None:
client.Subnet(subnet_id).delete()
# Remove the IGW.
igw_tag = IGW_PREFIX_FORMAT.format(region)
cleanup_igw(client=client, tag_name=igw_tag)
# Remove this region's CIDR from the RT of the host vpc.
host_vpc = None
if host_vpc_id is not None and host_vpc_region is not None:
host_vpc = get_client(host_vpc_region).Vpc(host_vpc_id)
for rt in list(host_vpc.route_tables.all()):
delete_route(rt, region_vpc.cidr_block)
# Remove all of the VPC peerings of this vpc.
cleanup_vpc_peering(client=client, vpc=region_vpc, host_vpc=None)
# Delete the VPC itself.
region_vpc.delete()
# Finally cleanup the Routing Table.
route_table_tag = ROUTE_TABLE_PREFIX_FORMAT.format(region)
cleanup_route_table(client=client, tag_name=route_table_tag)
return {"success": "VPC deleted."}
def tag_resource_name(client, resource_id, tag_name):
"""Method to create name tag for given resource.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_name (str): Tag name.
"""
tag_resource(client, resource_id, "Name", tag_name)
def tag_resource(client, resource_id, tag_key, tag_value):
"""Method to attach arbitrary key-value tags to resources.
Args:
client (boto3 client): Region specific boto client
resource_id (str): EC2 resource id to tag
tag_key: Tag key
tag_value: Tag value
"""
tags = [{"Key": tag_key, "Value": tag_value}]
client.create_tags(Resources=[resource_id], Tags=tags)
def get_filters(key, value):
return [{'Name': key, 'Values': [value]}]
def get_tag_filter(tag_name):
return get_filters("tag:Name", tag_name)
def get_vpc_peerings(vpc, host_vpc, **kwargs):
"""Method to fetch all the VPC peerings against given VPC. If host_vpc is provided
it will check if there is a peering against that vpc.
Args:
vpc(VPC object): VPC Object to search for peerings
host_vpc (Host VPC object): Can be Null as well, to check if specific host_vpc
peering is done.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
output = []
# Search through accepted vpc peerings.
vpc_peerings = vpc.accepted_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.requester_vpc == host_vpc)])
# Also search through requested vpc peerings.
vpc_peerings = vpc.requested_vpc_peering_connections.all()
output.extend([vp for vp in vpc_peerings
if vp.status.get('Code') == "active" and
(host_vpc is None or vp.accepter_vpc == host_vpc)])
return output
@get_and_cleanup(get_vpc_peerings)
def cleanup_vpc_peering(vpc_peerings, **kwargs):
for vpc_peering in vpc_peerings:
vpc_peering.delete()
@get_or_create(get_vpc_peerings)
def create_vpc_peering(client, vpc, host_vpc, target_region):
"""Method would create a vpc peering between the newly created VPC and caller's VPC
Also makes sure, if they aren't the same, then there is no need for vpc peering.
Args:
client (boto client): Region specific boto client
vpc (VPC object): Newly created VPC object
host_vpc(Host VPC object): Host VPC to peer with.
target_region (region name): Region name in which peering is being created.
Returns:
VPC peering (array): Array list of vpc peerings.
"""
try:
peer_conn = client.create_vpc_peering_connection(
VpcId=host_vpc.id, PeerVpcId=vpc.id, PeerRegion=target_region)
peer_conn.wait_until_exists()
# Need to accept from the other end.
remote_peer_conn = get_client(target_region).VpcPeeringConnection(peer_conn.id)
remote_peer_conn.wait_until_exists()
remote_peer_conn.accept()
return [peer_conn]
except Exception as e:
logging.error(e)
raise YBOpsRuntimeError("Unable to create VPC peering.")
def get_device_names(instance_type, num_volumes):
device_names = []
for i in xrange(num_volumes):
device_name_format = "nvme{}n1" if is_nvme(instance_type) else "xvd{}"
index = "{}".format(i if is_nvme(instance_type) else chr(ord('b') + i))
device_names.append(device_name_format.format(index))
return device_names
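# Illustrative checks for the helper above (added for clarity): NVMe instance
# types expose nvme*n1 devices, older families expose xvd* letters.
def _example_device_names():  # pragma: no cover
    assert get_device_names("i3.2xlarge", 2) == ["nvme0n1", "nvme1n1"]
    assert get_device_names("c5.large", 2) == ["xvdb", "xvdc"]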
def is_next_gen(instance_type):
return instance_type.startswith(("c3", "c4", "c5", "m4", "r4"))
def is_nvme(instance_type):
return instance_type.startswith("i3")
def has_ephemerals(instance_type):
return not is_nvme(instance_type) and not is_next_gen(instance_type)
def create_instance(args):
client = get_client(args.region)
vars = {
"ImageId": args.machine_image,
"KeyName": args.key_pair_name,
"MinCount": 1,
"MaxCount": 1,
"InstanceType": args.instance_type,
}
# Network setup.
# Lets assume they have provided security group id comma delimited.
sg_ids = args.security_group_id.split(",") if args.security_group_id else None
if sg_ids is None:
# Figure out which VPC this instance will be brought up in and search for the SG in there.
# This is for a bit of backwards compatibility with the previous mode of potentially using
# YW's VPC, in which we would still deploy a SG with the same name as in our normal VPCs.
# This means there could be customers that had that deployment mode from the start AND have
# a SG we created back then, with the internal naming convention we use, but NOT in the YB
# VPC (which they likely will not even have).
vpc = get_vpc_for_subnet(client, args.cloud_subnet)
sg_name = get_yb_sg_name(args.region)
sg = get_security_group(client, sg_name, vpc)
sg_ids = [sg.id]
vars["NetworkInterfaces"] = [{
"DeviceIndex": 0,
"AssociatePublicIpAddress": args.assign_public_ip,
"SubnetId": args.cloud_subnet,
"Groups": sg_ids
}]
# Volume setup.
volumes = []
ebs = {
"DeleteOnTermination": True,
# TODO: constant
"VolumeSize": 40,
"VolumeType": "gp2"
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
volumes.append({
"DeviceName": "/dev/sda1",
"Ebs": ebs
})
device_names = get_device_names(args.instance_type, args.num_volumes)
# TODO: Clean up semantics on nvme vs "next-gen" vs ephemerals, as this is currently whack...
for i, device_name in enumerate(device_names):
volume = {}
if has_ephemerals(args.instance_type):
volume = {
"DeviceName": "/dev/{}".format(device_name),
"VirtualName": "ephemeral{}".format(i)
}
elif is_next_gen(args.instance_type):
ebs = {
"DeleteOnTermination": True,
"VolumeType": args.volume_type,
# TODO: make this int.
"VolumeSize": args.volume_size
}
if args.cmk_res_name is not None:
ebs["Encrypted"] = True
ebs["KmsKeyId"] = args.cmk_res_name
if args.volume_type == "io1":
# TODO: make this int.
ebs["Iops"] = args.disk_iops
volume = {
"DeviceName": "/dev/{}".format(device_name),
"Ebs": ebs
}
volumes.append(volume)
vars["BlockDeviceMappings"] = volumes
# Tag setup.
def __create_tag(k, v):
return {"Key": k, "Value": v}
# Add Name all the time.
instance_tags = [
__create_tag("Name", args.search_pattern),
__create_tag("launched-by", os.environ.get("USER", "unknown")),
__create_tag("yb-server-type", args.type)
]
custom_tags = args.instance_tags if args.instance_tags is not None else '{}'
for k, v in json.loads(custom_tags).iteritems():
instance_tags.append(__create_tag(k, v))
vars["TagSpecifications"] = [{
"ResourceType": "instance",
"Tags": instance_tags
}]
# TODO: user_data > templates/cloud_init.yml.j2, still needed?
instance_ids = client.create_instances(**vars)
if len(instance_ids) != 1:
logging.error("Invalid create_instances response: {}".format(instance_ids))
raise YBOpsRuntimeError("Expected to create 1 instance, got {}".format(
len(instance_ids)))
instance = instance_ids[0]
instance.wait_until_running()
def modify_tags(region, instance_id, tags_to_set_str, tags_to_remove_str):
instance = get_client(region).Instance(instance_id)
# Remove all the tags we were asked to, except the internal ones.
tags_to_remove = set(tags_to_remove_str.split(",") if tags_to_remove_str else [])
# TODO: combine these with the above instance creation function.
internal_tags = set(["Name", "launched-by", "yb-server-type"])
if tags_to_remove & internal_tags:
raise YBOpsRuntimeError(
"Was asked to remove tags: {}, which contain internal tags: {}".format(
tags_to_remove, internal_tags
))
# Note: passing an empty list to Tags will remove all tags from the instance.
if tags_to_remove:
instance.delete_tags(Tags=[{"Key": k} for k in tags_to_remove])
# Set all the tags provided.
tags_to_set = json.loads(tags_to_set_str if tags_to_set_str else "{}")
customer_tags = []
for k, v in tags_to_set.iteritems():
customer_tags.append({"Key": k, "Value": v})
instance.create_tags(Tags=customer_tags)
def delete_route(rt, cidr):
route = get_route_by_cidr(rt, cidr)
if route is not None:
route.delete()
def get_vpc_for_subnet(client, subnet):
return client.Subnet(subnet).vpc
def get_yb_sg_name(region):
return SG_YUGABYTE_PREFIX_FORMAT.format(region)
def list_dns_record_set(hosted_zone_id):
return boto3.client('route53').get_hosted_zone(Id=hosted_zone_id)
def create_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'CREATE')
def edit_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'UPSERT')
def delete_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list):
return _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, 'DELETE')
def _update_dns_record_set(hosted_zone_id, domain_name_prefix, ip_list, action):
client = boto3.client('route53')
records = []
for ip in ip_list:
records.append({'Value': ip})
result = list_dns_record_set(hosted_zone_id)
hosted_zone_name = result['HostedZone']['Name']
change_batch = {
'Comment': "YugaWare driven Record Set",
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': "{}.{}".format(domain_name_prefix, hosted_zone_name),
'Type': 'A',
'TTL': 5,
'ResourceRecords': records
}
}]
}
result = client.change_resource_record_sets(
HostedZoneId=hosted_zone_id,
ChangeBatch=change_batch)
client.get_waiter('resource_record_sets_changed').wait(
Id=result['ChangeInfo']['Id'],
WaiterConfig={
'Delay': 10,
'MaxAttempts': 60
})
| 38.933269 | 100 | 0.645776 | [
"Apache-2.0",
"CC0-1.0"
] | bhavin192/yugabyte-db | managed/devops/opscli/ybops/cloud/aws/utils.py | 40,257 | Python |
from typing import List
import questionary
def get_first_checked(
choices: List[questionary.Choice], prev_search=None
) -> questionary.Choice:
    first_checked = choices[0]
    if prev_search:
        # fall back to the first choice if no choice is currently checked
        first_checked = next((c for c in choices if c.checked), choices[0])
    return first_checked
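# Minimal usage sketch (choice titles are assumptions): keep the previously checked choice
# selected when the user re-runs a search prompt.
#   choices = [questionary.Choice("alpha"), questionary.Choice("beta", checked=True)]
#   get_first_checked(choices, prev_search="beta")  # -> the "beta" choice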
| 23.916667 | 61 | 0.735192 | [
"MIT"
] | capellaspace/console-client | capella_console_client/cli/prompt_helpers.py | 287 | Python |
# -*- coding: utf-8 -*-
"""An implementation of the extension to ERMLP."""
from typing import Optional, Type
import torch
from torch import nn
from ..base import EntityRelationEmbeddingModel
from ...losses import BCEAfterSigmoidLoss, Loss
from ...regularizers import Regularizer
from ...triples import TriplesFactory
__all__ = [
'ERMLPE',
]
class ERMLPE(EntityRelationEmbeddingModel):
r"""An extension of ERMLP proposed by [sharifzadeh2019]_.
This model uses a neural network-based approach similar to ERMLP and with slight modifications.
In ERMLP, the model is:
.. math::
f(h, r, t) = \textbf{w}^{T} g(\textbf{W} [\textbf{h}; \textbf{r}; \textbf{t}])
whereas in ERMPLE the model is:
.. math::
f(h, r, t) = \textbf{t}^{T} f(\textbf{W} (g(\textbf{W} [\textbf{h}; \textbf{r}]))
including dropouts and batch-norms between each two hidden layers.
ConvE can be seen as a special case of ERMLPE that contains the unnecessary inductive bias of convolutional
filters. The aim of this model is to show that lifting this bias from ConvE (which simply leaves us with a
modified ERMLP model), not only reduces the number of parameters but also improves performance.
"""
#: The default strategy for optimizing the model's hyper-parameters
hpo_default = dict(
embedding_dim=dict(type=int, low=50, high=350, q=25),
hidden_dim=dict(type=int, low=50, high=450, q=25),
input_dropout=dict(type=float, low=0.0, high=0.8, q=0.1),
hidden_dropout=dict(type=float, low=0.0, high=0.8, q=0.1),
)
#: The default loss function class
loss_default: Type[Loss] = BCEAfterSigmoidLoss
#: The default parameters for the default loss function class
loss_default_kwargs = {}
def __init__(
self,
triples_factory: TriplesFactory,
hidden_dim: int = 300,
input_dropout: float = 0.2,
hidden_dropout: float = 0.3,
embedding_dim: int = 200,
automatic_memory_optimization: Optional[bool] = None,
loss: Optional[Loss] = None,
preferred_device: Optional[str] = None,
random_seed: Optional[int] = None,
regularizer: Optional[Regularizer] = None,
) -> None:
super().__init__(
triples_factory=triples_factory,
embedding_dim=embedding_dim,
automatic_memory_optimization=automatic_memory_optimization,
loss=loss,
preferred_device=preferred_device,
random_seed=random_seed,
regularizer=regularizer,
)
self.hidden_dim = hidden_dim
self.input_dropout = input_dropout
self.linear1 = nn.Linear(2 * self.embedding_dim, self.hidden_dim)
self.linear2 = nn.Linear(self.hidden_dim, self.embedding_dim)
self.input_dropout = nn.Dropout(self.input_dropout)
self.bn1 = nn.BatchNorm1d(self.hidden_dim)
self.bn2 = nn.BatchNorm1d(self.embedding_dim)
self.mlp = nn.Sequential(
self.linear1,
nn.Dropout(hidden_dropout),
self.bn1,
nn.ReLU(),
self.linear2,
nn.Dropout(hidden_dropout),
self.bn2,
nn.ReLU(),
)
# Finalize initialization
self.reset_parameters_()
def _reset_parameters_(self): # noqa: D102
self.entity_embeddings.reset_parameters()
self.relation_embeddings.reset_parameters()
for module in [
self.linear1,
self.linear2,
self.bn1,
self.bn2,
]:
module.reset_parameters()
def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(hrt_batch[:, 0]).view(-1, self.embedding_dim)
r = self.relation_embeddings(hrt_batch[:, 1]).view(-1, self.embedding_dim)
t = self.entity_embeddings(hrt_batch[:, 2])
# Embedding Regularization
self.regularize_if_necessary(h, r, t)
# Concatenate them
x_s = torch.cat([h, r], dim=-1)
x_s = self.input_dropout(x_s)
# Predict t embedding
x_t = self.mlp(x_s)
# compare with all t's
# For efficient calculation, each of the calculated [h, r] rows has only to be multiplied with one t row
x = (x_t.view(-1, self.embedding_dim) * t).sum(dim=1, keepdim=True)
# The application of the sigmoid during training is automatically handled by the default loss.
return x
def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
h = self.entity_embeddings(hr_batch[:, 0]).view(-1, self.embedding_dim)
r = self.relation_embeddings(hr_batch[:, 1]).view(-1, self.embedding_dim)
t = self.entity_embeddings.weight.transpose(1, 0)
# Embedding Regularization
self.regularize_if_necessary(h, r, t)
# Concatenate them
x_s = torch.cat([h, r], dim=-1)
x_s = self.input_dropout(x_s)
# Predict t embedding
x_t = self.mlp(x_s)
x = x_t @ t
# The application of the sigmoid during training is automatically handled by the default loss.
return x
def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
h = self.entity_embeddings.weight
r = self.relation_embeddings(rt_batch[:, 0]).view(-1, self.embedding_dim)
t = self.entity_embeddings(rt_batch[:, 1]).view(-1, self.embedding_dim)
# Embedding Regularization
self.regularize_if_necessary(h, r, t)
rt_batch_size = t.shape[0]
# Extend each rt_batch of "r" with shape [rt_batch_size, dim] to [rt_batch_size, dim * num_entities]
r = torch.repeat_interleave(r, self.num_entities, dim=0)
# Extend each h with shape [num_entities, dim] to [rt_batch_size * num_entities, dim]
# h = torch.repeat_interleave(h, rt_batch_size, dim=0)
h = h.repeat(rt_batch_size, 1)
# Extend t
t = t.repeat_interleave(self.num_entities, dim=0)
# Concatenate them
x_s = torch.cat([h, r], dim=-1)
x_s = self.input_dropout(x_s)
# Predict t embedding
x_t = self.mlp(x_s)
# For efficient calculation, each of the calculated [h, r] rows has only to be multiplied with one t row
x = (x_t.view(-1, self.embedding_dim) * t).sum(dim=1, keepdim=True)
# The results have to be realigned with the expected output of the score_h function
x = x.view(rt_batch_size, self.num_entities)
# The application of the sigmoid during training is automatically handled by the default loss.
return x
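# Minimal scoring sketch (assumes `model` is an initialized ERMLPE instance and that the
# entity/relation indices below are valid for its triples factory):
#   model.eval()                                      # avoid BatchNorm1d issues on tiny batches
#   hrt_batch = torch.as_tensor([[0, 0, 1], [2, 0, 3]])
#   scores = model.score_hrt(hrt_batch)               # -> FloatTensor of shape (2, 1)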
| 36.155914 | 112 | 0.639851 | [
"MIT"
] | Sina-Baharlou/pykeen | src/pykeen/models/unimodal/ermlpe.py | 6,725 | Python |
import sqlite3
from abc import ABCMeta, abstractmethod
from model.dao.daoexception import DAOException
class AbstractDAO(object):
__metaclass__ = ABCMeta
def __init__(self, conn):
self._conn = conn
"""
base CRUD operation
"""
# GENERIC CREATE FUNCTION
def _insert(self, request, parameters):
with self._conn as conn:
try:
c = conn.cursor()
c.execute(request, parameters)
conn.commit()
return c.lastrowid
except sqlite3.Error as ex:
conn.rollback()
DAOException(self, ex)
# GENERIC READ FUNCTION
def _read(self, request, parameters=None):
with self._conn as conn:
try:
c = conn.cursor()
if parameters is None:
c.execute(request)
else:
c.execute(request, parameters)
return c.fetchall()
except Exception as ex:
DAOException(self, ex)
# GENERIC UPDATE FUNCTION
def _update(self, request, parameters):
with self._conn as conn:
try:
c = conn.cursor()
c.execute(request, parameters)
conn.commit()
return True
except Exception as ex:
conn.rollback()
DAOException(self, ex)
return False
# GENERIC DELETE FUNCTION
def _delete(self, request, obj_id):
with self._conn as conn:
try:
c = conn.cursor()
c.execute(request, obj_id)
conn.commit()
return True
except Exception as ex:
conn.rollback()
DAOException(self, ex)
return False
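# Illustrative subclass sketch (table and column names are assumptions, not part of this module):
#
#   class UserDAO(AbstractDAO):
#       def create(self, name):
#           return self._insert("INSERT INTO user (name) VALUES (?)", (name,))
#       def find_all(self):
#           return self._read("SELECT * FROM user")
#       def rename(self, user_id, name):
#           return self._update("UPDATE user SET name = ? WHERE id = ?", (name, user_id))
#       def remove(self, user_id):
#           return self._delete("DELETE FROM user WHERE id = ?", (user_id,))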
| 28.090909 | 50 | 0.506472 | [
"MIT"
] | ChatNoir76/Championnat | model/dao/abstractdao.py | 1,854 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/31 0031 18:55
# @Author : Hadrianl
# @File : realtime_data_server
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.const import COMMISSION_TYPE
from spapi.spAPI import *
from spapi.sp_struct import *
import zmq
import datetime as dt
from rqalpha.api import logger
from queue import Queue, Empty
import pymongo as pmg
from threading import Thread
from collections import deque
import pandas as pd
from rqalpha.events import EVENT
import time
from rqalpha.environment import Environment
from rqalpha.model.instrument import Instrument
from .util import _convert_from_ctype
class RealtimeDataServer:
def __init__(self, sp_info, db_info, socket_info):
mongo_cli = pmg.MongoClient(db_info['host'])
admin_db = mongo_cli.get_database('admin')
admin_db.authenticate(db_info['user'], db_info['pwd'])
self._db = mongo_cli.get_database(db_info['db'])
self._col = self._db.get_collection('realtime_future_1min_')
self._col.create_index([('datetime', pmg.DESCENDING), ('code', pmg.ASCENDING)], unique=True)
self._col.create_index([('code', pmg.ASCENDING)])
self.ctx = zmq.Context()
self.trigger_socket = self.ctx.socket(zmq.PUB)
        self.trigger_socket.bind(f'tcp://*:{socket_info["trigger_port"]}')
self.prod_codes = {}
initialize()
set_login_info(**sp_info)
self._init_callback()
login()
time.sleep(3)
self._init_subscribe()
def _init_callback(self):
self._ticker_queues = {}
self._price_queues = {}
self._trigger_queue = Queue()
self._resample_thread = {}
        @on_login_reply  # called when login succeeds
        def login_reply(user_id, ret_code, ret_msg):
            if ret_code == 0:
                api_logger.info(f'@{user_id.decode()} login succeeded')
                self._init_subscribe()
            else:
                api_logger.error(f'@{user_id.decode()} login failed--errcode:{ret_code}--errmsg:{ret_msg.decode()}')
        @on_instrument_list_reply  # callback pushing product series info, triggered by load_instrument_list()
        def inst_list_reply(req_id, is_ready, ret_msg):
            if is_ready:
                api_logger.info('<product>' + f' info loaded successfully req_id:{req_id}-msg:{ret_msg.decode()}')
            else:
                api_logger.error('<product>' + f' info still loading......req_id:{req_id}-msg:{ret_msg.decode()}')
        @on_product_list_by_code_reply  # returns contract info for the given product series name
        def product_list_by_code_reply(req_id, inst_code, is_ready, ret_msg):
            if is_ready:
                if inst_code == '':
                    api_logger.info('<contract>' + f' no contract info for this product series req_id:{req_id}-msg:{ret_msg.decode()}')
                else:
                    api_logger.info('<contract>' + f' product:{inst_code.decode()} contract info loaded successfully req_id:{req_id}-msg:{ret_msg.decode()}')
            else:
                api_logger.error('<contract>' + f' product:{inst_code.decode()} contract info still loading......req_id:{req_id}-msg:{ret_msg.decode()}')
#
        @on_business_date_reply  # a trading date is returned after a successful login
        def business_date_reply(business_date):
            self.trade_date = dt.datetime.fromtimestamp(business_date)
            api_logger.info('<date>' + f' current trading day--{self.trade_date}')
        @on_ticker_update  # ticker data push
def ticker_update(ticker: SPApiTicker):
ticker_dict = _convert_from_ctype(ticker)
self._ticker_queues[ticker_dict['ProdCode']].put(ticker_dict)
api_logger.info(f'{ticker_dict}')
        @on_api_price_update  # price data push
def price_update(price: SPApiPrice):
price_dict = _convert_from_ctype(price)
self._price_queues[price_dict['ProdCode']].append(price_dict)
api_logger.info(f'{price_dict}')
        @on_connecting_reply  # called when the connection status changes
        def connecting_reply(host_id, con_status):
            api_logger.info(f'<connection>{HOST_TYPE[host_id]} status changed--{HOST_CON_STATUS[con_status]}')
# global login_flag
self.on_login_reply = login_reply
self.inst_list_reply = inst_list_reply
self.product_list_by_code_reply = product_list_by_code_reply
self.business_date_reply = business_date_reply
self.ticker_update = ticker_update
self.price_update = price_update
self.connecting_reply = connecting_reply
def _init_subscribe(self):
contract_col = self._db.get_collection('realtime_future_contract_info')
code = contract_col.find()
self.prod_codes = {c['Filler']: c['CODE'] for c in code}
for p in self.prod_codes:
self.subscribe_ticker(p)
for p in self.prod_codes:
self.subscribe_price(p)
def _resample_ticker(self, prod_code):
tickers = []
q = self._ticker_queues[prod_code]
code = self.prod_codes[prod_code]
time_diff = 0
while True:
try:
tick = q.get(timeout=1)
time_diff = tick['TickerTime'] - time.time()
print(time_diff)
except Empty:
                if tickers and time.time() % (tickers[-1]['TickerTime'] // 60) >= 61 + time_diff:  # when no new tick arrives for the next minute, a bar is generated automatically once more than 3 seconds into that minute
price_list = []
vol_list = []
d = dt.datetime.fromtimestamp(tickers[-1]['TickerTime']).replace(second=0)
for t in tickers:
price_list.append(t['Price'])
vol_list.append(t['Qty'])
o, h, l, c, v = price_list[0], max(price_list), min(price_list), price_list[-1], sum(vol_list)
self._col.update_one({'datetime': d, 'code': code},
{'$set': {'datetime': d, 'code': code, 'open': o,
'high': h, 'low': l, 'close': c, 'volume': v,
'trade_date': self.trade_date}}, upsert=True)
self._trigger_queue.put(d)
tickers.clear()
continue
if tick is None:
break
if tickers and tickers[-1]['TickerTime'] // 60 != tick['TickerTime'] // 60:
price_list = []
vol_list = []
d = dt.datetime.fromtimestamp(tickers[-1]['TickerTime']).replace(second=0)
for t in tickers:
price_list.append(t['Price'])
vol_list.append(t['Qty'])
o, h, l, c, v = price_list[0], max(price_list), min(price_list), price_list[-1], sum(vol_list)
self._col.update_one({'datetime': d, 'code': code}, {'$set': {'datetime': d, 'code': code, 'open': o,
'high': h, 'low': l, 'close': c, 'volume': v,
'trade_date': self.trade_date}}, upsert=True)
self._trigger_queue.put(d)
tickers.clear()
tickers.append(tick)
def subscribe_ticker(self, prod_code):
self._ticker_queues.setdefault(prod_code, Queue())
subscribe_ticker(prod_code, 1)
t = self._resample_thread.setdefault(prod_code, Thread(target=self._resample_ticker, args=(prod_code, )))
        if not t.is_alive():
t.setDaemon(True)
t.start()
def unsubscribe_ticker(self, prod_code):
subscribe_ticker(prod_code, 0)
q = self._ticker_queues.pop(prod_code)
t = self._resample_thread.pop(prod_code)
q.put(None)
t.join()
def subscribe_price(self, prod_code):
self._price_queues.setdefault(prod_code, deque(maxlen=1))
subscribe_price(prod_code, 1)
def unsubscribe_price(self, prod_code):
try:
self._price_queues.pop(prod_code)
finally:
subscribe_price(prod_code, 0)
def publish_bar_signal(self):
dt_list = []
while True:
d = self._trigger_queue.get()
dt_list.append(d)
print(d)
if len(dt_list) >= len(self._resample_thread) or dt.datetime.now() > d + dt.timedelta(seconds=2):
self.trigger_socket.send_pyobj(d)
dt_list.clear()
def add_contract(db_info, code):
mongo_cli = pmg.MongoClient(db_info['host'])
admin_db = mongo_cli.get_database('admin')
admin_db.authenticate(db_info['user'], db_info['pwd'])
db = mongo_cli.get_database(db_info['db'])
contract_col = db.get_collection('realtime_future_contract_info')
product_info = db.get_collection('realtime_future_product_info')
contract_col.create_index([('DATE', pmg.DESCENDING), ('CODE', pmg.ASCENDING)], unique=True)
contract_col.create_index([('CODE', pmg.ASCENDING)])
product_info.create_index([('DATE', pmg.DESCENDING), ('CLASS_CODE', pmg.ASCENDING)], unique=True)
product_info.create_index([('CLASS_CODE', pmg.ASCENDING)])
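# Illustrative wiring (all connection details below are assumptions):
#   sp_info = {...}                                            # credentials consumed by set_login_info
#   db_info = {'host': 'localhost', 'user': 'u', 'pwd': 'p', 'db': 'sp_quotes'}
#   socket_info = {'trigger_port': 5555}
#   server = RealtimeDataServer(sp_info, db_info, socket_info)
#   server.publish_bar_signal()                                # blocks, publishing one datetime per finished 1-min bar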
| 42.699552 | 134 | 0.598824 | [
"Apache-2.0"
] | hadrianl/rqalpha_kairui | rqalpha/examples/extend_api/HKMod/realtime_data_server.py | 9,854 | Python |
from pygments.style import Style
from pygments.token import (
Comment, Error, Keyword, Literal, Name, Number, Operator, String, Text
)
class BaseSixteenStyle(Style):
base00 = '#151515'
base01 = '#202020'
base02 = '#303030'
base03 = '#505050'
base04 = '#B0B0B0'
base05 = '#D0D0D0'
base06 = '#E0E0E0'
base07 = '#F5F5F5'
base08 = '#AC4142'
base09 = '#D28445'
base0a = '#F4BF75'
base0b = '#90A959'
base0c = '#75B5AA'
base0d = '#6A9FB5'
base0e = '#AA759F'
base0f = '#8F5536'
default_style = ''
background_color = base00
highlight_color = base02
styles = {
Text: base05,
Error: base08, # .err
Comment: f'italic {base03}', # .c
Comment.Preproc: base0f, # .cp
Comment.PreprocFile: base0b, # .cpf
Keyword: base0e, # .k
Keyword.Type: base08, # .kt
Name.Attribute: base0d, # .na
Name.Builtin: base0d, # .nb
Name.Builtin.Pseudo: base08, # .bp
Name.Class: base0d, # .nc
Name.Constant: base09, # .no
Name.Decorator: base09, # .nd
Name.Function: base0d, # .nf
Name.Namespace: base0d, # .nn
Name.Tag: base0e, # .nt
Name.Variable: base0d, # .nv
Name.Variable.Instance: base08, # .vi
Number: base09, # .m
Operator: base0c, # .o
Operator.Word: base0e, # .ow
Literal: base0b, # .l
String: base0b, # .s
String.Interpol: base0f, # .si
String.Regex: base0c, # .sr
String.Symbol: base09, # .ss
}
from string import capwords # noqa: E402
BaseSixteenStyle.__name__ = 'BaseSixteen{}Style'.format(
capwords('classic-dark', '-').replace('-', '')
)
globals()[BaseSixteenStyle.__name__] = globals()['BaseSixteenStyle']
del globals()['BaseSixteenStyle']
del capwords
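# Usage sketch: the class generated above is exposed as BaseSixteenClassicDarkStyle. Because this
# file name contains dashes it is normally loaded through a Pygments style entry point rather than
# a plain import; assuming the class is already in scope:
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   html = highlight("print('hi')", PythonLexer(), HtmlFormatter(style=BaseSixteenClassicDarkStyle))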
| 25.337838 | 74 | 0.570667 | [
"MIT"
] | philj56/base16-pygments | pygments_base16/base16-classic-dark.py | 1,875 | Python |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from test_op import abs_sum
from akg.utils.dsl_create import get_reduce_out_shape
from gen_random import random_gaussian
def abs_sum_run(shape, reduce_axis, keepdims, dtype, attrs):
op_attrs = [reduce_axis, keepdims]
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(abs_sum.abs_sum, [shape], [dtype], op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
expect, input1, output = gen_data(dtype, keepdims, reduce_axis, shape)
return mod, expect, (input1, output)
else:
return mod
else:
expect, input1, output = gen_data(dtype, keepdims, reduce_axis, shape)
mod = utils.op_build_test(abs_sum.abs_sum, [shape], [dtype], op_attrs, kernel_name="abs_sum", attrs=attrs)
output = utils.mod_launch(mod, (input1, output), expect=expect)
return input1, output, expect, compare_tensor(output, expect, rtol=5e-03, atol=5e-3, equal_nan=True)
def gen_data(dtype, keepdims, reduce_axis, shape):
input1 = random_gaussian(shape, miu=1, sigma=0.1)
input1 = input1.astype(dtype)
input1_abs = np.abs(input1)
expect = np.sum(input1_abs, axis=reduce_axis, keepdims=keepdims)
out_shape = get_reduce_out_shape(shape, axis=reduce_axis, keepdims=keepdims)
output = np.full(out_shape, np.nan, dtype)
return expect, input1, output
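# Illustrative shapes (assuming get_reduce_out_shape mirrors numpy's reduced shape): for
# shape=(4, 5), reduce_axis=(1,), keepdims=False, dtype="float16", `input1` is (4, 5) while
# `expect` and `output` are both (4,).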
| 43.571429 | 126 | 0.721311 | [
"Apache-2.0"
] | anyrenwei/akg | tests/common/test_run/abs_sum_run.py | 2,135 | Python |
import glob
import os
from distutils.dir_util import copy_tree
from shutil import copy
from alembic.command import init as _init
from metric.cli.conf import Conf
from metric.cli.template import Template
from metric.src import Base
from metric.src.package import Package
def init(name):
"""
## Init
    Init is the command metric uses to bootstrap a project on a pre-built foundation.
    It can be used in two ways: create the project in the current working directory (CWD)
    or in a new directory.
@param name: project name
"""
project_path = os.getcwd()
if name != '.':
project_path = os.path.join(os.getcwd(), name)
Package.make_directory(project_path)
_init(Base.base_configuration(project_path), project_path)
packages_build = {
'apps': ('resources',),
'models': tuple(),
}
for k, v in packages_build.items():
Package.make_package(os.path.join(project_path, k))
for i in v:
Package.make_package(os.path.join(project_path, k, i))
dir_build = {
'apps': ('views',),
'models': ('fields',),
'.': ('scripts',)
}
for k, v in dir_build.items():
for i in v:
Package.make_directory(os.path.join(project_path, k, i))
file_remove = ['script.py.mako']
[os.remove(os.path.join(project_path, i)) for i in file_remove]
scripts = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../scripts")
[copy(file, os.path.join(project_path, 'scripts')) for file in glob.glob(os.path.join(scripts, "*.mako"))]
os.rename(os.path.join(project_path, 'env.py'), os.path.join(project_path, 'scripts', 'env.py'))
copy_tree(os.path.join(scripts, "setup"), project_path)
for file in glob.glob(os.path.join(os.path.join(scripts, "setup"), "*.py")):
copy(file, project_path)
Conf.reset(project_path)
def make_resource(name):
"""
## Make resource
    This command creates a new "resource" from the "script" templates that are provided.
@param name: resource name
"""
t = Template()
t.template_type = 'resource'
t.make(name)
| 30.174419 | 119 | 0.656647 | [
"MIT"
] | kzulfazriawan/metric | metric/cli/__init__.py | 2,595 | Python |
import cv2
import os
image = cv2.imread("/content/drive/My Drive/DIC_personal/data/face.jpg")
cascade = cv2.CascadeClassifier("/content/drive/My Drive/DIC_personal/haarcascades/haarcascade_upperbody.xml")
#image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
face_list = cascade.detectMultiScale(image)
#face_list = cascade.detectMultiScale(image,scaleFactor=1.2, minNeighbors=2, minSize=(1,1))
color = (0, 0, 255)
if len(face_list)>0:
for face in face_list:
x, y, w, h = face
cv2.rectangle(image,(x,y),(x+w, y+h), color, thickness=2)
else:
print("No human")
cv2.imshow('Frame',image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Reference: https://qiita.com/PonDad/items/6f9e6d9397951cadc6be | 30.565217 | 110 | 0.72973 | [
"MIT"
] | Mishiba-Toshihiro/IoT_to_AWS | face_1.py | 703 | Python |
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_3 import models
class ReplicationPerformance(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'transmitted_bytes_per_sec': 'float',
'received_bytes_per_sec': 'float'
}
attribute_map = {
'transmitted_bytes_per_sec': 'transmitted_bytes_per_sec',
'received_bytes_per_sec': 'received_bytes_per_sec'
}
required_args = {
}
def __init__(
self,
transmitted_bytes_per_sec=None, # type: float
received_bytes_per_sec=None, # type: float
):
"""
Keyword args:
transmitted_bytes_per_sec (float): Total bytes transmitted per second.
received_bytes_per_sec (float): Total bytes received per second.
"""
if transmitted_bytes_per_sec is not None:
self.transmitted_bytes_per_sec = transmitted_bytes_per_sec
if received_bytes_per_sec is not None:
self.received_bytes_per_sec = received_bytes_per_sec
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicationPerformance`".format(key))
if key == "transmitted_bytes_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `transmitted_bytes_per_sec`, must be a value greater than or equal to `0.0`")
if key == "received_bytes_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `received_bytes_per_sec`, must be a value greater than or equal to `0.0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReplicationPerformance, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplicationPerformance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
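# Illustrative usage (numeric values are assumptions):
#   perf = ReplicationPerformance(transmitted_bytes_per_sec=1024.0, received_bytes_per_sec=2048.0)
#   perf.to_dict()  # -> {'transmitted_bytes_per_sec': 1024.0, 'received_bytes_per_sec': 2048.0}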
| 33.040323 | 129 | 0.584818 | [
"BSD-2-Clause"
] | Flav-STOR-WL/py-pure-client | pypureclient/flashblade/FB_2_3/models/replication_performance.py | 4,097 | Python |
'''
Author: Guanghan Ning
E-mail: [email protected]
October 22th, 2018
Unit test for graph.
'''
import os
import sys
sys.path.append(os.path.abspath("../utils/"))
from graph import *
def test_normalize_diagraph():
num_node = 15
self_link = [(i, i) for i in range(num_node)]
neighbor_link = [(0, 1), (1, 2), (3, 4), (4, 5), (2, 8),
(8, 7), (7, 6), (8, 12), (12, 9), (9, 10),
(10, 11), (9, 3), (12, 13), (13, 14)]
edge = self_link + neighbor_link
print("Edge: \n{}\n".format(edge))
hop_dis = get_hop_distance(num_node, edge, max_hop=1)
print("Hop_dis: \n{}\n".format(hop_dis))
max_hop = 1
dilation = 1
valid_hop = range(0, max_hop + 1, dilation)
print("Valid_hop: \n{}\n".format(valid_hop))
adjacency = np.zeros((num_node, num_node))
for hop in valid_hop:
adjacency[hop_dis == hop] = 1
print("Adjacency matrix: \n{}\n".format(adjacency))
normalize_adjacency = normalize_digraph(adjacency)
print("Normalized adjacency matrix: \n{}\n".format(normalize_adjacency))
return
if __name__ == "__main__":
test_normalize_diagraph()
| 26.044444 | 76 | 0.598123 | [
"MIT"
] | MikeoPerfect/video-to-pose3D | pose_trackers/lighttrack/graph/unit_test/test_graph.py | 1,172 | Python |
from django.shortcuts import render
def home(request):
"""
    View function that simply renders the Ionic Angular index.html.
"""
return render(request, 'www/index.html')
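# Illustrative URL wiring (module layout is an assumption):
#   from django.urls import path
#   from .views import home
#   urlpatterns = [path('', home, name='home')]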
| 19 | 56 | 0.684211 | [
"Apache-2.0"
] | broden-wanner/practicality | practicality/frontend/views.py | 190 | Python |
"""
Remove superclass labels from the train dataset and keep them only in the validation dataset.
Example command: python create_rcv1_superclass_split.py --file <path/to/rcv1.json> --seed 42
"""
import argparse
import jsonlines
from collections import Counter
import numpy as np
import random
import copy
import os
import json
def create_dataset_split(args):
random.seed(args.seed)
# Read the JSON file containing one JSON per line and store the dict
all_docs = []
with jsonlines.open(args.file) as reader:
for obj in reader:
all_docs.append(obj)
# Get a list of all the labels
label_statistics = Counter()
for doc in all_docs:
label_statistics.update(doc['bip:topics:1.0'])
all_labels = list(label_statistics)
# Ignore superclass labels during training
super_class_labels = ['C15', 'C151', 'C17', 'C18', 'C31', 'C33', 'C41', 'E12', 'E13', 'E14', 'E21', 'E31', 'E41', 'E51', 'G15', 'M13', 'M14']
train_labels = [label for label in all_labels if label not in super_class_labels]
# Remove labels in train_labels from the train data
new_docs = []
for doc in all_docs:
doc['bip:topics:1.0'] = [topic for topic in doc['bip:topics:1.0'] if topic in train_labels]
if len(doc['bip:topics:1.0']) != 0:
new_docs.append(doc)
# Create a new file
# Store list of dicts as a json
save_name = 'rcv1_superclass_{}.json'.format(args.seed)
args.save_dir = os.path.split(args.file)[0]
f = open(os.path.join(args.save_dir, save_name), 'w', encoding="ISO-8859-1")
for document in new_docs:
f.write(str(json.dumps(document)) + '\n')
f.close()
def main():
parser = argparse.ArgumentParser()
# Dataset Arguments
parser.add_argument("--file", default='', type=str, help="")
parser.add_argument("--seed", default=42, type=int, help="Random seed for splitting classes.")
args = parser.parse_args()
create_dataset_split(args)
if __name__ == '__main__':
main() | 29.632353 | 145 | 0.664516 | [
"MIT"
] | princeton-nlp/semsup | run_rcv1/preprocessing/create_rcv1_superclass_split.py | 2,015 | Python |
#!/usr/bin/env python
"""Convert *.json, *.csv and other text data files to js for local use and avoid ajax call.
"""
import optparse
from os import listdir
from os.path import abspath, isfile, isdir, join, splitext, basename
import json;
#curdir = os.path.abspath('.')
curdir = "."
filter_text_ext = [".json", ".csv"]
filter_binary_ext = []
def jsfy_file(path, basedir, fout):
fname = basename(path)
if(fname.startswith(".")):
return
#print(path, basedir)
if(not path.startswith(basedir)):
return
filename, extname = splitext( path )
#print( extname )
if(extname in filter_text_ext):
res_key = path[ len(basedir) : ]
print( res_key + " -> " + path )
fin = open(path, "r")
txt = json.dumps( fin.read() )
fout.write("jsfy_res[\"" + res_key + "\"] = " + txt + ";\n\n");
#elif(extname in filter_binary_ext):
#
pass
def jsfy_dir(path, basedir, fout):
if(not path.endswith("/")):
path = path + "/"
fname = basename(path)
if(fname.startswith(".")):
return
#print(path, basedir)
if(not path.startswith(basedir)):
return
#print( path + ":" )
for f in listdir(path):
subpath = join(path,f)
if( isfile(subpath) ):
jsfy_file(subpath, basedir, fout)
elif( isdir(subpath) ):
jsfy_dir(subpath, basedir, fout)
def main():
"""The entry point for this script."""
usage = """usage: %prog [dir] [-b basedir] [-o jsfile]
example:
%prog
%prog assets -o js/jsfy_res.js
"""
parser = optparse.OptionParser(usage)
parser.add_option("-b", "--base", dest="basedir", help="base dir")
parser.add_option("-o", "--output", dest="outputpath", help="export js file path")
(options, args) = parser.parse_args()
if( isinstance(options.basedir, str)):
basedir = options.basedir
else:
basedir = "."
basedir = abspath(basedir)
if( isinstance(options.outputpath, str)):
outputpath = options.outputpath
else:
outputpath ="./jsfy_res.js"
fout = open( outputpath, "w" )
fout.write("// generated with jsfy.py, v0.1 (https://github.com/floatinghotpot/jsfy)\n\n" )
fout.write("var jsfy_res = jsfy_res || {};\n\n" )
if(not basedir.endswith("/")):
basedir = basedir + "/"
for f in args:
f = abspath(f)
if( isfile(f) ): jsfy_file(f,basedir,fout)
elif( isdir(f) ): jsfy_dir(f,basedir,fout)
fout.close()
# end of main()
if __name__ == "__main__":
main()
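# Illustrative run (paths and file contents are assumptions):
#   python jsfy.py assets -b assets -o js/jsfy_res.js
# with assets/data/config.json containing {"debug": true} emits roughly:
#   jsfy_res["data/config.json"] = "{\"debug\": true}";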
| 26.060606 | 95 | 0.584109 | [
"MIT"
] | floatinghotpot/ajax-local | tools/jsfy.py | 2,580 | Python |
#!/usr/bin/env python
from netmiko import ConnectHandler
from getpass import getpass
from datetime import datetime
device = {
'device_type': 'arista_eos',
'ip': 'arista1.lasthop.io',
'username': 'pyclass',
'password': getpass(),
'global_delay_factor': 5,
'session_log': 'arista.txt',
}
start = datetime.now()
net_connect = ConnectHandler(**device)
output = net_connect.send_command('show ip arp')
print(output)
print()
print("Elapsed time: {}".format(datetime.now() - start))
| 21.956522 | 56 | 0.693069 | [
"Apache-2.0"
] | grenn72/pynet-ons-feb19 | nornir/netmiko_test.py | 505 | Python |
try:
from heat.common.i18n import _
except ImportError:
pass
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import clients
from heat.engine import properties
from vnc_api import vnc_api
from contrail_heat.resources import contrail
try:
from heat.openstack.common import log as logging
except ImportError:
from oslo_log import log as logging
import uuid
import copy
LOG = logging.getLogger(__name__)
class NetworkPolicy(contrail.ContrailResource):
PROPERTIES = (
NAME, ENTRIES,
) = (
'name', 'entries',
)
_rule_schema = {
"policy_rule": properties.Schema(
properties.Schema.LIST,
_('Array of policy rules'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
"direction": properties.Schema(
properties.Schema.STRING,
_('Direction of policy'),
constraints=[
constraints.AllowedValues(['<>', '<', '>']),
],
default='<>'
),
"protocol": properties.Schema(
properties.Schema.STRING,
_('Protocol to match'),
default='any'
),
"src_ports": properties.Schema(
properties.Schema.LIST,
_('Array of src ports to match'),
required=True,
schema=properties.Schema(
properties.Schema.MAP,
schema={
"start_port": properties.Schema(
properties.Schema.INTEGER,
_('start port to match'),
required=True
),
"end_port": properties.Schema(
properties.Schema.INTEGER,
_('end port to match'),
required=True
),
}
)
),
"dst_ports": properties.Schema(
properties.Schema.LIST,
_('Array of dst ports to match'),
required=True,
schema=properties.Schema(
properties.Schema.MAP,
schema={
"start_port": properties.Schema(
properties.Schema.INTEGER,
_('start port to match'),
required=True
),
"end_port": properties.Schema(
properties.Schema.INTEGER,
_('end port to match'),
required=True
),
}
)
),
"dst_addresses": properties.Schema(
properties.Schema.LIST,
_('Array of dst addresses to match'),
required=True,
schema=properties.Schema(
properties.Schema.MAP,
schema={
"virtual_network": properties.Schema(
properties.Schema.STRING,
_('Virtual network to match'),
required=True
),
}
)
),
"src_addresses": properties.Schema(
properties.Schema.LIST,
_('Array of src addresses to match'),
required=True,
schema=properties.Schema(
properties.Schema.MAP,
schema={
"virtual_network": properties.Schema(
properties.Schema.STRING,
_('Virtual network to match'),
required=True
),
}
)
),
"action_list": properties.Schema(
properties.Schema.MAP,
                        _('List of actions to apply when the rule matches'),
update_allowed=True,
required=True,
schema={
"simple_action": properties.Schema(
properties.Schema.STRING,
_('Simple Action'),
update_allowed=True,
default='pass'
),
"apply_service": properties.Schema(
properties.Schema.LIST,
_('Apply service'),
update_allowed=True,
),
"mirror_to": properties.Schema(
properties.Schema.STRING,
_('Mirror to'),
update_allowed=True,
),
}
),
}
),
required=True,
update_allowed=True,
),
}
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the policy'),
required=True
),
ENTRIES: properties.Schema(
properties.Schema.MAP,
_('Policy entries'),
schema=_rule_schema,
update_allowed=True,
required=True
)
}
attributes_schema = {
"name": attributes.Schema(
_('The name of the policy.'),
),
"fq_name": attributes.Schema(
_('FQ name of this policy.'),
),
"tenant_id": attributes.Schema(
_('The tenant owning this network.'),
),
"rules": attributes.Schema(
_('List of rules.'),
),
"show": attributes.Schema(
_('All attributes.'),
),
}
def fix_apply_service(self, props):
for policy_rule in props['entries']['policy_rule']:
for index, service in enumerate(
policy_rule['action_list']['apply_service'] or []):
try:
si_obj = self.vnc_lib().service_instance_read(id=service)
except:
si_obj = self.vnc_lib().service_instance_read(
fq_name_str=service)
policy_rule['action_list']['apply_service'][
index] = si_obj.get_fq_name_str()
def fix_mirror_to(self, props):
for policy_rule in props['entries']['policy_rule']:
service = policy_rule['action_list']['mirror_to']
if service:
try:
si_obj = self.vnc_lib().service_instance_read(id=service)
except:
si_obj = self.vnc_lib().service_instance_read(
fq_name_str=service)
policy_rule['action_list'][
'mirror_to'] = vnc_api.MirrorActionType(
analyzer_name=si_obj.get_fq_name_str())
def fix_vn_to_fqname(self, props):
for policy_rule in props['entries']['policy_rule']:
for dest_address in policy_rule['dst_addresses']:
try:
dest_address['virtual_network'] = ':'.join(
self.vnc_lib().id_to_fq_name(
dest_address['virtual_network']))
except Exception:
# the user input is already an fq_name_string
pass
for src_address in policy_rule['src_addresses']:
try:
src_address['virtual_network'] = ':'.join(
self.vnc_lib().id_to_fq_name(
src_address['virtual_network']))
except Exception:
# the user input is already an fq_name_string
pass
update_allowed_keys = ('Properties',)
@contrail.set_auth_token
def handle_create(self):
props = {}
props['entries'] = copy.deepcopy(self.properties['entries'])
self.fix_vn_to_fqname(props)
self.fix_apply_service(props)
self.fix_mirror_to(props)
tenant_id = self.stack.context.tenant_id
project_obj = self.vnc_lib().project_read(id=str(uuid.UUID(tenant_id)))
np_obj = vnc_api.NetworkPolicy(name=self.properties[self.NAME],
parent_obj=project_obj)
np_obj.set_network_policy_entries(
vnc_api.PolicyEntriesType.factory(**props['entries']))
np_uuid = super(NetworkPolicy, self).resource_create(np_obj)
self.resource_id_set(np_uuid)
@contrail.set_auth_token
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
props = {}
props['entries'] = copy.deepcopy(prop_diff['entries'])
self.fix_vn_to_fqname(props)
self.fix_apply_service(props)
self.fix_mirror_to(props)
try:
np_obj = self.vnc_lib().network_policy_read(id=self.resource_id)
except vnc_api.NoIdError:
LOG.warn(_("Network Policy %s not found.") % self.name)
raise
except:
LOG.warn(_("Unknown error."))
raise
np_obj.set_network_policy_entries(
vnc_api.PolicyEntriesType.factory(**props['entries']))
self.vnc_lib().network_policy_update(np_obj)
@contrail.set_auth_token
def _show_resource(self):
np_obj = self.vnc_lib().network_policy_read(id=self.resource_id)
dict = {}
dict['name'] = np_obj.get_display_name()
dict['fq_name'] = np_obj.get_fq_name_str()
rules = []
entries = np_obj.get_network_policy_entries()
if entries:
for rule in entries.get_policy_rule():
policy_rule = {}
policy_rule['direction'] = rule.get_direction()
policy_rule['protocol'] = rule.get_protocol()
policy_rule['dst_addresses'] = []
for addr in rule.get_dst_addresses() or []:
policy_rule['dst_addresses'].append(
addr.get_virtual_network())
a_list = rule.get_action_list()
policy_rule['action_list'] = {
'simple_action': a_list.get_simple_action(),
'apply_service': a_list.get_apply_service(),
'mirror_to': a_list.get_mirror_to()
}
policy_rule['dst_ports'] = rule.get_dst_ports()
policy_rule['application'] = rule.get_application()
policy_rule['src_addresses'] = []
for addr in rule.get_src_addresses() or []:
policy_rule['src_addresses'].append(
addr.get_virtual_network())
policy_rule['src_ports'] = rule.get_src_ports()
rules.append(policy_rule)
dict['rules'] = rules
return dict
@contrail.set_auth_token
def handle_delete(self):
try:
self.vnc_lib().network_policy_delete(id=self.resource_id)
except Exception:
pass
def resource_mapping():
return {
'OS::Contrail::NetworkPolicy': NetworkPolicy,
}
| 39.36859 | 81 | 0.454124 | [
"Apache-2.0"
] | atsgen/tf-heat-plugin | contrail_heat/resources/network_policy.py | 12,283 | Python |
"""
Serializer fields for django_hal
"""
from collections import OrderedDict
from django.utils.http import urlencode
try:  # NoReverseMatch is referenced in QueryField.get_url below
    from django.urls import NoReverseMatch
except ImportError:  # older Django releases
    from django.core.urlresolvers import NoReverseMatch
from rest_framework import serializers
from .utils import reverse
class LinksField(serializers.DictField):
"""HAL-style _links field.
Parameters
----------
*args : tuple
A tuple representing the relation name, and arguments to
reverse the url. Example: `(name, urlpattern, {'pk', 'pk'})`.
name : str
The string used to identify the url in the final output.
urlpattern : str
A named urlpattern.
kwargs : dict
The kwargs to pass (with the urlpattern) to `reverse`.
This is a dict where the key is the url kwarg, and the value is the
attribute to lookup on the instance. So, `{'user', 'pk'}` would
translate to `{'user': getattr(instance, 'pk')}`.
Example
-------
MySerializer(serializers.Serializer):
_links = LinksField(
('self', 'namespace:view-name', {'pk': 'pk'})
)
# Outputs:
#
# {
# '_links': {
# 'self': 'https://.../my-resource/34'
# }
# }
A shorthand syntax is available to reduce the repetitiveness of
`{'pk': 'pk'}`, when both the kwarg and the instance attribute name
are the same.
('ref', 'urlpattern', 'pk')
is equivalent to
('ref', 'urlpattern', {'pk': 'pk'})
In a full example that looks like:
MySerializer(serializers.Serializer):
_links = LinksField(
('self', 'namespace:view-name', 'pk')
)
# Outputs:
#
# {
# '_links': {
# 'self': { 'href': 'https://.../my-resource/34' }
# }
# }
"""
def __init__(self, *links):
super(LinksField, self).__init__(read_only=True)
self.links = links
def to_representation(self, instance):
"""Return an ordered dictionary of HAL-style links."""
request = self.context.get('request')
ret = OrderedDict()
for link in self.links:
name = link[0]
ret[name] = self.to_link(request, instance, *link[1:])
return ret
def get_attribute(self, instance, *args, **kwargs):
"""Return the whole instance, instead of looking up an attribute value.
Implementation note: We do this because `Serializer.to_representation`
builds the list of serializer fields with something like:
for field in serializer_fields:
field.to_representation(field.get_attribute(instance))
Since we need the instance in `to_representation` so we can query arbitrary
attributes on it to build urls, we simply have to return the instance here.
"""
return instance
def to_link(self, request, instance, urlpattern, kwargs=None,
query_kwargs=None):
"""Return an absolute url for the given urlpattern."""
if query_kwargs:
query_kwargs = {k: getattr(instance, v) for k, v in query_kwargs.items()}
if not kwargs:
url = reverse(urlpattern, request=request)
if not query_kwargs:
return {'href': url}
return {'href': '%s?%s' % (url, urlencode(query_kwargs))}
if isinstance(kwargs, basestring):
# `(ref, urlpattern, string)` where `string` is equivalent to
# `{string: string}`
url = reverse(urlpattern,
kwargs={kwargs: getattr(instance, kwargs)},
request=request)
if not query_kwargs:
return {'href': url}
return {'href': '%s?%s' % (url, urlencode(query_kwargs))}
reverse_kwargs = {}
if kwargs:
for k, v in kwargs.items():
reverse_kwargs[k] = getattr(instance, v)
try:
url = reverse(urlpattern, kwargs=reverse_kwargs, request=request)
if not query_kwargs:
return {'href': url}
return {'href': '%s?%s' % (url, urlencode(query_kwargs))}
except NoReverseMatch:
return None
class QueryField(serializers.HyperlinkedIdentityField):
"""Return the query url that lists related objects in a reverse relation.
Example
-------
.. code:: python
class Book:
title = CharField()
author = ForeignKey(Author)
class Author:
name = CharField()
url('books/query/author/<pk>', ..., name='book-query-by-author')
class AuthorSerializer:
name = CharField()
books = QueryField('book-query-by-author')
>>> nick = Author(name='Nick').save()
>>> book1 = Book(title='Part 1', author=nick)
>>> book2 = Book(title='Part 2', author=nick)
>>> AuthorSerializer(nick)
{
'name': 'Nick',
'books': '../books/query/author/1',
}
Raises
------
django.*.NoReverseMatch
if the `view_name` and `lookup_field` attributes are not configured to
correctly match the URL conf.
"""
lookup_field = 'pk'
def __init__(self, view_name, url_kwarg=None, query_kwarg=None, **kwargs):
assert url_kwarg is not None or query_kwarg is not None, 'The `url_kwarg` argument is required.' # noqa
kwargs['lookup_field'] = kwargs.get('lookup_field', self.lookup_field)
self.url_kwarg = url_kwarg
self.query_kwarg = query_kwarg
super(QueryField, self).__init__(view_name, **kwargs)
def get_url(self, obj, view_name, request, response_format):
lookup_value = getattr(obj, self.lookup_field)
if self.url_kwarg:
kwargs = {self.url_kwarg: lookup_value}
return reverse(view_name,
kwargs=kwargs,
request=request,
format=response_format)
url = reverse(view_name,
request=request,
format=response_format)
query_kwargs = {self.query_kwarg: lookup_value}
return u'%s?%s' % (url, urlencode(query_kwargs))
| 31.176471 | 112 | 0.556447 | [
"MIT"
] | jacktrades/django-hal | django_hal/fields.py | 6,360 | Python |
from app import app
if __name__=="__main__":
app.run(debug=True) | 17.25 | 24 | 0.710145 | [
"MIT"
] | 3salles/rankaa | backend/run.py | 69 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module has all statistic related transforms."""
from __future__ import absolute_import
from __future__ import division
import heapq
import math
import sys
from builtins import round
from apache_beam import coders
from apache_beam import typehints
from apache_beam.transforms.core import *
from apache_beam.transforms.ptransform import PTransform
__all__ = [
'ApproximateUnique',
]
# Type variables
T = typehints.TypeVariable('T')
K = typehints.TypeVariable('K')
V = typehints.TypeVariable('V')
class ApproximateUnique(object):
"""
Hashes input elements and uses those to extrapolate the size of the entire
set of hash values by assuming the rest of the hash values are as densely
distributed as the sample space.
"""
_NO_VALUE_ERR_MSG = 'Either size or error should be set. Received {}.'
_MULTI_VALUE_ERR_MSG = 'Either size or error should be set. ' \
'Received {size = %s, error = %s}.'
_INPUT_SIZE_ERR_MSG = 'ApproximateUnique needs a size >= 16 for an error ' \
'<= 0.50. In general, the estimation error is about ' \
'2 / sqrt(sample_size). Received {size = %s}.'
_INPUT_ERROR_ERR_MSG = 'ApproximateUnique needs an estimation error ' \
'between 0.01 and 0.50. Received {error = %s}.'
@staticmethod
def parse_input_params(size=None, error=None):
"""
Check if input params are valid and return sample size.
:param size: an int not smaller than 16, which we would use to estimate
number of unique values.
:param error: max estimation error, which is a float between 0.01 and 0.50.
If error is given, sample size will be calculated from error with
_get_sample_size_from_est_error function.
:return: sample size
:raises:
ValueError: If both size and error are given, or neither is given, or
values are out of range.
"""
if None not in (size, error):
raise ValueError(ApproximateUnique._MULTI_VALUE_ERR_MSG % (size, error))
elif size is None and error is None:
raise ValueError(ApproximateUnique._NO_VALUE_ERR_MSG)
elif size is not None:
if not isinstance(size, int) or size < 16:
raise ValueError(ApproximateUnique._INPUT_SIZE_ERR_MSG % (size))
else:
return size
else:
if error < 0.01 or error > 0.5:
raise ValueError(ApproximateUnique._INPUT_ERROR_ERR_MSG % (error))
else:
return ApproximateUnique._get_sample_size_from_est_error(error)
@staticmethod
def _get_sample_size_from_est_error(est_err):
"""
:return: sample size
Calculate sample size from estimation error
"""
    # math.ceil in python2.7 returns a float, while it returns an int in python3.
return int(math.ceil(4.0 / math.pow(est_err, 2.0)))
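  # Worked example for the conversion above: error=0.05 -> ceil(4 / 0.05**2) = 1600 samples;
  # error=0.01 -> 40000 samples.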
@typehints.with_input_types(T)
@typehints.with_output_types(int)
class Globally(PTransform):
""" Approximate.Globally approximate number of unique values"""
def __init__(self, size=None, error=None):
self._sample_size = ApproximateUnique.parse_input_params(size, error)
def expand(self, pcoll):
coder = coders.registry.get_coder(pcoll)
return pcoll \
| 'CountGlobalUniqueValues' \
>> (CombineGlobally(ApproximateUniqueCombineFn(self._sample_size,
coder)))
@typehints.with_input_types(typehints.KV[K, V])
@typehints.with_output_types(typehints.KV[K, int])
class PerKey(PTransform):
""" Approximate.PerKey approximate number of unique values per key"""
def __init__(self, size=None, error=None):
self._sample_size = ApproximateUnique.parse_input_params(size, error)
def expand(self, pcoll):
coder = coders.registry.get_coder(pcoll)
return pcoll \
| 'CountPerKeyUniqueValues' \
>> (CombinePerKey(ApproximateUniqueCombineFn(self._sample_size,
coder)))
class _LargestUnique(object):
"""
An object to keep samples and calculate sample hash space. It is an
accumulator of a combine function.
"""
_HASH_SPACE_SIZE = 2.0 * sys.maxsize
def __init__(self, sample_size):
self._sample_size = sample_size
self._min_hash = sys.maxsize
self._sample_heap = []
self._sample_set = set()
def add(self, element):
"""
    :param element: an element from pcoll.
    :return: boolean, whether the value is (kept) in the heap
Adds a value to the heap, returning whether the value is (large enough to
be) in the heap.
"""
if len(self._sample_heap) >= self._sample_size and element < self._min_hash:
return False
if element not in self._sample_set:
self._sample_set.add(element)
heapq.heappush(self._sample_heap, element)
if len(self._sample_heap) > self._sample_size:
temp = heapq.heappop(self._sample_heap)
self._sample_set.remove(temp)
self._min_hash = self._sample_heap[0]
elif element < self._min_hash:
self._min_hash = element
return True
def get_estimate(self):
"""
:return: estimation count of unique values
If heap size is smaller than sample size, just return heap size.
Otherwise, takes into account the possibility of hash collisions,
which become more likely than not for 2^32 distinct elements.
Note that log(1+x) ~ x for small x, so for sampleSize << maxHash
log(1 - sample_size/sample_space) / log(1 - 1/sample_space) ~ sample_size
and hence estimate ~ sample_size * hash_space / sample_space
as one would expect.
Given sample_size / sample_space = est / hash_space
est = sample_size * hash_space / sample_space
Given above sample_size approximate,
est = log1p(-sample_size/sample_space) / log1p(-1/sample_space)
* hash_space / sample_space
"""
if len(self._sample_heap) < self._sample_size:
return len(self._sample_heap)
else:
sample_space_size = sys.maxsize - 1.0 * self._min_hash
est = (math.log1p(-self._sample_size / sample_space_size)
/ math.log1p(-1 / sample_space_size)
* self._HASH_SPACE_SIZE
/ sample_space_size)
return round(est)
class ApproximateUniqueCombineFn(CombineFn):
"""
ApproximateUniqueCombineFn computes an estimate of the number of
unique values that were combined.
"""
def __init__(self, sample_size, coder):
self._sample_size = sample_size
self._coder = coder
def create_accumulator(self, *args, **kwargs):
return _LargestUnique(self._sample_size)
def add_input(self, accumulator, element, *args, **kwargs):
try:
accumulator.add(hash(self._coder.encode(element)))
return accumulator
except Exception as e:
      raise RuntimeError("Runtime exception: %s" % e)
  # created an issue https://issues.apache.org/jira/browse/BEAM-7285 to speed up
# merge process.
def merge_accumulators(self, accumulators, *args, **kwargs):
merged_accumulator = self.create_accumulator()
for accumulator in accumulators:
for i in accumulator._sample_heap:
merged_accumulator.add(i)
return merged_accumulator
@staticmethod
def extract_output(accumulator):
return accumulator.get_estimate()
def display_data(self):
return {'sample_size': self._sample_size}
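# Minimal pipeline sketch (element values are assumptions):
#   import apache_beam as beam
#   with beam.Pipeline() as p:
#       _ = (p
#            | beam.Create(['a', 'b', 'a', 'c'])
#            | ApproximateUnique.Globally(size=16)
#            | beam.Map(print))   # prints 3 (exact here, since the distinct count < sample size)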
| 34.529661 | 80 | 0.692968 | [
"Apache-2.0",
"BSD-3-Clause"
] | TimvdLippe/beam | sdks/python/apache_beam/transforms/stats.py | 8,149 | Python |
from meta_policy_search.utils import logger
from meta_policy_search.meta_algos.base import MAMLAlgo
from meta_policy_search.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer
import tensorflow as tf
from collections import OrderedDict
class TRPOMAML(MAMLAlgo):
"""
Algorithm for TRPO MAML
Args:
policy (Policy): policy object
name (str): tf variable scope
        step_size (float): trust region size for the meta policy optimization through TRPO
inner_type (str): One of 'log_likelihood', 'likelihood_ratio', 'dice', choose which inner update to use
exploration (bool): whether to use E-MAML or MAML
inner_lr (float) : gradient step size used for inner step
meta_batch_size (int): number of meta-learning tasks
num_inner_grad_steps (int) : number of gradient updates taken per maml iteration
trainable_inner_step_size (boolean): whether make the inner step size a trainable variable
"""
def __init__(
self,
*args,
name="trpo_maml",
step_size=0.01,
inner_type='likelihood_ratio',
exploration=False,
**kwargs
):
super(TRPOMAML, self).__init__(*args, **kwargs)
assert inner_type in ["log_likelihood", "likelihood_ratio", "dice"]
self.step_size = step_size
self.inner_type = inner_type
self.name = name
self._optimization_keys = ['observations', 'actions', 'advantages', 'agent_infos']
self.exploration = exploration
if exploration: # add adjusted average rewards tp optimization keys
self._optimization_keys.append('adj_avg_rewards')
self.optimizer = ConjugateGradientOptimizer()
self.build_graph()
def _adapt_objective_sym(self, action_sym, adv_sym, dist_info_old_sym, dist_info_new_sym):
if self.inner_type == 'likelihood_ratio':
with tf.variable_scope("likelihood_ratio"):
likelihood_ratio_adapt = self.policy.distribution.likelihood_ratio_sym(action_sym,
dist_info_old_sym,
dist_info_new_sym)
with tf.variable_scope("surrogate_loss"):
surr_obj_adapt = -tf.reduce_mean(likelihood_ratio_adapt * adv_sym)
elif self.inner_type == 'log_likelihood':
with tf.variable_scope("log_likelihood"):
log_likelihood_adapt = self.policy.distribution.log_likelihood_sym(action_sym, dist_info_new_sym)
with tf.variable_scope("surrogate_loss"):
surr_obj_adapt = -tf.reduce_mean(log_likelihood_adapt * adv_sym)
else:
raise NotImplementedError
return surr_obj_adapt
def build_graph(self):
"""
Creates the computation graph
"""
""" Create Variables """
# assert self.num_inner_grad_steps == 1 or not self.exploration, "Not sure if the math is right for more than 1 inner step"
with tf.variable_scope(self.name):
self.step_sizes = self._create_step_size_vars()
""" --- Build inner update graph for adapting the policy and sampling trajectories --- """
# this graph is only used for adapting the policy and not computing the meta-updates
self.adapted_policies_params, self.adapt_input_ph_dict = self._build_inner_adaption()
""" ----- Build graph for the meta-update ----- """
self.meta_op_phs_dict = OrderedDict()
obs_phs, action_phs, adv_phs, dist_info_old_phs, all_phs_dict = self._make_input_placeholders('step0')
self.meta_op_phs_dict.update(all_phs_dict)
distribution_info_vars, current_policy_params = [], []
all_surr_objs, all_inner_kls = [], []
for i in range(self.meta_batch_size):
dist_info_sym = self.policy.distribution_info_sym(obs_phs[i], params=None)
distribution_info_vars.append(dist_info_sym) # step 0
current_policy_params.append(self.policy.policy_params) # set to real policy_params (tf.Variable)
initial_distribution_info_vars = distribution_info_vars
initial_action_phs = action_phs
with tf.variable_scope(self.name):
""" Inner updates"""
for step_id in range(1, self.num_inner_grad_steps+1):
surr_objs, adapted_policy_params = [], []
# inner adaptation step for each task
for i in range(self.meta_batch_size):
surr_loss = self._adapt_objective_sym(action_phs[i], adv_phs[i], dist_info_old_phs[i], distribution_info_vars[i])
adapted_params_var = self._adapt_sym(surr_loss, current_policy_params[i])
adapted_policy_params.append(adapted_params_var)
surr_objs.append(surr_loss)
all_surr_objs.append(surr_objs)
# Create new placeholders for the next step
obs_phs, action_phs, adv_phs, dist_info_old_phs, all_phs_dict = self._make_input_placeholders('step%i' % step_id)
self.meta_op_phs_dict.update(all_phs_dict)
# dist_info_vars_for_next_step
distribution_info_vars = [self.policy.distribution_info_sym(obs_phs[i], params=adapted_policy_params[i])
for i in range(self.meta_batch_size)]
current_policy_params = adapted_policy_params
""" Outer objective """
surr_objs, outer_kls = [], []
# Create placeholders
# meta-objective
for i in range(self.meta_batch_size):
likelihood_ratio = self.policy.distribution.likelihood_ratio_sym(action_phs[i], dist_info_old_phs[i],
distribution_info_vars[i])
outer_kl = tf.reduce_mean(self.policy.distribution.kl_sym(dist_info_old_phs[i], distribution_info_vars[i]))
surr_obj = - tf.reduce_mean(likelihood_ratio * adv_phs[i])
if self.exploration:
# add adj_avg_reward placeholder
adj_avg_rewards = tf.placeholder(dtype=tf.float32, shape=[None], name='adj_avg_rewards' + '_' + str(self.num_inner_grad_steps) + '_' + str(i))
self.meta_op_phs_dict['step%i_task%i_%s' % (self.num_inner_grad_steps, i, 'adj_avg_rewards')] = adj_avg_rewards
log_likelihood_inital = self.policy.distribution.log_likelihood_sym(initial_action_phs[i],
initial_distribution_info_vars[i])
surr_obj += - tf.reduce_mean(adj_avg_rewards) * tf.reduce_mean(log_likelihood_inital)
surr_objs.append(surr_obj)
outer_kls.append(outer_kl)
mean_outer_kl = tf.reduce_mean(tf.stack(outer_kls))
""" Mean over meta tasks """
meta_objective = tf.reduce_mean(tf.stack(surr_objs, 0))
self.optimizer.build_graph(
loss=meta_objective,
target=self.policy,
input_ph_dict=self.meta_op_phs_dict,
leq_constraint=(mean_outer_kl, self.step_size),
)
def optimize_policy(self, all_samples_data, log=True):
"""
Performs MAML outer step
Args:
all_samples_data (list) : list of lists of lists of samples (each is a dict) split by gradient update and
meta task
log (bool) : whether to log statistics
Returns:
None
"""
meta_op_input_dict = self._extract_input_dict_meta_op(all_samples_data, self._optimization_keys)
logger.log("Computing KL before")
mean_kl_before = self.optimizer.constraint_val(meta_op_input_dict)
logger.log("Computing loss before")
loss_before = self.optimizer.loss(meta_op_input_dict)
logger.log("Optimizing")
self.optimizer.optimize(meta_op_input_dict)
logger.log("Computing loss after")
loss_after = self.optimizer.loss(meta_op_input_dict)
logger.log("Computing KL after")
mean_kl = self.optimizer.constraint_val(meta_op_input_dict)
if log:
logger.logkv('MeanKLBefore', mean_kl_before)
logger.logkv('MeanKL', mean_kl)
logger.logkv('LossBefore', loss_before)
logger.logkv('LossAfter', loss_after)
logger.logkv('dLoss', loss_before - loss_after) | 45.578125 | 162 | 0.624272 | [
"MIT"
] | Manifold-Computing/MMAML-rl | meta_policy_search/meta_algos/trpo_maml.py | 8,751 | Python |
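The meta-objective assembled in build_graph() above reduces, per task, to a negated likelihood-ratio-weighted advantage, averaged over tasks and then optimized by TRPO under a mean-KL constraint. The snippet below is an illustrative numpy restatement of that objective, not the TensorFlow graph itself; the array names are placeholders for post-adaptation rollout data.

import numpy as np

def surrogate_loss(logp_new, logp_old, advantages):
    # Negative likelihood-ratio surrogate for one task (cf. the per-task
    # surr_obj in build_graph).
    likelihood_ratio = np.exp(logp_new - logp_old)
    return -np.mean(likelihood_ratio * advantages)

def meta_objective(per_task_batches):
    # per_task_batches: one (logp_new, logp_old, advantages) tuple per
    # meta-task; TRPO minimizes the mean subject to the mean-KL trust region.
    return np.mean([surrogate_loss(*batch) for batch in per_task_batches])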
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo.error.client import *
from py2neo.error.server import *
| 33.238095 | 74 | 0.747851 | [
"Apache-2.0"
] | alaalqadi/py2neo | py2neo/error/__init__.py | 698 | Python |
import json
import os
import pandas as pd
import geopandas as gpd
import sys
sys.path.append('scripts/')
from polygon import Collection, Footprint
class Writer:
"""
Class that stores smart label values per instance
"""
def __init__(self, filename):
"""
Class initialization.
:param filename: name of the file to store the data, str
"""
self.filename = filename
self.content = {}
def add(self, instance, result):
"""
Function that adds an instance with its smart labels to the collection
:param instance: name of instance, str
:param result: smart labels, dict {label_name: label_value}
:return:
"""
self.content[instance] = result
def get_instances(self) -> list:
"""
Function that gets the instances that already exist in the file
:return: existing instances, list
"""
return list(self.content.keys())
def reset(self):
"""
Function that resets the file to an empty state.
:return:
"""
del self.content
self.content = {}
def save(self):
"""
Function that saves all the smart labels in the class to a local file
TODO: add saving to AWS based on AWS_SAVE in config
:return:
"""
with open(self.filename, "w") as f:
json.dump(self.content, f)
class JsonWriter(Writer):
"""
Class that saves results in json format.
"""
def __init__(self, filename='test'):
Writer.__init__(self, filename)
if not self.filename.endswith('.json'):
self.filename += '.json'
# with open(self.filename, 'r') as f:
# self.content = json.load(f)
self.content = {}
def save(self):
"""
Function that saves the writer's content to local system in json format.
:return:
"""
with open(self.filename, 'a') as json_file:
json.dump(self.content, json_file)
class CsvWriter:
def __init__(self, filename='result', features=[]):
assert isinstance(filename, str), "Expected name to be str, got {}".format(filename)
self.filename = filename
self.features = features
self.content = {}
if self.filename + '.csv' in os.listdir():
self.csv = pd.read_csv(self.filename + '.csv', index_col=0)
# self.csv = self.csv.to_dict(orient='list')
else:
self.csv = {}
self.reset()
self.csv = pd.DataFrame(self.csv)
self.csv.to_csv(self.filename + '.csv', mode='w')
print('csv saved as {}.csv'.format(self.filename))
def add(self, instance, result):
if self._check(result):
for _feature in list(result.keys()):
if _feature not in list(self.csv.keys()):
                    raise ValueError("unknown feature: {}".format(_feature))
self.content[instance] = result
result = {key: [value] for key, value in result.items()}
_df = pd.DataFrame.from_dict(result)
self.csv = self.csv.append(_df, ignore_index=True)
def _check(self, result):
return len(list(result.keys())) == len(self.features)
def save(self):
df = pd.DataFrame(self.csv)
df.to_csv(self.filename + '.csv', mode='a', header=False)
def reset(self):
self.csv = {}
# self.csv['iter'] = []
for feature in self.features:
self.csv[feature] = []
class ShpWriter:
def __init__(self, name='result'):
self.name = name
def save(self, collection):
if not isinstance(collection, Collection):
print('Expected Collection, got {}'.format(collection))
raise TypeError
if not isinstance(collection.class_type, Footprint.__class__):
print('Collection should be made of Footprints, got {}'.format(collection.class_type))
raise AttributeError
r = []
for f in collection:
r.append(f.polygon)
dict = {'name': [0 for x in r], 'geometry': r}
df = gpd.GeoDataFrame(dict)
df.to_file('{}.shp'.format(self.name)) | 25.992647 | 89 | 0.678642 | [
"MIT"
] | STASYA00/CityMorph | scripts/writer.py | 3,535 | Python |
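A brief usage sketch for the writers above; the instance names and label values are invented for illustration, the module is assumed to be importable as `writer`, and the CsvWriter part assumes an older pandas in which DataFrame.append still exists (as the class itself does).

from writer import JsonWriter, CsvWriter

json_writer = JsonWriter("smart_labels")            # stored as smart_labels.json
json_writer.add("building_001", {"height": 12.5, "roof_type": "flat"})
json_writer.add("building_002", {"height": 7.0, "roof_type": "gabled"})
print(json_writer.get_instances())                  # ['building_001', 'building_002']
json_writer.save()                                  # appends the dict as JSON

csv_writer = CsvWriter("results", features=["height", "roof_type"])
csv_writer.add("building_001", {"height": 12.5, "roof_type": "flat"})
csv_writer.save()                                   # appends a row to results.csv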
# Author
# Angelica ACOSTA ARTETA
import unittest
from balance import summing, stringarray, need, weighting
class TestBalance(unittest.TestCase):
def test_summing(self):
self.assertEqual(summing([]), 0)
self.assertEqual(summing([3]), 3)
self.assertEqual(summing([1,1,1,1,1]), 5)
self.assertEqual(summing([1,-1]), 0)
self.assertEqual(summing([-2,-2]), -4)
def test_stringarray(self):
self.assertEqual(stringarray([]), "")
self.assertEqual(stringarray([5]), "5")
self.assertEqual(stringarray([1,1,1,1,1]), "1, 1, 1, 1, 1")
def test_need(self):
self.assertEqual(need(1), (1,0))
self.assertEqual(need(0), (None,None))
self.assertEqual(need(-4), (None,None))
self.assertEqual(need(27), (27,3))
def test_weighting(self):
first_test=weighting(1)
self.assertEqual(summing(first_test[0]), summing(first_test[1]))
second_test=weighting(532)
self.assertEqual(summing(second_test[0]), summing(second_test[1]))
        third_test=weighting(1000)
        self.assertEqual(summing(third_test[0]), summing(third_test[1]))
if __name__ == '__main__':
unittest.main() | 32.945946 | 74 | 0.631665 | [
"MIT"
] | angelicacosta/Ternary-Balance | test_balance.py | 1,219 | Python |
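For readers without the balance module at hand, here is a minimal sketch of the two simplest functions these tests pin down, inferred only from the assertions above; need() and weighting() are left out because the tests alone do not determine them, and the real implementations may differ.

def summing(values):
    # Sum of a list of integers; 0 for an empty list (see test_summing).
    return sum(values)

def stringarray(values):
    # Comma-plus-space rendering; empty string for an empty list
    # (see test_stringarray).
    return ", ".join(str(v) for v in values)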
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('copy')
def copy(input, tensor_name="", debug_ops_spec=[], name=None):
r"""Copy Op.
Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
device on which the tensor is allocated.
N.B.: If the all downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the CopyHost Op, this op does not have HostMemory constraint on its
input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
<debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_, _, _op = _op_def_lib._apply_op_helper(
"Copy", input=input, tensor_name=tensor_name,
debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tensor_name",
_op.get_attr("tensor_name"), "debug_ops_spec",
_op.get_attr("debug_ops_spec"))
_execute.record_gradient(
"Copy", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "Copy", name,
_ctx._post_execution_callbacks, input, "tensor_name", tensor_name,
"debug_ops_spec", debug_ops_spec)
return _result
except _core._FallbackException:
return copy_eager_fallback(
input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,
name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def copy_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None):
r"""This is the slowpath function for Eager mode.
This is for function copy
"""
_ctx = _context.context()
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec",
debug_ops_spec)
_result = _execute.execute(b"Copy", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Copy", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('copy_host')
def copy_host(input, tensor_name="", debug_ops_spec=[], name=None):
r"""Copy Host Op.
Performs CPU-to-CPU deep-copying of tensor.
N.B.: If the all downstream attached debug ops are disabled given the current
gRPC gating status, the output will simply forward the input tensor without
deep-copying. See the documentation of Debug* ops for more details.
Unlike the Copy Op, this op has HostMemory constraint on its input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
debug_ops_spec: An optional list of `strings`. Defaults to `[]`.
A list of debug op spec (op, url, gated_grpc) for attached debug
ops. Each element of the list has the format
<debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
"DebugIdentity;file:///tmp/tfdbg_1;0".
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy_host' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_, _, _op = _op_def_lib._apply_op_helper(
"CopyHost", input=input, tensor_name=tensor_name,
debug_ops_spec=debug_ops_spec, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tensor_name",
_op.get_attr("tensor_name"), "debug_ops_spec",
_op.get_attr("debug_ops_spec"))
_execute.record_gradient(
"CopyHost", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "CopyHost", name,
_ctx._post_execution_callbacks, input, "tensor_name", tensor_name,
"debug_ops_spec", debug_ops_spec)
return _result
except _core._FallbackException:
return copy_host_eager_fallback(
input, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec,
name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def copy_host_eager_fallback(input, tensor_name="", debug_ops_spec=[], name=None):
r"""This is the slowpath function for Eager mode.
This is for function copy_host
"""
_ctx = _context.context()
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_ops_spec is None:
debug_ops_spec = []
if not isinstance(debug_ops_spec, (list, tuple)):
raise TypeError(
"Expected list for 'debug_ops_spec' argument to "
"'copy_host' Op, not %r." % debug_ops_spec)
debug_ops_spec = [_execute.make_str(_s, "debug_ops_spec") for _s in debug_ops_spec]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "tensor_name", tensor_name, "debug_ops_spec",
debug_ops_spec)
_result = _execute.execute(b"CopyHost", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"CopyHost", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_identity')
def debug_identity(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""Debug Identity Op.
Provides an identity mapping of the non-Ref type input tensor for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc:://localhost:11011
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor that equals the input tensor.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_identity' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugIdentity", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugIdentity", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugIdentity", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc",
gated_grpc)
return _result
except _core._FallbackException:
return debug_identity_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_identity_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_identity
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_identity' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugIdentity", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugIdentity", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_nan_count')
def debug_nan_count(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""Debug NaN Value Counter Op
Counts number of NaNs in the input tensor, for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc:://localhost:11011.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
An integer output tensor that is the number of NaNs in the input.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_nan_count' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugNanCount", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugNanCount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugNanCount", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "gated_grpc",
gated_grpc)
return _result
except _core._FallbackException:
return debug_nan_count_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_nan_count_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_nan_count
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_nan_count' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugNanCount", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugNanCount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debug_numeric_summary')
def debug_numeric_summary(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
r"""Debug Numeric Summary Op.
Provide a basic summary of numeric value types, range and distribution.
Args:
input: A `Tensor`. Input tensor, non-Reference type, float or double.
device_name: An optional `string`. Defaults to `""`.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc:://localhost:11011
lower_bound: An optional `float`. Defaults to `float('-inf')`.
(float) The lower bound <= which values will be included in the
generalized -inf count. Default: -inf.
upper_bound: An optional `float`. Defaults to `float('inf')`.
(float) The upper bound >= which values will be included in the
generalized +inf count. Default: +inf.
mute_if_healthy: An optional `bool`. Defaults to `False`.
(bool) Do not send data to the debug URLs unless at least one
of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and
inf counts) is non-zero.
gated_grpc: An optional `bool`. Defaults to `False`.
Whether this op will be gated. If any of the debug_urls of this
debug node is of the grpc:// scheme, when the value of this attribute is set
to True, the data will not actually be sent via the grpc stream unless this
debug op has been enabled at the debug_url. If all of the debug_urls of this
debug node are of the grpc:// scheme and the debug op is enabled at none of
them, the output will be an empty Tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float64`.
A double tensor of shape [14 + nDimensions], where nDimensions is the
the number of dimensions of the tensor's shape. The elements of output are:
[0]: is initialized (1.0) or not (0.0).
[1]: total number of elements
[2]: NaN element count
[3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
default.
[4]: negative element count (excluding -inf), if lower_bound is the default
-inf. Otherwise, this is the count of elements > lower_bound and < 0.
[5]: zero element count
[6]: positive element count (excluding +inf), if upper_bound is the default
-inf. Otherwise, this is the count of elements < upper_bound and > 0.
[7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
default.
Output elements [1:8] are all zero, if the tensor is uninitialized.
[8]: minimum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: +inf.
[9]: maximum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: -inf.
[10]: mean of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[11]: variance of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[12]: Data type of the tensor encoded as an enum integer. See the DataType
proto for more details.
[13]: Number of dimensions of the tensor (ndims).
[14+]: Sizes of the dimensions.
"""
_ctx = _context.context()
if not _ctx.executing_eagerly():
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_numeric_summary' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if lower_bound is None:
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, "lower_bound")
if upper_bound is None:
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, "upper_bound")
if mute_if_healthy is None:
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_, _, _op = _op_def_lib._apply_op_helper(
"DebugNumericSummary", input=input, device_name=device_name,
tensor_name=tensor_name, debug_urls=debug_urls,
lower_bound=lower_bound, upper_bound=upper_bound,
mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "device_name",
_op.get_attr("device_name"), "tensor_name",
_op.get_attr("tensor_name"), "debug_urls",
_op.get_attr("debug_urls"), "lower_bound",
_op.get_attr("lower_bound"), "upper_bound",
_op.get_attr("upper_bound"), "mute_if_healthy",
_op.get_attr("mute_if_healthy"), "gated_grpc",
_op.get_attr("gated_grpc"))
_execute.record_gradient(
"DebugNumericSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._handle, _ctx.device_name, "DebugNumericSummary", name,
_ctx._post_execution_callbacks, input, "device_name", device_name,
"tensor_name", tensor_name, "debug_urls", debug_urls, "lower_bound",
lower_bound, "upper_bound", upper_bound, "mute_if_healthy",
mute_if_healthy, "gated_grpc", gated_grpc)
return _result
except _core._FallbackException:
return debug_numeric_summary_eager_fallback(
input, device_name=device_name, tensor_name=tensor_name,
debug_urls=debug_urls, lower_bound=lower_bound,
upper_bound=upper_bound, mute_if_healthy=mute_if_healthy,
gated_grpc=gated_grpc, name=name)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def debug_numeric_summary_eager_fallback(input, device_name="", tensor_name="", debug_urls=[], lower_bound=float('-inf'), upper_bound=float('inf'), mute_if_healthy=False, gated_grpc=False, name=None):
r"""This is the slowpath function for Eager mode.
This is for function debug_numeric_summary
"""
_ctx = _context.context()
if device_name is None:
device_name = ""
device_name = _execute.make_str(device_name, "device_name")
if tensor_name is None:
tensor_name = ""
tensor_name = _execute.make_str(tensor_name, "tensor_name")
if debug_urls is None:
debug_urls = []
if not isinstance(debug_urls, (list, tuple)):
raise TypeError(
"Expected list for 'debug_urls' argument to "
"'debug_numeric_summary' Op, not %r." % debug_urls)
debug_urls = [_execute.make_str(_s, "debug_urls") for _s in debug_urls]
if lower_bound is None:
lower_bound = float('-inf')
lower_bound = _execute.make_float(lower_bound, "lower_bound")
if upper_bound is None:
upper_bound = float('inf')
upper_bound = _execute.make_float(upper_bound, "upper_bound")
if mute_if_healthy is None:
mute_if_healthy = False
mute_if_healthy = _execute.make_bool(mute_if_healthy, "mute_if_healthy")
if gated_grpc is None:
gated_grpc = False
gated_grpc = _execute.make_bool(gated_grpc, "gated_grpc")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "device_name", device_name, "tensor_name",
tensor_name, "debug_urls", debug_urls, "lower_bound", lower_bound,
"upper_bound", upper_bound, "mute_if_healthy", mute_if_healthy,
"gated_grpc", gated_grpc)
_result = _execute.execute(b"DebugNumericSummary", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DebugNumericSummary", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "Copy"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_ops_spec"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "CopyHost"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_ops_spec"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugIdentity"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugNanCount"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_INT64
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
# op {
# name: "DebugNumericSummary"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_DOUBLE
# }
# attr {
# name: "T"
# type: "type"
# }
# attr {
# name: "device_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "tensor_name"
# type: "string"
# default_value {
# s: ""
# }
# }
# attr {
# name: "debug_urls"
# type: "list(string)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "lower_bound"
# type: "float"
# default_value {
# f: -inf
# }
# }
# attr {
# name: "upper_bound"
# type: "float"
# default_value {
# f: inf
# }
# }
# attr {
# name: "mute_if_healthy"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "gated_grpc"
# type: "bool"
# default_value {
# b: false
# }
# }
# allows_uninitialized_input: true
# }
_op_def_lib = _InitOpDefLibrary(b"\nl\n\004Copy\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\np\n\010CopyHost\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013tensor_name\022\006string\032\002\022\000\"\"\n\016debug_ops_spec\022\014list(string)\032\002\n\000\230\001\001\n\244\001\n\rDebugIdentity\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\243\001\n\rDebugNanCount\022\n\n\005input\"\001T\032\n\n\006output\030\t\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001\n\200\002\n\023DebugNumericSummary\022\n\n\005input\"\001T\032\n\n\006output\030\002\"\t\n\001T\022\004type\"\031\n\013device_name\022\006string\032\002\022\000\"\031\n\013tensor_name\022\006string\032\002\022\000\"\036\n\ndebug_urls\022\014list(string)\032\002\n\000\"\033\n\013lower_bound\022\005float\032\005%\000\000\200\377\"\033\n\013upper_bound\022\005float\032\005%\000\000\200\177\"\033\n\017mute_if_healthy\022\004bool\032\002(\000\"\026\n\ngated_grpc\022\004bool\032\002(\000\230\001\001")
| 38.706089 | 1,593 | 0.651218 | [
"Apache-2.0",
"MIT"
] | Soum-Soum/Tensorflow_Face_Finder | venv1/Lib/site-packages/tensorflow/python/debug/ops/gen_debug_ops.py | 33,055 | Python |
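The DebugNumericSummary docstring above describes a flat vector of 14 + ndims elements, which is easy to misread by index. The helper below is not part of TensorFlow; it simply names those positions so a dumped summary can be inspected as a dict.

def parse_debug_numeric_summary(summary):
    # Field order follows the DebugNumericSummary docstring above.
    keys = ["is_initialized", "element_count", "nan_count",
            "neg_inf_count", "neg_count", "zero_count", "pos_count",
            "pos_inf_count", "min", "max", "mean", "variance",
            "dtype_enum", "ndims"]
    parsed = {key: summary[i] for i, key in enumerate(keys)}
    parsed["dim_sizes"] = list(summary[14:])  # one entry per tensor dimension
    return parsed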
from dagster import AssetKey, DagsterEventType, EventRecordsFilter, check, seven
from .utils import capture_error
def _normalize_asset_cursor_str(cursor_string):
# the cursor for assets is derived from a json serialized string of the path. Because there are
    # json serialization differences between JS and Python in their treatment of whitespace, we should
# take extra precaution here and do a deserialization/serialization pass
if not cursor_string:
return cursor_string
try:
return seven.json.dumps(seven.json.loads(cursor_string))
except seven.JSONDecodeError:
return cursor_string
@capture_error
def get_assets(graphene_info, prefix=None, cursor=None, limit=None):
from ..schema.pipelines.pipeline import GrapheneAsset
from ..schema.roots.assets import GrapheneAssetConnection
instance = graphene_info.context.instance
normalized_cursor_str = _normalize_asset_cursor_str(cursor)
materialized_keys = instance.get_asset_keys(
prefix=prefix, limit=limit, cursor=normalized_cursor_str
)
asset_nodes_by_asset_key = {
asset_key: asset_node
for asset_key, asset_node in get_asset_nodes_by_asset_key(graphene_info).items()
if (not prefix or asset_key.path[: len(prefix)] == prefix)
and (not cursor or asset_key.to_string() > cursor)
}
asset_keys = sorted(set(materialized_keys).union(asset_nodes_by_asset_key.keys()), key=str)
if limit:
asset_keys = asset_keys[:limit]
return GrapheneAssetConnection(
nodes=[
GrapheneAsset(
key=asset_key,
definition=asset_nodes_by_asset_key.get(asset_key),
)
for asset_key in asset_keys
]
)
def get_asset_nodes_by_asset_key(graphene_info):
from ..schema.asset_graph import GrapheneAssetNode
return {
external_asset_node.asset_key: GrapheneAssetNode(repository, external_asset_node)
for location in graphene_info.context.repository_locations
for repository in location.get_repositories().values()
for external_asset_node in repository.get_external_asset_nodes()
}
def get_asset_nodes(graphene_info):
from ..schema.asset_graph import GrapheneAssetNode
return [
GrapheneAssetNode(repository, external_asset_node)
for location in graphene_info.context.repository_locations
for repository in location.get_repositories().values()
for external_asset_node in repository.get_external_asset_nodes()
]
def get_asset_node(graphene_info, asset_key):
from ..schema.errors import GrapheneAssetNotFoundError
check.inst_param(asset_key, "asset_key", AssetKey)
node = next((n for n in get_asset_nodes(graphene_info) if n.assetKey == asset_key), None)
if not node:
return GrapheneAssetNotFoundError(asset_key=asset_key)
return node
def get_asset(graphene_info, asset_key):
from ..schema.errors import GrapheneAssetNotFoundError
from ..schema.pipelines.pipeline import GrapheneAsset
check.inst_param(asset_key, "asset_key", AssetKey)
instance = graphene_info.context.instance
asset_nodes_by_asset_key = get_asset_nodes_by_asset_key(graphene_info)
asset_node = asset_nodes_by_asset_key.get(asset_key)
if not asset_node and not instance.has_asset_key(asset_key):
return GrapheneAssetNotFoundError(asset_key=asset_key)
return GrapheneAsset(key=asset_key, definition=asset_node)
def get_asset_events(graphene_info, asset_key, partitions=None, limit=None, before_timestamp=None):
check.inst_param(asset_key, "asset_key", AssetKey)
check.opt_int_param(limit, "limit")
check.opt_float_param(before_timestamp, "before_timestamp")
instance = graphene_info.context.instance
event_records = instance.get_event_records(
EventRecordsFilter(
event_type=DagsterEventType.ASSET_MATERIALIZATION,
asset_key=asset_key,
asset_partitions=partitions,
before_timestamp=before_timestamp,
),
limit=limit,
)
return [event_record.event_log_entry for event_record in event_records]
def get_asset_run_ids(graphene_info, asset_key):
check.inst_param(asset_key, "asset_key", AssetKey)
instance = graphene_info.context.instance
return instance.run_ids_for_asset_key(asset_key)
def get_assets_for_run_id(graphene_info, run_id):
from ..schema.pipelines.pipeline import GrapheneAsset
check.str_param(run_id, "run_id")
records = graphene_info.context.instance.all_logs(run_id)
asset_keys = [
record.dagster_event.asset_key
for record in records
if record.is_dagster_event and record.dagster_event.asset_key
]
return [GrapheneAsset(key=asset_key) for asset_key in asset_keys]
| 35.492647 | 100 | 0.74249 | [
"Apache-2.0"
] | StratoDem/dagster | python_modules/dagster-graphql/dagster_graphql/implementation/fetch_assets.py | 4,827 | Python |
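To make the whitespace issue behind _normalize_asset_cursor_str concrete: JavaScript's JSON.stringify emits no space after commas while Python's json.dumps does, so the same asset-key path serializes differently on the two sides. The cursor values below are made up, but both forms normalize to the same string after the load/dump round trip.

from dagster import seven

js_style_cursor = '["my_asset","daily"]'      # as a JS client would send it
py_style_cursor = '["my_asset", "daily"]'     # as Python would serialize it
assert (seven.json.dumps(seven.json.loads(js_style_cursor))
        == seven.json.dumps(seven.json.loads(py_style_cursor)))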
import csv
source_file = "/Users/guodong/Downloads/grain_rj_evaluation.csv"
target_file = "/Users/guodong/Downloads/grain_rj_avg.csv"
target = open(target_file, 'w')
with open(source_file, 'r') as source:
    source_csv = csv.reader(source, delimiter=',')
hj = []
rid = []
zone = []
rows = []
bid = []
qid = 0
rowid = 0
for row in source_csv:
if (rowid == 5):
target.write(row[0] + ",")
target.write(row[1] + ",")
hj.append(float(row[2].strip()))
rid.append(float(row[3].strip()))
zone.append(float(row[4].strip()))
rows.append(float(row[5].strip()))
bid.append(float(row[6].strip()))
rowid = rowid + 1
        target.write(','.join(row) + '\n')
row_id = 0
sum_t = 0.0
for line in lines:
if (line.strip() == 'TIMEOUT'):
print("TIMEOUT")
row_id = 0
sum_t = 0.0
continue
sum_t += (float(line.strip()))
row_id += 1
if (row_id == 5):
print(sum_t / 5)
sum_t = 0.0
row_id = 0
| 24.325581 | 64 | 0.537285 | [
"MIT"
] | graindb/graindb | scripts/calculate.py | 1,046 | Python |
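The script above appears intended to average five timing runs per query (the rowid == 5 check and the five hj/rid/zone/rows/bid lists suggest that), but as written it still iterates over an undefined `lines` variable. Below is a hedged, self-contained sketch of that presumed intent; the column layout (timings in columns 2-6) and the group size of five are guesses taken from the code, not confirmed facts.

import csv

def average_runs(source_path, target_path, group_size=5):
    with open(source_path) as source, open(target_path, "w", newline="") as target:
        reader = csv.reader(source)
        writer = csv.writer(target)
        group = []
        for row in reader:
            group.append(row)
            if len(group) == group_size:
                # Average columns 2..6 (assumed: HJ, RID, ZONE, ROWS, BID) over the group.
                averages = [sum(float(r[i]) for r in group) / group_size
                            for i in range(2, 7)]
                writer.writerow(group[0][:2] + averages)
                group = []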
"""This uses the CLUE as a Bluetooth LE sensor node."""
# Adafruit Service demo for Adafruit CLUE board.
# Accessible via Adafruit Bluefruit Playground app and Web Bluetooth Dashboard.
import time
import board
from digitalio import DigitalInOut
import neopixel_write
from adafruit_ble import BLERadio
import ulab
from adafruit_clue import clue
from adafruit_ble_adafruit.adafruit_service import AdafruitServerAdvertisement
from adafruit_ble_adafruit.accelerometer_service import AccelerometerService
from adafruit_ble_adafruit.addressable_pixel_service import AddressablePixelService
from adafruit_ble_adafruit.barometric_pressure_service import BarometricPressureService
from adafruit_ble_adafruit.button_service import ButtonService
from adafruit_ble_adafruit.humidity_service import HumidityService
from adafruit_ble_adafruit.light_sensor_service import LightSensorService
from adafruit_ble_adafruit.microphone_service import MicrophoneService
from adafruit_ble_adafruit.temperature_service import TemperatureService
from adafruit_ble_adafruit.tone_service import ToneService
accel_svc = AccelerometerService()
accel_svc.measurement_period = 100
accel_last_update = 0
# CLUE has just one board pixel. 3 RGB bytes * 1 pixel.
NEOPIXEL_BUF_LENGTH = 3 * 1
neopixel_svc = AddressablePixelService()
neopixel_buf = bytearray(NEOPIXEL_BUF_LENGTH)
# Take over NeoPixel control from clue.
clue._pixel.deinit() # pylint: disable=protected-access
neopixel_out = DigitalInOut(board.NEOPIXEL)
neopixel_out.switch_to_output()
baro_svc = BarometricPressureService()
baro_svc.measurement_period = 100
baro_last_update = 0
button_svc = ButtonService()
button_svc.set_pressed(False, clue.button_a, clue.button_b)
humidity_svc = HumidityService()
humidity_svc.measurement_period = 100
humidity_last_update = 0
light_svc = LightSensorService()
light_svc.measurement_period = 100
light_last_update = 0
# Send 256 16-bit samples at a time.
MIC_NUM_SAMPLES = 256
mic_svc = MicrophoneService()
mic_svc.number_of_channels = 1
mic_svc.measurement_period = 100
mic_last_update = 0
mic_samples = ulab.zeros(MIC_NUM_SAMPLES, dtype=ulab.uint16)
temp_svc = TemperatureService()
temp_svc.measurement_period = 100
temp_last_update = 0
tone_svc = ToneService()
clue_display = clue.simple_text_display(text_scale=3, colors=(clue.WHITE,))
clue_display[0].text = "Temperature &"
clue_display[1].text = "Humidity"
clue_display[3].text = "Temp: {:.1f} C".format(clue.temperature)
clue_display[5].text = "Humi: {:.1f} %".format(clue.humidity)
ble = BLERadio()
# The Web Bluetooth dashboard identifies known boards by their
# advertised name, not by advertising manufacturer data.
ble.name = "Attic"
# The Bluefruit Playground app looks in the manufacturer data
# in the advertisement. That data uses the USB PID as a unique ID.
# Adafruit CLUE USB PID:
# Arduino: 0x8071, CircuitPython: 0x8072, app supports either
adv = AdafruitServerAdvertisement()
adv.pid = 0x8072
while True:
# Advertise when not connected.
ble.start_advertising(adv)
while not ble.connected:
pass
ble.stop_advertising()
while ble.connected:
now_msecs = time.monotonic_ns() // 1000000 # pylint: disable=no-member
if now_msecs - accel_last_update >= accel_svc.measurement_period:
accel_svc.acceleration = clue.acceleration
accel_last_update = now_msecs
if now_msecs - baro_last_update >= baro_svc.measurement_period:
baro_svc.pressure = clue.pressure
baro_last_update = now_msecs
button_svc.set_pressed(False, clue.button_a, clue.button_b)
if now_msecs - humidity_last_update >= humidity_svc.measurement_period:
humidity_svc.humidity = clue.humidity
humidity_last_update = now_msecs
clue_display[5].text = "Humi: {:.1f} %".format(clue.humidity)
print("Humi: {:.1f} %".format(clue.humidity))
if now_msecs - light_last_update >= light_svc.measurement_period:
# Return "clear" color value from color sensor.
light_svc.light_level = clue.color[3]
light_last_update = now_msecs
if now_msecs - mic_last_update >= mic_svc.measurement_period:
clue._mic.record( # pylint: disable=protected-access
mic_samples, len(mic_samples)
)
# This subtraction yields unsigned values which are
# reinterpreted as signed after passing.
mic_svc.sound_samples = mic_samples - 32768
mic_last_update = now_msecs
neopixel_values = neopixel_svc.values
if neopixel_values is not None:
start = neopixel_values.start
if start > NEOPIXEL_BUF_LENGTH:
continue
data = neopixel_values.data
data_len = min(len(data), NEOPIXEL_BUF_LENGTH - start)
neopixel_buf[start : start + data_len] = data[:data_len]
if neopixel_values.write_now:
neopixel_write.neopixel_write(neopixel_out, neopixel_buf)
if now_msecs - temp_last_update >= temp_svc.measurement_period:
temp_svc.temperature = clue.temperature
temp_last_update = now_msecs
clue_display[3].text = "Temp: {:.1f} C".format(clue.temperature)
print("Temp: {:.1f} C".format(clue.temperature))
tone = tone_svc.tone
if tone is not None:
freq, duration_msecs = tone
if freq != 0:
if duration_msecs != 0:
# Note that this blocks. Alternatively we could
# use now_msecs to time a tone in a non-blocking
# way, but then the other updates might make the
# tone interval less consistent.
clue.play_tone(freq, duration_msecs / 1000)
else:
clue.stop_tone()
clue.start_tone(freq)
else:
clue.stop_tone()
last_tone = tone
clue_display.show()
time.sleep(5)
# import time
# from adafruit_clue import clue
# import adafruit_ble_broadcastnet
# print("This is BroadcastNet CLUE sensor:", adafruit_ble_broadcastnet.device_address)
# while True:
# measurement = adafruit_ble_broadcastnet.AdafruitSensorMeasurement()
# measurement.temperature = clue.temperature
# measurement.pressure = clue.pressure
# measurement.relative_humidity = clue.humidity
# measurement.acceleration = clue.acceleration
# measurement.magnetic = clue.magnetic
# print(measurement)
# adafruit_ble_broadcastnet.broadcast(measurement)
# time.sleep(5)
# """This uses the CLUE as a Bluetooth LE sensor node."""
# import time
# from adafruit_clue import clue
# from adafruit_ble import BLERadio
# from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
# from adafruit_ble.services.nordic import UARTService
# ble = BLERadio()
# ble.name = "patio"
# uart_server = UARTService()
# advertisement = ProvideServicesAdvertisement(uart_server)
# while True:
# # measurement = adafruit_ble.advertising.AdafruitSensorMeasurement()
# # measurement.temperature = clue.temperature
# # measurement.pressure = clue.pressure
# # measurement.relative_humidity = clue.humidity
# # measurement.acceleration = clue.acceleration
# # measurement.magnetic = clue.magnetic
# print("{},{},{}\n".format(clue.temperature-5,clue.humidity,clue.pressure))
# # Advertise when not connected.
# ble.start_advertising(advertisement)
# print(advertisement)
# while not ble.connected:
# pass
# ble.stop_advertising()
# while ble.connected:
# print("{},{},{}\n".format(clue.temperature-5,clue.humidity,clue.pressure))
# uart_server.write("{},{},{}\n".format(clue.temperature-5,clue.humidity,clue.pressure))
# time.sleep(15)
# #time.sleep(1) | 35.4375 | 96 | 0.712774 | [
"MIT"
] | zhangxd6/homeautomation | clue/temperature/code.py | 7,938 | Python |
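The comment inside the tone branch above notes that clue.play_tone() blocks and that now_msecs could instead time the tone without stalling the sensor loop. A minimal sketch of that alternative follows; NonBlockingTone is a made-up helper name, and `player` is assumed to expose start_tone()/stop_tone() the way the clue object does.

class NonBlockingTone:
    def __init__(self, player):
        self._player = player
        self._stop_at = None

    def play(self, freq, duration_msecs, now_msecs):
        # Start immediately and remember when to stop instead of sleeping.
        self._player.start_tone(freq)
        self._stop_at = now_msecs + duration_msecs

    def stop_if_due(self, now_msecs):
        # Poll this once per pass through the main loop.
        if self._stop_at is not None and now_msecs >= self._stop_at:
            self._player.stop_tone()
            self._stop_at = None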
# Copyright (c) 2013-2014 Will Thames <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Main ansible-lint package."""
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.version import __version__
__all__ = (
"__version__",
"AnsibleLintRule" # deprecated, import it directly from rules
)
| 44.7 | 79 | 0.774049 | [
"MIT"
] | ragne/ansible-lint | lib/ansiblelint/__init__.py | 1,341 | Python |
"""
Quick Sort
----------
Uses partitioning to recursively divide and sort the list
Time Complexity: O(n**2) worst case
Space Complexity: O(n**2) this version
Stable: No
Pseudo Code: CLRS, Introduction to Algorithms, 3rd ed.
"""
count = 0
def sort(seq):
"""
Takes a list of integers and sorts them in ascending order. This sorted
list is then returned.
:param seq: A list of integers
:rtype: A list of sorted integers
"""
global count
if len(seq) <= 1:
return seq
else:
pivot = seq[0]
left, right = [], []
for x in seq[1:]:
count += 1
if x < pivot:
left.append(x)
else:
right.append(x)
return sort(left) + [pivot] + sort(right)
if __name__ == '__main__':
# print sort([9,8,7,6,5,4,3,2,1,0])
print sort([1,2,3,4,5,6,7,8,9,10])
print count
| 21.627907 | 75 | 0.539785 | [
"BSD-3-Clause"
] | appleface2050/algorithms | algorithms/sorting/quick_sort.py | 930 | Python |
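Since the header cites CLRS for the pseudo code, here is a sketch of the in-place Lomuto partition variant from that reference for comparison; unlike sort() above it does not build new left/right lists, so its extra space is just the recursion stack (O(log n) on average, O(n) worst case).

def quicksort_inplace(seq, lo=0, hi=None):
    if hi is None:
        hi = len(seq) - 1
    if lo < hi:
        p = partition(seq, lo, hi)
        quicksort_inplace(seq, lo, p - 1)
        quicksort_inplace(seq, p + 1, hi)
    return seq

def partition(seq, lo, hi):
    # Lomuto partition: seq[hi] is the pivot; smaller elements move left of it.
    pivot = seq[hi]
    i = lo - 1
    for j in range(lo, hi):
        if seq[j] <= pivot:
            i += 1
            seq[i], seq[j] = seq[j], seq[i]
    seq[i + 1], seq[hi] = seq[hi], seq[i + 1]
    return i + 1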
import logging
from datetime import datetime
from pprint import pprint as pp
import click
from flask.cli import with_appcontext
from scout.load import load_exons
from scout.server.extensions import store
from scout.utils.handle import get_file_handle
from scout.utils.scout_requests import fetch_ensembl_exons
LOG = logging.getLogger(__name__)
@click.command("exons", short_help="Load exons")
@click.option(
"-e",
"--exons-file",
type=click.Path(exists=True),
help="Path to file with ensembl exons",
)
@click.option("-b", "--build", type=click.Choice(["37", "38"]), default="37", show_default=True)
@with_appcontext
def exons(build, exons_file):
"""Load exons into the scout database. If no file, fetch exons from ensembl biomart"""
adapter = store
LOG.info("Running scout load exons")
start = datetime.now()
# Test if there are any exons loaded
existing_exon = adapter.exon(build=build)
if existing_exon:
LOG.warning("Dropping all exons ")
adapter.drop_exons(build=build)
LOG.info("Exons dropped")
# Load the exons
nr_exons = 0
if exons_file:
ensembl_exons = get_file_handle(exons_file)
for nr_exons, line in enumerate(ensembl_exons, 1):
pass
ensembl_exons = get_file_handle(exons_file)
else:
ensembl_exons = fetch_ensembl_exons(build=build)
nr_exons = 1360000
try:
load_exons(adapter, ensembl_exons, build, nr_exons=nr_exons)
except Exception as err:
LOG.warning("Something went wrong with ensembl biomart")
# LOG.info("Try to fetch one chromosome at the time")
LOG.info("Please download a mart dump manually, see instructions in user guide for admins")
return
LOG.info("Time to load exons: {0}".format(datetime.now() - start))
| 31.5 | 99 | 0.695676 | [
"BSD-3-Clause"
] | Clinical-Genomics/scout | scout/commands/load/exons.py | 1,827 | Python |
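A note on the double get_file_handle() call above: the first handle is exhausted by the counting loop (presumably so load_exons can report progress against nr_exons), so a fresh handle has to be opened for the actual load. The generic pattern, sketched here with the built-in open() rather than Scout's helper:

def count_lines_then_reopen(path, opener=open):
    nr_lines = 0
    with opener(path) as handle:
        for nr_lines, _ in enumerate(handle, 1):
            pass
    # The counted handle is spent; hand back a fresh one for the real pass.
    return nr_lines, opener(path)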
n, x = map(int, input().split())
arr = list(map(int, input().split()))
for i in arr:
if i < x: print(i, end=' ')
| 23.2 | 37 | 0.543103 | [
"MIT"
] | KHJcode/Algorithm-study | Baekjoon/Python/10871.py | 116 | Python |
import os
import sys as _sys
import platform
import re
PY2 = _sys.version_info < (3,)
PY3 = not PY2
RE_NUM = re.compile(r'(\d+).+')
if not PY2:
# these were moved around for Python 3
from urllib.parse import (quote as url_quote, unquote as url_unquote,
urlencode)
# Python 3 does not have basestring anymore; we include
# *only* the str here as this is used for textual data.
basestring = (str,)
# for assertions that the data is either encoded or non-encoded text
str_or_bytes = (str, bytes)
# xrange is gone, replace it with range
xrange = range
# the unicode type is str
unicode_type = str
def dictkeys(dct):
"""
Returns a list of keys of dictionary
dict.keys returns a view that works like .keys in Python 2
*except* any modifications in the dictionary will be visible
(and will cause errors if the view is being iterated over while
it is modified).
"""
return list(dct.keys())
def dictvalues(dct):
"""
Returns a list of values of a dictionary
dict.values returns a view that works like .values in Python 2
*except* any modifications in the dictionary will be visible
(and will cause errors if the view is being iterated over while
it is modified).
"""
return list(dct.values())
def dict_iteritems(dct):
"""
Returns an iterator of items (key/value pairs) of a dictionary
dict.items returns a view that works like .items in Python 2
*except* any modifications in the dictionary will be visible
(and will cause errors if the view is being iterated over while
it is modified).
"""
return dct.items()
def dict_itervalues(dct):
"""
:param dict dct:
:returns: an iterator of the values of a dictionary
"""
return dct.values()
def byte(*args):
"""
This is the same as Python 2 `chr(n)` for bytes in Python 3
Returns a single byte `bytes` for the given int argument (we
optimize it a bit here by passing the positional argument tuple
        directly to the bytes constructor).
"""
return bytes(args)
class long(int):
"""
A marker class that signifies that the integer value should be
serialized as `l` instead of `I`
"""
def __repr__(self):
return str(self) + 'L'
def canonical_str(value):
"""
Return the canonical str value for the string.
In both Python 3 and Python 2 this is str.
"""
return str(value)
def is_integer(value):
return isinstance(value, int)
else:
from urllib import quote as url_quote, unquote as url_unquote, urlencode
basestring = basestring
str_or_bytes = basestring
xrange = xrange
unicode_type = unicode
dictkeys = dict.keys
dictvalues = dict.values
dict_iteritems = dict.iteritems
dict_itervalues = dict.itervalues
byte = chr
long = long
def canonical_str(value):
"""
Returns the canonical string value of the given string.
In Python 2 this is the value unchanged if it is an str, otherwise
it is the unicode value encoded as UTF-8.
"""
try:
return str(value)
except UnicodeEncodeError:
return str(value.encode('utf-8'))
def is_integer(value):
return isinstance(value, (int, long))
def as_bytes(value):
if not isinstance(value, bytes):
return value.encode('UTF-8')
return value
def to_digit(value):
if value.isdigit():
return int(value)
match = RE_NUM.match(value)
return int(match.groups()[0]) if match else 0
def get_linux_version(release_str):
ver_str = release_str.split('-')[0]
return tuple(map(to_digit, ver_str.split('.')[:3]))
HAVE_SIGNAL = os.name == 'posix'
EINTR_IS_EXPOSED = _sys.version_info[:2] <= (3, 4)
LINUX_VERSION = None
if platform.system() == 'Linux':
LINUX_VERSION = get_linux_version(platform.release())
| 26.993506 | 76 | 0.624489 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | EnjoyLifeFund/macHighSierra-py36-pkgs | pika/compat.py | 4,157 | Python |
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class ExtendedBoolValueTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'value': 'ExtendedBool',
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'value': 'value',
'reject_on_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, value=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
"""ExtendedBoolValueTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._value = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if value is not None:
self.value = value
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def value(self):
"""Gets the value of this ExtendedBoolValueTest. # noqa: E501
:return: The value of this ExtendedBoolValueTest. # noqa: E501
:rtype: ExtendedBool
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ExtendedBoolValueTest.
:param value: The value of this ExtendedBoolValueTest. # noqa: E501
:type: ExtendedBool
"""
self._value = value
@property
def reject_on_error(self):
"""Gets the reject_on_error of this ExtendedBoolValueTest. # noqa: E501
:return: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this ExtendedBoolValueTest.
:param reject_on_error: The reject_on_error of this ExtendedBoolValueTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this ExtendedBoolValueTest. # noqa: E501
:return: The checked of this ExtendedBoolValueTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this ExtendedBoolValueTest.
:param checked: The checked of this ExtendedBoolValueTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtendedBoolValueTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ExtendedBoolValueTest):
return True
return self.to_dict() != other.to_dict()
| 27.764368 | 116 | 0.593252 | [
"MIT"
] | Telestream/telestream-cloud-python-sdk | telestream_cloud_qc_sdk/telestream_cloud_qc/models/extended_bool_value_test.py | 4,831 | Python |
# Everything we've seen to this point has been a problem known as regression in
# which we're trying to predict an actual numeric value for each observation of
# N input numeric values. A more common problem is that of classification -
# predicting a single binary occurrence, class or label for each input. The
# example we'll explore now is attempting to predict for every passenger aboard
# the Titanic, if they survived or not. Clearly, this is not a numeric value,
# but a boolean one: True (survived) or False (didn't survive)
#
# A different way to think about classification is in terms closer to regression
# where instead of approximating an output value for each input, we're
# learning a threshold line in the function where values below this threshold
# don't belong to a class, and values above it do.
#
# The weights of an output unit determine the logical expression for the
# corresponding input, while the bias acts as the threshold (axon hillock) that
# must be surpassed in order for the unit to activate. So the bias basically
# describes the excitability of the unit, or how likely it is to fire, while
# the weights are the effect of the individual inputs. Mathematically:
#
# y = w * x + b >= 0 => w * x >= -b
#
# That means that in order for the unit to fire (y >= 0) we need w * x to be
# greater than or equal to the negative of the bias. Remember that in
# classification the input x is a binary 0 or 1, so we have two cases:
#
# x = 0: w * 0 >= -b = 0 >= -b
# x = 1: w * 1 >= -b = w >= -b
#
# So basically, the bias describes two properties: (a) the default activation of
# the unit, whether it should fire or not on zero input (x = 0). And (b) how big
# the weights should be to excite or inhibit that default activation for a non-
# zero input (x = 1). A positive bias (1) will fire unless there are enough
# negative weights (where the input is 1) to inhibit it, while a negative bias
# (-1) will not fire unless there are enough positive weights to excite it. With
# these two variables, we can describe any single-argument boolean function:
#
# w b y >= -b f
# =================================
# 0 1 0 * x >= -1 T
# 0 -1 0 * x >= 1 F
#   1    -1     1 * x >= 1      x    F (when x=F) or T (x=T)  # identity
# -1 0 -1 * x >= 0 !x F (when x=T) or T (x=F) # negation
#
# When we add arguments, we can support more boolean operations like AND and OR.
# Let's start with AND: we will need the sum of a subgroup of the weights to
# exceed the negative bias:
#
# w1 w2 b y >= -b f
# ==================================
# 1 1 -2 x1 + x2 >= 2 x1 AND x2
# -1 1 -1 -x1 + x2 >= 1 !x1 AND x2
# 1 -1 -1 x1 - x2 >= 1 x1 AND !x2
# -1 -1 0 -x1 - x2 >= 0 !x1 AND !x2
#
# It's possible to have other weights, but there's a subgroup of the weights
# where each isn't big enough to exceed -b by itself, but their sum does. All
# of these weights need to be activated (by an input of 1) in order for the sum
# to be greater than -b.
#
# Now for the OR. Because we might have several such subgroups that satisfy the
# relationship above, each subgroup can, by itself, exceed -b. Thus there's an
# OR operator between these subgroups:
#
# w1 w2 w3 b y >= -b f
# ==============================================
# 1 1 2 -2 x1 + x2 + 2*x3 >= 2 ( x1 AND x2) OR ( x3)
#  -1    1   -2    1   -x1 + x2 - 2*x3 >= -1    (!x1 AND x2) OR (!x3)
#
# We end up with function structures like:
#
# f = (x1 AND x2 ...) OR ( x2 AND x3 ...) ...
# f = (x1 AND !x2 ...) OR (!x2 AND x3 ...) ...
# f = (x1 AND x2 ...) OR ( x3 AND x4 ...) ...
# ^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^
# subgroup 1 subgroup 2
#
# Where the OR separates all subgroups of the weights that have a sum greater
# than -b, while the AND separates the individual weights within each such
# group.
#
# NOTE that each input is always used with the same sign across all subgroups,
# either identity or negation - never both. Our model can only approximate
# linear boolean functions which are ones where each input always contributes
# the same amount towards the same output: T or F. If one argument is more
# likely to make the output true, it must be the case that regardless of all
# other arguments, it will continue to make the output similarly likely to be
# true (or false). It cannot be the case that one of the inputs is sometimes
# used as an identity and other times is negated. For example, these boolean
# functions aren't linear and thus cannot be approximated by this model:
#
# (x1 AND !x2) OR (!x1 AND x2) # exclusive-or (XOR)
# (x1 AND x2) OR (!x1 AND !x2) # Equivalence
#
# This is because it's impossible to choose a weight for the input that's both
# negative and positive. We need to pick one. So either that input makes the
# output bigger, or smaller, or neither - but not conditionally both. NOTE that
# this is a weak definition of linearity in boolean function, and is possibly
# wrong. I couldn't easily wrap my head around it, so perhaps the wikipedia
# entry[1] on it will help.
#
# [1] https://en.wikipedia.org/wiki/Linearity#Boolean_functions
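# A minimal, self-contained sketch (standard library only) that double-checks
# the truth tables above by brute force. The helper name `fires` and the
# specific weight/bias values are illustrative choices taken from the tables,
# not part of any library or of the training code below.
def fires(ws, b, xs):
    # the unit activates when w * x + b >= 0, i.e. when w * x >= -b
    return sum(w_ * x_ for w_, x_ in zip(ws, xs)) + b >= 0
_pairs = [(x1, x2) for x1 in (0, 1) for x2 in (0, 1)]
# x1 AND x2 with w = (1, 1), b = -2: only fires when both inputs are 1
assert [fires((1, 1), -2, p) for p in _pairs] == [False, False, False, True]
# (x1 AND x2) OR x3 with w = (1, 1, 2), b = -2: the {x1, x2} subgroup or the
# {x3} subgroup alone is enough to reach the threshold
assert all(fires((1, 1, 2), -2, (x1, x2, x3)) == bool((x1 and x2) or x3)
           for x1, x2, x3 in [p + (x3,) for p in _pairs for x3 in (0, 1)])
# XOR, by contrast, is not linear: a brute-force search over small integer
# weights and biases finds no single unit that reproduces it
assert not any(all(fires((w1, w2), b, p) == bool(p[0] != p[1]) for p in _pairs)
               for w1 in range(-3, 4) for w2 in range(-3, 4) for b in range(-3, 4))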
import numpy as np
np.random.seed(1)
EPOCHS = 300
ALPHA = 0.01
# Our 1-dimensional input is the sex of the passenger: m (male) or f (female)
# Our output is a number, either 1 (survived) or 0 (didn't survive)
X = ["f", "m", "f", "m", "f", "m", "f", "m", "f", "m", "f", "m", "f", "m"]
T = [ 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0 ]
# One of the main issues to take care of is encoding: how do we transform these
# textual categories into numeric inputs that we can estimate. One naive
# approach might be to use a single input feature, say a value of 0 represents a
# male, and 1 represents a female. That wouldn't work, because any kind of
# weight we'll use will end up increasing for females. Thus we have no way to
# find different weights for the different categories. This is not necessarily
# correct for ordinal values like age or fare cost, but it's still common to
# learn these weights independently by grouping multiple numeric values into a
# discrete set of categories ("young", "old" for age; "cheap", "expensive" for
# fare cost). The same limitation obviously applies if we use more values with
# binary encoding.
#
# The best known approach currently is one-hot (or one-of-k) in which each value
# is assigned a completely different input. If we have k values, we'll use
# k input neurons (one for male and the other for female) in which only one
# neuron can be lit (value of 1) for any given training case. If we have
# multiple categories we can concatenate multiple such one-of-k's as needed, as
# that maintains the fact that each value is assigned a separate input and weight.
N = len(set(X)) # 1 per unique value
# encode the input data strings into a list of one-of-k's. We want to return a
# list of numbers, where all are set to zero and only one is set to one. That
# should be applied to each feature - one slot per value. More features would
# require a concatenation of such one-of-k's
def one_of_k(v):
x = np.zeros(N)
idx = ["m", "f"].index(v)
x[idx] = 1.
return x
X = np.array([one_of_k(x) for x in X])
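# A quick illustrative check (not part of the original script): each category
# gets its own slot, so the two values map to orthogonal one-hot vectors.
assert list(one_of_k("m")) == [1.0, 0.0]
assert list(one_of_k("f")) == [0.0, 1.0]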
w = np.random.randn(N + 1) * 0.01 # start with small random weights
data = list(zip(X, T))
for i in range(EPOCHS):
np.random.shuffle(data)
e = 0
# we will now also compute the accuracy as a count of how many instances in
    # the data were predicted correctly. This is a more quantitative way of
# representing the correctness of the prediction as opposed to an arbitrary
# error function
accuracy = 0
# mini-batches
for x, t in data:
# predict
x = np.append(x, 1.) # add the fixed bias.
y = sum(w * x)
# error & derivatives
e += (y - t) ** 2 / 2
dy = (y - t)
dw = dy * x
# update
w += ALPHA * -dw # mini-batch update
# did we predict correctly? We need to transform the output number
# into a boolean prediction: whether the label should be turned on
# or off. For this example, we'll simply see if the prediction is
# closer to 0 or 1, by first clipping to the [0, 1] range in order
# to trim values outside of this range, and then rounding.
accuracy += 1 if round(np.clip(y, 0, 1)) == t else 0
e /= len(data)
    print("%s: ERROR = %f ; ACCURACY = %d of %d" % (i, e, accuracy, len(data)))
print("")
print("W = %s" % w)
| 47.311475 | 80 | 0.64784 | [
"MIT"
] | avinoamr/ai-neural | 07_classification.py | 8,658 | Python |
from enum import Enum
from dataclasses import dataclass
class TokenType(Enum):
#TYPES
INT = 0
FLOAT = 1
#OPERATORS
PLUS = 2
MINUS = 3
DIVIDE = 4
MULTIPLY = 5
#PARENTHESES
LPAREN = 6
RPAREN = 7
#SQUARE BRACKETS
L_SQUAREBRACKET = 8
R_SQUAREBRACKET = 9
#ANGLE BRACKETS
L_ANGLEBRACKET = 12
R_ANGLEBRACKET = 13
@dataclass
class Token:
type: TokenType
value: any = None
def __repr__(self):
        return self.type.name + (f":{self.value}" if self.value is not None else "")
"Apache-2.0"
] | cattoware/Math-Interpreter | tokens.py | 590 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Jan 29, 2021
@file: train_unmixing.py
@desc: Perform the training of the models for the unmixing problem.
@author: laugh12321
@contact: [email protected]
"""
import os
import numpy as np
import tensorflow as tf
from typing import Dict
import src.model.enums as enums
from src.utils import io, transforms
from src.model.models import _get_model
from src.utils.transforms import UNMIXING_TRANSFORMS
from src.evaluation import time_metrics
from src.evaluation.performance_metrics import UNMIXING_LOSSES, \
UNMIXING_TRAIN_METRICS
def train(data: Dict[str, np.ndarray],
model_name: str,
dest_path: str,
sample_size: int,
n_classes: int,
lr: float,
batch_size: int,
epochs: int,
verbose: int,
shuffle: bool,
patience: int,
seed: int):
"""
Function for running experiments on various unmixing models,
    given a set of hyperparameters.
:param data: The data dictionary containing
the subsets for training and validation.
First dimension of the datasets should be the number of samples.
:param model_name: Name of the model, it serves as a key in the
dictionary holding all functions returning models.
:param dest_path: Path to where all experiment runs will be saved as
subdirectories in this given directory.
:param sample_size: Size of the input sample.
:param n_classes: Number of classes.
:param lr: Learning rate for the model, i.e., regulates
the size of the step in the gradient descent process.
:param batch_size: Size of the batch used in training phase,
it is the size of samples per gradient step.
:param epochs: Number of epochs for model to train.
:param verbose: Verbosity mode used in training, (0, 1 or 2).
:param shuffle: Boolean indicating whether to shuffle datasets.
:param patience: Number of epochs without improvement in order to
stop the training phase.
:param seed: Seed for training reproducibility.
"""
# Reproducibility:
np.random.seed(seed=seed)
model = _get_model(
model_key=model_name,
**{'input_size': sample_size,
'n_classes': n_classes})
model.summary()
model.compile(
optimizer=tf.keras.optimizers.Adam(lr=lr),
loss=UNMIXING_LOSSES[model_name],
metrics=UNMIXING_TRAIN_METRICS[model_name])
time_history = time_metrics.TimeHistory()
mcp_save = tf.keras.callbacks.ModelCheckpoint(
os.path.join(dest_path, 'model.h5'),
save_best_only=True,
monitor='val_loss',
mode='min')
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=patience,
mode='min')
callbacks = [time_history, mcp_save, early_stopping]
train_dict = data[enums.Dataset.TRAIN].copy()
val_dict = data[enums.Dataset.VAL].copy()
min_, max_ = data[enums.DataStats.MIN], data[enums.DataStats.MAX]
transformations = [transforms.MinMaxNormalize(min_=min_, max_=max_)]
transformations += [t() for t in UNMIXING_TRANSFORMS[model_name]]
train_dict = transforms.apply_transformations(train_dict, transformations)
val_dict = transforms.apply_transformations(val_dict, transformations)
history = model.fit(
x=train_dict[enums.Dataset.DATA],
y=train_dict[enums.Dataset.LABELS],
epochs=epochs,
verbose=verbose,
shuffle=shuffle,
validation_data=(val_dict[enums.Dataset.DATA],
val_dict[enums.Dataset.LABELS]),
callbacks=callbacks,
batch_size=batch_size)
np.savetxt(os.path.join(dest_path,
'min-max.csv'), np.array([min_, max_]),
delimiter=',', fmt='%f')
history.history[time_metrics.TimeHistory.__name__] = time_history.average
io.save_metrics(dest_path=dest_path,
file_name='training_metrics.csv',
metrics=history.history)
| 34.116667 | 78 | 0.67318 | [
"MIT"
] | laugh12321/DACN | src/model/train_unmixing.py | 4,094 | Python |
"""Probability mass function for a beta binomial distribution
Functions
---------
betabinom_pmf
Probability mass function for a beta binomial distribution
"""
from bbpmf.betabinom_pmf import betabinom_pmf
| 21.1 | 62 | 0.781991 | [
"MIT"
] | anthony-aylward/bbpmf | bbpmf/__init__.py | 211 | Python |
from typing import List
from scripter.backend.note_text import NoteText
from scripter.io.writer_base import WriterBase
class FormatWriter(WriterBase):
def __init__(self, *, format:str=None, **kwargs):
super().__init__()
        if format is None:
            self.format = 'P.{page}\n{text}\n'
        else:
            self.format = format
def dump(self, fp, texts: List[NoteText]):
for text in texts:
fp.write(self.format.format(
page=text.page,
text=text.text
))
| 27.888889 | 53 | 0.603586 | [
"MIT"
] | elda27/scripter | scripter/io/format_writer.py | 502 | Python |
import random
random.sample(list({1, 2, 3, 4, 5, 6}), 2)  # random select from a set (convert to a sequence first; random.sample no longer accepts sets in Python 3.11+)
| 27 | 66 | 0.654321 | [
"MIT"
] | district10/snippet-manager | snippets/python-set-random.py | 81 | Python |
"""Tests for stubs.
Verify that various things in stubs are consistent with how things behave at runtime.
"""
import argparse
import copy
import enum
import importlib
import inspect
import re
import sys
import types
import warnings
from functools import singledispatch
from pathlib import Path
from typing import Any, Dict, Generic, Iterator, List, Optional, Tuple, TypeVar, Union, cast
from typing_extensions import Type
import mypy.build
import mypy.modulefinder
import mypy.types
from mypy import nodes
from mypy.config_parser import parse_config_file
from mypy.options import Options
from mypy.util import FancyFormatter
class Missing:
"""Marker object for things that are missing (from a stub or the runtime)."""
def __repr__(self) -> str:
return "MISSING"
MISSING = Missing()
T = TypeVar("T")
if sys.version_info >= (3, 5, 3):
MaybeMissing = Union[T, Missing]
else:
# work around a bug in 3.5.2 and earlier's typing.py
class MaybeMissingMeta(type):
def __getitem__(self, arg: Any) -> Any:
return Union[arg, Missing]
class MaybeMissing(metaclass=MaybeMissingMeta): # type: ignore
pass
_formatter = FancyFormatter(sys.stdout, sys.stderr, False)
def _style(message: str, **kwargs: Any) -> str:
"""Wrapper around mypy.util for fancy formatting."""
kwargs.setdefault("color", "none")
return _formatter.style(message, **kwargs)
class Error:
def __init__(
self,
object_path: List[str],
message: str,
stub_object: MaybeMissing[nodes.Node],
runtime_object: MaybeMissing[Any],
*,
stub_desc: Optional[str] = None,
runtime_desc: Optional[str] = None
) -> None:
"""Represents an error found by stubtest.
:param object_path: Location of the object with the error,
e.g. ``["module", "Class", "method"]``
:param message: Error message
:param stub_object: The mypy node representing the stub
:param runtime_object: Actual object obtained from the runtime
:param stub_desc: Specialised description for the stub object, should you wish
:param runtime_desc: Specialised description for the runtime object, should you wish
"""
self.object_desc = ".".join(object_path)
self.message = message
self.stub_object = stub_object
self.runtime_object = runtime_object
self.stub_desc = stub_desc or str(getattr(stub_object, "type", stub_object))
self.runtime_desc = runtime_desc or str(runtime_object)
def is_missing_stub(self) -> bool:
"""Whether or not the error is for something missing from the stub."""
return isinstance(self.stub_object, Missing)
def is_positional_only_related(self) -> bool:
"""Whether or not the error is for something being (or not being) positional-only."""
# TODO: This is hacky, use error codes or something more resilient
return "leading double underscore" in self.message
def get_description(self, concise: bool = False) -> str:
"""Returns a description of the error.
:param concise: Whether to return a concise, one-line description
"""
if concise:
return _style(self.object_desc, bold=True) + " " + self.message
stub_line = None
stub_file = None # type: None
if not isinstance(self.stub_object, Missing):
stub_line = self.stub_object.line
# TODO: Find a way of getting the stub file
stub_loc_str = ""
if stub_line:
stub_loc_str += " at line {}".format(stub_line)
if stub_file:
stub_loc_str += " in file {}".format(Path(stub_file))
runtime_line = None
runtime_file = None
if not isinstance(self.runtime_object, Missing):
try:
runtime_line = inspect.getsourcelines(self.runtime_object)[1]
except (OSError, TypeError):
pass
try:
runtime_file = inspect.getsourcefile(self.runtime_object)
except TypeError:
pass
runtime_loc_str = ""
if runtime_line:
runtime_loc_str += " at line {}".format(runtime_line)
if runtime_file:
runtime_loc_str += " in file {}".format(Path(runtime_file))
output = [
_style("error: ", color="red", bold=True),
_style(self.object_desc, bold=True),
" ",
self.message,
"\n",
"Stub:",
_style(stub_loc_str, dim=True),
"\n",
_style(self.stub_desc + "\n", color="blue", dim=True),
"Runtime:",
_style(runtime_loc_str, dim=True),
"\n",
_style(self.runtime_desc + "\n", color="blue", dim=True),
]
return "".join(output)
def test_module(module_name: str) -> Iterator[Error]:
"""Tests a given module's stub against introspecting it at runtime.
Requires the stub to have been built already, accomplished by a call to ``build_stubs``.
:param module_name: The module to test
"""
stub = get_stub(module_name)
if stub is None:
yield Error([module_name], "failed to find stubs", MISSING, None)
return
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
runtime = importlib.import_module(module_name)
except Exception as e:
yield Error([module_name], "failed to import: {}".format(e), stub, MISSING)
return
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield from verify(stub, runtime, [module_name])
@singledispatch
def verify(
stub: nodes.Node, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
"""Entry point for comparing a stub to a runtime object.
We use single dispatch based on the type of ``stub``.
:param stub: The mypy node representing a part of the stub
:param runtime: The runtime object corresponding to ``stub``
"""
yield Error(object_path, "is an unknown mypy node", stub, runtime)
@verify.register(nodes.MypyFile)
def verify_mypyfile(
stub: nodes.MypyFile, runtime: MaybeMissing[types.ModuleType], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if not isinstance(runtime, types.ModuleType):
yield Error(object_path, "is not a module", stub, runtime)
return
# Check things in the stub that are public
to_check = set(
m
for m, o in stub.names.items()
if o.module_public and (not m.startswith("_") or hasattr(runtime, m))
)
runtime_public_contents = [
m
for m in dir(runtime)
if not m.startswith("_")
# Ensure that the object's module is `runtime`, e.g. so that we don't pick up reexported
# modules and infinitely recurse. Unfortunately, there's no way to detect an explicit
# reexport missing from the stubs (that isn't specified in __all__)
and getattr(getattr(runtime, m), "__module__", None) == runtime.__name__
]
# Check all things declared in module's __all__, falling back to runtime_public_contents
to_check.update(getattr(runtime, "__all__", runtime_public_contents))
to_check.difference_update({"__file__", "__doc__", "__name__", "__builtins__", "__package__"})
for entry in sorted(to_check):
yield from verify(
stub.names[entry].node if entry in stub.names else MISSING,
getattr(runtime, entry, MISSING),
object_path + [entry],
)
@verify.register(nodes.TypeInfo)
def verify_typeinfo(
stub: nodes.TypeInfo, runtime: MaybeMissing[Type[Any]], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=repr(stub))
return
if not isinstance(runtime, type):
yield Error(object_path, "is not a type", stub, runtime, stub_desc=repr(stub))
return
# Check everything already defined in the stub
to_check = set(stub.names)
# There's a reasonable case to be made that we should always check all dunders, but it's
# currently quite noisy. We could turn this into a denylist instead of an allowlist.
to_check.update(
# cast to workaround mypyc complaints
m for m in cast(Any, vars)(runtime) if not m.startswith("_") or m in SPECIAL_DUNDERS
)
for entry in sorted(to_check):
mangled_entry = entry
if entry.startswith("__") and not entry.endswith("__"):
mangled_entry = "_{}{}".format(stub.name, entry)
yield from verify(
next((t.names[entry].node for t in stub.mro if entry in t.names), MISSING),
getattr(runtime, mangled_entry, MISSING),
object_path + [entry],
)
def _verify_static_class_methods(
stub: nodes.FuncBase, runtime: Any, object_path: List[str]
) -> Iterator[str]:
if stub.name in ("__new__", "__init_subclass__", "__class_getitem__"):
# Special cased by Python, so don't bother checking
return
if inspect.isbuiltin(runtime):
# The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do
# something a little hacky that seems to work well
probably_class_method = isinstance(getattr(runtime, "__self__", None), type)
if probably_class_method and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not probably_class_method and stub.is_class:
yield "stub is a classmethod but runtime is not"
return
# Look the object up statically, to avoid binding by the descriptor protocol
static_runtime = importlib.import_module(object_path[0])
for entry in object_path[1:]:
try:
static_runtime = inspect.getattr_static(static_runtime, entry)
except AttributeError:
# This can happen with mangled names, ignore for now.
# TODO: pass more information about ancestors of nodes/objects to verify, so we don't
# have to do this hacky lookup. Would be useful in a couple other places too.
return
if isinstance(static_runtime, classmethod) and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not isinstance(static_runtime, classmethod) and stub.is_class:
yield "stub is a classmethod but runtime is not"
if isinstance(static_runtime, staticmethod) and not stub.is_static:
yield "runtime is a staticmethod but stub is not"
if not isinstance(static_runtime, staticmethod) and stub.is_static:
yield "stub is a staticmethod but runtime is not"
def _verify_arg_name(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str
) -> Iterator[str]:
"""Checks whether argument names match."""
# Ignore exact names for most dunder methods
if is_dunder(function_name, exclude_special=True):
return
def strip_prefix(s: str, prefix: str) -> str:
return s[len(prefix):] if s.startswith(prefix) else s
if strip_prefix(stub_arg.variable.name, "__") == runtime_arg.name:
return
def names_approx_match(a: str, b: str) -> bool:
a = a.strip("_")
b = b.strip("_")
return a.startswith(b) or b.startswith(a) or len(a) == 1 or len(b) == 1
# Be more permissive about names matching for positional-only arguments
if runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and names_approx_match(
stub_arg.variable.name, runtime_arg.name
):
return
# This comes up with namedtuples, so ignore
if stub_arg.variable.name == "_self":
return
yield (
'stub argument "{}" differs from runtime argument "{}"'.format(
stub_arg.variable.name, runtime_arg.name
)
)
def _verify_arg_default_value(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter
) -> Iterator[str]:
"""Checks whether argument default values are compatible."""
if runtime_arg.default != inspect.Parameter.empty:
if stub_arg.kind not in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'runtime argument "{}" has a default value but stub argument does not'.format(
runtime_arg.name
)
)
else:
runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default)
# Fallback to the type annotation type if var type is missing. The type annotation
# is an UnboundType, but I don't know enough to know what the pros and cons here are.
# UnboundTypes have ugly question marks following them, so default to var type.
# Note we do this same fallback when constructing signatures in from_overloadedfuncdef
stub_type = stub_arg.variable.type or stub_arg.type_annotation
if isinstance(stub_type, mypy.types.TypeVarType):
stub_type = stub_type.upper_bound
if (
runtime_type is not None
and stub_type is not None
# Avoid false positives for marker objects
and type(runtime_arg.default) != object
and not is_subtype_helper(runtime_type, stub_type)
):
yield (
'runtime argument "{}" has a default value of type {}, '
"which is incompatible with stub argument type {}".format(
runtime_arg.name, runtime_type, stub_type
)
)
else:
if stub_arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT):
yield (
'stub argument "{}" has a default value but runtime argument does not'.format(
stub_arg.variable.name
)
)
def maybe_strip_cls(name: str, args: List[nodes.Argument]) -> List[nodes.Argument]:
if name in ("__init_subclass__", "__class_getitem__"):
# These are implicitly classmethods. If the stub chooses not to have @classmethod, we
# should remove the cls argument
if args[0].variable.name == "cls":
return args[1:]
return args
class Signature(Generic[T]):
def __init__(self) -> None:
self.pos = [] # type: List[T]
self.kwonly = {} # type: Dict[str, T]
self.varpos = None # type: Optional[T]
self.varkw = None # type: Optional[T]
def __str__(self) -> str:
def get_name(arg: Any) -> str:
if isinstance(arg, inspect.Parameter):
return arg.name
if isinstance(arg, nodes.Argument):
return arg.variable.name
raise AssertionError
def get_type(arg: Any) -> Optional[str]:
if isinstance(arg, inspect.Parameter):
return None
if isinstance(arg, nodes.Argument):
return str(arg.variable.type or arg.type_annotation)
raise AssertionError
def has_default(arg: Any) -> bool:
if isinstance(arg, inspect.Parameter):
return arg.default != inspect.Parameter.empty
if isinstance(arg, nodes.Argument):
return arg.kind in (nodes.ARG_OPT, nodes.ARG_NAMED_OPT)
raise AssertionError
def get_desc(arg: Any) -> str:
arg_type = get_type(arg)
return (
get_name(arg)
+ (": {}".format(arg_type) if arg_type else "")
+ (" = ..." if has_default(arg) else "")
)
kw_only = sorted(self.kwonly.values(), key=lambda a: (has_default(a), get_name(a)))
ret = "def ("
ret += ", ".join(
[get_desc(arg) for arg in self.pos]
+ (["*" + get_name(self.varpos)] if self.varpos else (["*"] if self.kwonly else []))
+ [get_desc(arg) for arg in kw_only]
+ (["**" + get_name(self.varkw)] if self.varkw else [])
)
ret += ")"
return ret
@staticmethod
def from_funcitem(stub: nodes.FuncItem) -> "Signature[nodes.Argument]":
stub_sig = Signature() # type: Signature[nodes.Argument]
stub_args = maybe_strip_cls(stub.name, stub.arguments)
for stub_arg in stub_args:
if stub_arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
stub_sig.pos.append(stub_arg)
elif stub_arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
stub_sig.kwonly[stub_arg.variable.name] = stub_arg
elif stub_arg.kind == nodes.ARG_STAR:
stub_sig.varpos = stub_arg
elif stub_arg.kind == nodes.ARG_STAR2:
stub_sig.varkw = stub_arg
else:
raise AssertionError
return stub_sig
@staticmethod
def from_inspect_signature(signature: inspect.Signature) -> "Signature[inspect.Parameter]":
runtime_sig = Signature() # type: Signature[inspect.Parameter]
for runtime_arg in signature.parameters.values():
if runtime_arg.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
runtime_sig.pos.append(runtime_arg)
elif runtime_arg.kind == inspect.Parameter.KEYWORD_ONLY:
runtime_sig.kwonly[runtime_arg.name] = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_POSITIONAL:
runtime_sig.varpos = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_KEYWORD:
runtime_sig.varkw = runtime_arg
else:
raise AssertionError
return runtime_sig
@staticmethod
def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> "Signature[nodes.Argument]":
"""Returns a Signature from an OverloadedFuncDef.
If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its
items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we
try and combine the overload's items into a single signature that is compatible with any
lies it might try to tell.
"""
# For most dunder methods, just assume all args are positional-only
assume_positional_only = is_dunder(stub.name, exclude_special=True)
all_args = {} # type: Dict[str, List[Tuple[nodes.Argument, int]]]
for func in map(_resolve_funcitem_from_decorator, stub.items):
assert func is not None
args = maybe_strip_cls(stub.name, func.arguments)
for index, arg in enumerate(args):
# For positional-only args, we allow overloads to have different names for the same
# argument. To accomplish this, we just make up a fake index-based name.
name = (
"__{}".format(index)
if arg.variable.name.startswith("__") or assume_positional_only
else arg.variable.name
)
all_args.setdefault(name, []).append((arg, index))
def get_position(arg_name: str) -> int:
# We just need this to return the positional args in the correct order.
return max(index for _, index in all_args[arg_name])
def get_type(arg_name: str) -> mypy.types.ProperType:
with mypy.state.strict_optional_set(True):
all_types = [
arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name]
]
return mypy.typeops.make_simplified_union([t for t in all_types if t])
def get_kind(arg_name: str) -> int:
kinds = {arg.kind for arg, _ in all_args[arg_name]}
if nodes.ARG_STAR in kinds:
return nodes.ARG_STAR
if nodes.ARG_STAR2 in kinds:
return nodes.ARG_STAR2
# The logic here is based on two tenets:
# 1) If an arg is ever optional (or unspecified), it is optional
# 2) If an arg is ever positional, it is positional
is_opt = (
len(all_args[arg_name]) < len(stub.items)
or nodes.ARG_OPT in kinds
or nodes.ARG_NAMED_OPT in kinds
)
is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds
if is_opt:
return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT
return nodes.ARG_POS if is_pos else nodes.ARG_NAMED
sig = Signature() # type: Signature[nodes.Argument]
for arg_name in sorted(all_args, key=get_position):
# example_arg_name gives us a real name (in case we had a fake index-based name)
example_arg_name = all_args[arg_name][0][0].variable.name
arg = nodes.Argument(
nodes.Var(example_arg_name, get_type(arg_name)),
type_annotation=None,
initializer=None,
kind=get_kind(arg_name),
)
if arg.kind in (nodes.ARG_POS, nodes.ARG_OPT):
sig.pos.append(arg)
elif arg.kind in (nodes.ARG_NAMED, nodes.ARG_NAMED_OPT):
sig.kwonly[arg.variable.name] = arg
elif arg.kind == nodes.ARG_STAR:
sig.varpos = arg
elif arg.kind == nodes.ARG_STAR2:
sig.varkw = arg
else:
raise AssertionError
return sig
def _verify_signature(
stub: Signature[nodes.Argument], runtime: Signature[inspect.Parameter], function_name: str
) -> Iterator[str]:
# Check positional arguments match up
for stub_arg, runtime_arg in zip(stub.pos, runtime.pos):
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
if (
runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY
and not stub_arg.variable.name.startswith("__")
and not stub_arg.variable.name.strip("_") == "self"
and not is_dunder(function_name, exclude_special=True) # noisy for dunder methods
):
yield (
'stub argument "{}" should be positional-only '
'(rename with a leading double underscore, i.e. "__{}")'.format(
stub_arg.variable.name, runtime_arg.name
)
)
if (
runtime_arg.kind != inspect.Parameter.POSITIONAL_ONLY
and stub_arg.variable.name.startswith("__")
):
yield (
'stub argument "{}" should be positional or keyword '
"(remove leading double underscore)".format(stub_arg.variable.name)
)
# Check unmatched positional args
if len(stub.pos) > len(runtime.pos):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *args. Hence, a) we can't check that the runtime actually takes those
# parameters and b) below, we don't enforce that the stub takes *args, since runtime logic
# may prevent those arguments from actually being accepted.
if runtime.varpos is None:
for stub_arg in stub.pos[len(runtime.pos):]:
# If the variable is in runtime.kwonly, it's just mislabelled as not a
# keyword-only argument
if stub_arg.variable.name not in runtime.kwonly:
yield 'runtime does not have argument "{}"'.format(stub_arg.variable.name)
else:
yield 'stub argument "{}" is not keyword-only'.format(stub_arg.variable.name)
if stub.varpos is not None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
elif len(stub.pos) < len(runtime.pos):
for runtime_arg in runtime.pos[len(stub.pos):]:
if runtime_arg.name not in stub.kwonly:
yield 'stub does not have argument "{}"'.format(runtime_arg.name)
else:
yield 'runtime argument "{}" is not keyword-only'.format(runtime_arg.name)
# Checks involving *args
if len(stub.pos) <= len(runtime.pos) or runtime.varpos is None:
if stub.varpos is None and runtime.varpos is not None:
yield 'stub does not have *args argument "{}"'.format(runtime.varpos.name)
if stub.varpos is not None and runtime.varpos is None:
yield 'runtime does not have *args argument "{}"'.format(stub.varpos.variable.name)
# Check keyword-only args
for arg in sorted(set(stub.kwonly) & set(runtime.kwonly)):
stub_arg, runtime_arg = stub.kwonly[arg], runtime.kwonly[arg]
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
# Check unmatched keyword-only args
if runtime.varkw is None or not set(runtime.kwonly).issubset(set(stub.kwonly)):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *kwargs. Hence, a) we only check if the runtime actually takes those
# parameters when the above condition holds and b) below, we don't enforce that the stub
# takes *kwargs, since runtime logic may prevent additional arguments from actually being
# accepted.
for arg in sorted(set(stub.kwonly) - set(runtime.kwonly)):
yield 'runtime does not have argument "{}"'.format(arg)
for arg in sorted(set(runtime.kwonly) - set(stub.kwonly)):
if arg in set(stub_arg.variable.name for stub_arg in stub.pos):
# Don't report this if we've reported it before
if len(stub.pos) > len(runtime.pos) and runtime.varpos is not None:
yield 'stub argument "{}" is not keyword-only'.format(arg)
else:
yield 'stub does not have argument "{}"'.format(arg)
# Checks involving **kwargs
if stub.varkw is None and runtime.varkw is not None:
# As mentioned above, don't enforce that the stub takes **kwargs.
# Also check against positional parameters, to avoid a nitpicky message when an argument
# isn't marked as keyword-only
stub_pos_names = set(stub_arg.variable.name for stub_arg in stub.pos)
# Ideally we'd do a strict subset check, but in practice the errors from that aren't useful
if not set(runtime.kwonly).issubset(set(stub.kwonly) | stub_pos_names):
yield 'stub does not have **kwargs argument "{}"'.format(runtime.varkw.name)
if stub.varkw is not None and runtime.varkw is None:
yield 'runtime does not have **kwargs argument "{}"'.format(stub.varkw.variable.name)
@verify.register(nodes.FuncItem)
def verify_funcitem(
stub: nodes.FuncItem, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except (ValueError, RuntimeError):
# inspect.signature throws sometimes
# catch RuntimeError because of https://bugs.python.org/issue39504
return
stub_sig = Signature.from_funcitem(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
runtime_desc="def " + str(signature),
)
@verify.register(Missing)
def verify_none(
stub: Missing, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
yield Error(object_path, "is not present in stub", stub, runtime)
@verify.register(nodes.Var)
def verify_var(
stub: nodes.Var, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
# Don't always yield an error here, because we often can't find instance variables
if len(object_path) <= 2:
yield Error(object_path, "is not present at runtime", stub, runtime)
return
runtime_type = get_mypy_type_of_runtime_value(runtime)
if (
runtime_type is not None
and stub.type is not None
and not is_subtype_helper(runtime_type, stub.type)
):
should_error = True
# Avoid errors when defining enums, since runtime_type is the enum itself, but we'd
# annotate it with the type of runtime.value
if isinstance(runtime, enum.Enum):
runtime_type = get_mypy_type_of_runtime_value(runtime.value)
if runtime_type is not None and is_subtype_helper(runtime_type, stub.type):
should_error = False
if should_error:
yield Error(
object_path,
"variable differs from runtime type {}".format(runtime_type),
stub,
runtime,
)
@verify.register(nodes.OverloadedFuncDef)
def verify_overloadedfuncdef(
stub: nodes.OverloadedFuncDef, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.is_property:
# We get here in cases of overloads from property.setter
return
if (
not isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
and not isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
and not inspect.ismethoddescriptor(runtime)
):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
for message in _verify_static_class_methods(stub, runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
try:
signature = inspect.signature(runtime)
except ValueError:
return
stub_sig = Signature.from_overloadedfuncdef(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
# TODO: This is a little hacky, but the addition here is super useful
if "has a default value of type" in message:
message += (
". This is often caused by overloads failing to account for explicitly passing "
"in the default value."
)
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
stub_desc=str(stub.type) + "\nInferred signature: {}".format(stub_sig),
runtime_desc="def " + str(signature),
)
@verify.register(nodes.TypeVarExpr)
def verify_typevarexpr(
stub: nodes.TypeVarExpr, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if False:
yield None
def _verify_property(stub: nodes.Decorator, runtime: Any) -> Iterator[str]:
assert stub.func.is_property
if isinstance(runtime, property):
return
if inspect.isdatadescriptor(runtime):
# It's enough like a property...
return
# Sometimes attributes pretend to be properties, for instance, to express that they
# are read only. So allowlist if runtime_type matches the return type of stub.
runtime_type = get_mypy_type_of_runtime_value(runtime)
func_type = (
stub.func.type.ret_type if isinstance(stub.func.type, mypy.types.CallableType) else None
)
if (
runtime_type is not None
and func_type is not None
and is_subtype_helper(runtime_type, func_type)
):
return
yield "is inconsistent, cannot reconcile @property on stub with runtime object"
def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> Optional[nodes.FuncItem]:
"""Returns a FuncItem that corresponds to the output of the decorator.
Returns None if we can't figure out what that would be. For convenience, this function also
accepts FuncItems.
"""
if isinstance(dec, nodes.FuncItem):
return dec
if dec.func.is_property:
return None
def apply_decorator_to_funcitem(
decorator: nodes.Expression, func: nodes.FuncItem
) -> Optional[nodes.FuncItem]:
if not isinstance(decorator, nodes.RefExpr):
return None
if decorator.fullname is None:
# Happens with namedtuple
return None
if decorator.fullname in (
"builtins.staticmethod",
"typing.overload",
"abc.abstractmethod",
):
return func
if decorator.fullname == "builtins.classmethod":
assert func.arguments[0].variable.name in ("cls", "metacls")
ret = copy.copy(func)
# Remove the cls argument, since it's not present in inspect.signature of classmethods
ret.arguments = ret.arguments[1:]
return ret
# Just give up on any other decorators. After excluding properties, we don't run into
# anything else when running on typeshed's stdlib.
return None
func = dec.func # type: nodes.FuncItem
for decorator in dec.original_decorators:
resulting_func = apply_decorator_to_funcitem(decorator, func)
if resulting_func is None:
return None
func = resulting_func
return func
@verify.register(nodes.Decorator)
def verify_decorator(
stub: nodes.Decorator, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.func.is_property:
for message in _verify_property(stub, runtime):
yield Error(object_path, message, stub, runtime)
return
func = _resolve_funcitem_from_decorator(stub)
if func is not None:
yield from verify(func, runtime, object_path)
@verify.register(nodes.TypeAlias)
def verify_typealias(
stub: nodes.TypeAlias, runtime: MaybeMissing[Any], object_path: List[str]
) -> Iterator[Error]:
if False:
yield None
SPECIAL_DUNDERS = ("__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__")
def is_dunder(name: str, exclude_special: bool = False) -> bool:
"""Returns whether name is a dunder name.
:param exclude_special: Whether to return False for a couple special dunder methods.
"""
if exclude_special and name in SPECIAL_DUNDERS:
return False
return name.startswith("__") and name.endswith("__")
def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:
"""Checks whether ``left`` is a subtype of ``right``."""
left = mypy.types.get_proper_type(left)
right = mypy.types.get_proper_type(right)
if (
isinstance(left, mypy.types.LiteralType)
and isinstance(left.value, int)
and left.value in (0, 1)
and isinstance(right, mypy.types.Instance)
and right.type.fullname == "builtins.bool"
):
# Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors.
return True
with mypy.state.strict_optional_set(True):
return mypy.subtypes.is_subtype(left, right)
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
try:
signature = inspect.signature(runtime)
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
has_default = arg.default == inspect.Parameter.empty
if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
arg_kinds.append(nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
except ValueError:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
stub = get_stub(type(runtime).__module__)
if stub is None:
return None
type_name = type(runtime).__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
try:
# Literals are supposed to be only bool, int, str, bytes or enums, but this seems to work
# well (when not using mypyc, for which bytes and enums are also problematic).
return mypy.types.LiteralType(
value=runtime,
fallback=fallback,
)
except TypeError:
# Ask for forgiveness if we're using mypyc.
return fallback
_all_stubs = {} # type: Dict[str, nodes.MypyFile]
def build_stubs(modules: List[str], options: Options, find_submodules: bool = False) -> List[str]:
"""Uses mypy to construct stub objects for the given modules.
This sets global state that ``get_stub`` can access.
Returns all modules we might want to check. If ``find_submodules`` is False, this is equal
to ``modules``.
:param modules: List of modules to build stubs for.
:param options: Mypy options for finding and building stubs.
:param find_submodules: Whether to attempt to find submodules of the given modules as well.
"""
data_dir = mypy.build.default_data_dir()
search_path = mypy.modulefinder.compute_search_paths([], options, data_dir)
find_module_cache = mypy.modulefinder.FindModuleCache(
search_path, fscache=None, options=options
)
all_modules = []
sources = []
for module in modules:
all_modules.append(module)
if not find_submodules:
module_path = find_module_cache.find_module(module)
if not isinstance(module_path, str):
# test_module will yield an error later when it can't find stubs
continue
sources.append(mypy.modulefinder.BuildSource(module_path, module, None))
else:
found_sources = find_module_cache.find_modules_recursive(module)
sources.extend(found_sources)
all_modules.extend(s.module for s in found_sources if s.module not in all_modules)
try:
res = mypy.build.build(sources=sources, options=options)
except mypy.errors.CompileError as e:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to failed mypy compile:\n",
str(e),
]
print("".join(output))
raise RuntimeError from e
if res.errors:
output = [
_style("error: ", color="red", bold=True),
"not checking stubs due to mypy build errors:\n",
]
print("".join(output) + "\n".join(res.errors))
raise RuntimeError
global _all_stubs
_all_stubs = res.files
return all_modules
def get_stub(module: str) -> Optional[nodes.MypyFile]:
"""Returns a stub object for the given module, if we've built one."""
return _all_stubs.get(module)
def get_typeshed_stdlib_modules(custom_typeshed_dir: Optional[str]) -> List[str]:
"""Returns a list of stdlib modules in typeshed (for current Python version)."""
stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir)
packages = set()
# Typeshed doesn't cover Python 3.5.
if sys.version_info < (3, 6):
version_info = (3, 6)
else:
version_info = sys.version_info[0:2]
for module, versions in stdlib_py_versions.items():
minver, maxver = versions
if version_info >= minver and (maxver is None or version_info <= maxver):
packages.add(module)
if custom_typeshed_dir:
typeshed_dir = Path(custom_typeshed_dir)
else:
typeshed_dir = Path(mypy.build.default_data_dir()) / "typeshed"
stdlib_dir = typeshed_dir / "stdlib"
modules = []
for path in stdlib_dir.rglob("*.pyi"):
if path.stem == "__init__":
path = path.parent
module = ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,))
if module.split(".")[0] in packages:
modules.append(module)
return sorted(modules)
def get_allowlist_entries(allowlist_file: str) -> Iterator[str]:
def strip_comments(s: str) -> str:
try:
return s[: s.index("#")].strip()
except ValueError:
return s.strip()
with open(allowlist_file) as f:
for line in f.readlines():
entry = strip_comments(line)
if entry:
yield entry
def test_stubs(args: argparse.Namespace, use_builtins_fixtures: bool = False) -> int:
"""This is stubtest! It's time to test the stubs!"""
# Load the allowlist. This is a series of strings corresponding to Error.object_desc
# Values in the dict will store whether we used the allowlist entry or not.
allowlist = {
entry: False
for allowlist_file in args.allowlist
for entry in get_allowlist_entries(allowlist_file)
}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
# If we need to generate an allowlist, we store Error.object_desc for each error here.
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
assert not args.modules, "Cannot pass both --check-typeshed and a list of modules"
modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
annoying_modules = {"antigravity", "this"}
modules = [m for m in modules if m not in annoying_modules]
assert modules, "No modules to check"
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
if options.config_file:
def set_strict_flags() -> None: # not needed yet
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
try:
modules = build_stubs(modules, options, find_submodules=not args.check_typeshed)
except RuntimeError:
return 1
exit_code = 0
for module in modules:
for error in test_module(module):
# Filter errors
if args.ignore_missing_stub and error.is_missing_stub():
continue
if args.ignore_positional_only and error.is_positional_only_related():
continue
if error.object_desc in allowlist:
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
# We have errors, so change exit code, and output whatever necessary
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
print(error.get_description(concise=args.concise))
# Print unused allowlist entries
if not args.ignore_unused_allowlist:
for w in allowlist:
# Don't consider an entry unused if it regex-matches the empty string
# This lets us allowlist errors that don't manifest at all on some systems
if not allowlist[w] and not allowlist_regexes[w].fullmatch(""):
exit_code = 1
print("note: unused allowlist entry {}".format(w))
# Print the generated allowlist
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
return exit_code
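# Worked example (not part of the original module) of the matching rules above:
# allowlist entries are compiled as regexes and compared with fullmatch, and an
# entry that fullmatches the empty string is never reported as unused.
#
#   import re
#
#   assert re.compile(r"os\.path\..*").fullmatch("os.path.splitext")
#   assert not re.compile(r"os\.path").fullmatch("os.path.splitext")  # no partial matches
#   assert re.compile(r"(os\.walk)?").fullmatch("")  # such an entry is never flagged unused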
def parse_options(args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Compares stubs to objects introspected from the runtime."
)
parser.add_argument("modules", nargs="*", help="Modules to test")
parser.add_argument("--concise", action="store_true", help="Make output concise")
parser.add_argument(
"--ignore-missing-stub",
action="store_true",
help="Ignore errors for stub missing things that are present at runtime",
)
parser.add_argument(
"--ignore-positional-only",
action="store_true",
help="Ignore errors for whether an argument should or shouldn't be positional-only",
)
parser.add_argument(
"--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR"
)
parser.add_argument(
"--check-typeshed", action="store_true", help="Check all stdlib modules in typeshed"
)
parser.add_argument(
"--allowlist",
"--whitelist",
action="append",
metavar="FILE",
default=[],
help=(
"Use file as an allowlist. Can be passed multiple times to combine multiple "
"allowlists. Allowlists can be created with --generate-allowlist"
),
)
parser.add_argument(
"--generate-allowlist",
"--generate-whitelist",
action="store_true",
help="Print an allowlist (to stdout) to be used with --allowlist",
)
parser.add_argument(
"--ignore-unused-allowlist",
"--ignore-unused-whitelist",
action="store_true",
help="Ignore unused allowlist entries",
)
config_group = parser.add_argument_group(
title='mypy config file',
description="Use a config file instead of command line arguments. "
"Plugins and mypy path are the only supported "
"configurations.",
)
config_group.add_argument(
'--mypy-config-file',
help=(
"An existing mypy configuration file, currently used by stubtest to help "
"determine mypy path and plugins"
),
)
return parser.parse_args(args)
def main() -> int:
mypy.util.check_python_version("stubtest")
return test_stubs(parse_options(sys.argv[1:]))
if __name__ == "__main__":
sys.exit(main())
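# Illustrative usage (not part of the original module): stubtest is normally
# run as a mypy submodule; the flags are the ones defined in parse_options
# above, and "mypackage" is a placeholder.
#
#   python -m mypy.stubtest --check-typeshed
#   python -m mypy.stubtest mypackage --generate-allowlist > allowlist.txt
#   python -m mypy.stubtest mypackage --allowlist allowlist.txt --ignore-missing-stub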
| 39.297233 | 99 | 0.634427 | ["MIT"] | HarisHijazi/mojarnik-server | venv/Lib/site-packages/mypy/stubtest.py | 49,711 | Python |
from __future__ import annotations
from datetime import (
datetime,
timedelta,
)
from typing import Hashable
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
NaT,
Period,
Resolution,
Tick,
)
from pandas._libs.tslibs.parsing import (
DateParseError,
parse_time_string,
)
from pandas._typing import (
Dtype,
DtypeObj,
)
from pandas.errors import InvalidIndexError
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_datetime64_any_dtype,
is_integer,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.missing import is_valid_na_for_dtype
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import (
DatetimeIndex,
Index,
)
from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"})
_shared_doc_kwargs = {
"klass": "PeriodArray",
}
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop("data")
if values.dtype == "int64":
freq = d.pop("freq", None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
@inherit_names(
["strftime", "start_time", "end_time"] + PeriodArray._field_ops,
PeriodArray,
wrap=True,
)
@inherit_names(["is_leap_year", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
    Index keys are boxed to Period objects which carry the metadata (e.g.,
    frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
day_of_week
dayofyear
day_of_year
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
>>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
_typ = "periodindex"
_attributes = ["name"]
_data: PeriodArray
freq: BaseOffset
_data_cls = PeriodArray
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@doc(
PeriodArray.asfreq,
other="pandas.arrays.PeriodArray",
other_name="PeriodArray",
**_shared_doc_kwargs,
)
def asfreq(self, freq=None, how: str = "E") -> PeriodIndex:
arr = self._data.asfreq(freq, how)
return type(self)._simple_new(arr, name=self.name)
@doc(PeriodArray.to_timestamp)
def to_timestamp(self, freq=None, how="start") -> DatetimeIndex:
arr = self._data.to_timestamp(freq, how)
return DatetimeIndex._simple_new(arr, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.hour.fget)
def hour(self) -> Int64Index:
return Int64Index(self._data.hour, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.minute.fget)
def minute(self) -> Int64Index:
return Int64Index(self._data.minute, name=self.name)
# https://github.com/python/mypy/issues/1362
# error: Decorated property not supported
@property # type:ignore[misc]
@doc(PeriodArray.second.fget)
def second(self) -> Int64Index:
return Int64Index(self._data.second, name=self.name)
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
**fields,
) -> PeriodIndex:
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
            # PeriodArray._generate_range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq=freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
# ------------------------------------------------------------------------
# Data
@property
def values(self) -> np.ndarray:
return np.asarray(self, dtype=object)
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
return dtype.freq == self.freq
# ------------------------------------------------------------------------
# Index Methods
def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:
"""
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
"""
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif not isinstance(where, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
return super().asof_locs(where, mask)
@doc(Index.astype)
def astype(self, dtype, copy: bool = True, how=lib.no_default):
dtype = pandas_dtype(dtype)
if how is not lib.no_default:
# GH#37982
warnings.warn(
"The 'how' keyword in PeriodIndex.astype is deprecated and "
"will be removed in a future version. "
"Use index.to_timestamp(how=how) instead",
FutureWarning,
stacklevel=2,
)
else:
how = "start"
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
return super().astype(dtype, copy=copy)
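    # Illustrative note (not part of pandas itself): given the deprecation
    # above, a datetime64 conversion is better written via to_timestamp
    # directly ("idx" is a placeholder PeriodIndex):
    #
    #   idx.astype("datetime64[ns]", how="end")  # emits the FutureWarning
    #   idx.to_timestamp(how="end")              # preferred spelling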
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic_increasing:
raise ValueError("Index is not monotonic")
values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
# ------------------------------------------------------------------------
# Indexing Methods
def _convert_tolerance(self, tolerance, target):
# Returned tolerance must be in dtype/units so that
# `|self._get_engine_target() - target._engine_target()| <= tolerance`
# is meaningful. Since PeriodIndex returns int64 for engine_target,
# we may need to convert timedelta64 tolerance to int64.
tolerance = super()._convert_tolerance(tolerance, target)
if self.dtype == target.dtype:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return tolerance
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
if not is_scalar(key):
raise InvalidIndexError(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
asdt, reso_str = parse_time_string(key, self.freq)
except (ValueError, DateParseError) as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso_str)
grp = reso.freq_group.value
freqn = self.dtype.freq_group_code
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
# BusinessDay is a bit strange. It has a *lower* code, but we never parse
# a string as "BusinessDay" resolution, just Day.
if grp == freqn or (
reso == Resolution.RESO_DAY and self.dtype.freq.name == "B"
):
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif method is None:
raise KeyError(key)
else:
key = asdt
elif isinstance(key, Period):
sfreq = self.freq
kfreq = key.freq
if not (
sfreq.n == kfreq.n
and sfreq._period_dtype_code == kfreq._period_dtype_code
):
# GH#42247 For the subset of DateOffsets that can be Period freqs,
# checking these two attributes is sufficient to check equality,
# and much more performant than `self.freq == key.freq`
raise KeyError(key)
elif isinstance(key, datetime):
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
else:
# in particular integer, which Period constructor would cast to string
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}, or None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
parsed, reso_str = parse_time_string(label, self.freq)
except ValueError as err:
# string cannot be parsed as datetime-like
raise self._invalid_indexer("slice", label) from err
reso = Resolution.from_attrname(reso_str)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
grp = reso.freq_group
iv = Period(parsed, freq=grp.value)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
grp = reso.freq_group
freqn = self.dtype.freq_group_code
if not grp.value < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
raise ValueError
def _get_string_slice(self, key: str):
parsed, reso_str = parse_time_string(key, self.freq)
reso = Resolution.from_attrname(reso_str)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
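# Illustrative sketch (not part of pandas itself): a few of the PeriodIndex
# behaviours implemented above, assuming pandas is importable as pd.
#
#   import pandas as pd
#
#   idx = pd.PeriodIndex(["2021-01", "2021-02", "2021-03"], freq="M")
#   idx.asfreq("D", how="E")       # last day of each month, via PeriodArray.asfreq
#   idx.to_timestamp(how="start")  # DatetimeIndex anchored at each period's start
#   idx.get_loc("2021-02")         # string keys are parsed to Period -> 1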
def period_range(
start=None, end=None, periods: int | None = None, freq=None, name=None
) -> PeriodIndex:
"""
Return a fixed frequency PeriodIndex.
The day (calendar) is the default frequency.
Parameters
----------
start : str or period-like, default None
Left bound for generating periods.
end : str or period-like, default None
Right bound for generating periods.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : str, default None
Name of the resulting PeriodIndex.
Returns
-------
PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
'2018-01'],
dtype='period[M]')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]')
"""
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):
freq = "D"
data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name)
| 32.029268 | 96 | 0.593207 | ["MIT"] | ATJWen/weather-app | env/Lib/site-packages/pandas/core/indexes/period.py | 19,698 | Python |
from __future__ import annotations
import re
from typing import Callable, ClassVar, List, Optional, Pattern, Sequence, Tuple, Union, cast
import discord
from discord.ext import commands
_ID_RE = re.compile(r"([0-9]{15,21})$")
_USER_MENTION_RE = re.compile(r"<@!?([0-9]{15,21})>$")
_CHAN_MENTION_RE = re.compile(r"<#([0-9]{15,21})>$")
_ROLE_MENTION_RE = re.compile(r"<@&([0-9]{15,21})>$")
class MessagePredicate(Callable[[discord.Message], bool]):
def __init__(self, predicate: Callable[["MessagePredicate", discord.Message], bool]) -> None:
self._pred: Callable[["MessagePredicate", discord.Message], bool] = predicate
self.result = None
def __call__(self, message: discord.Message) -> bool:
return self._pred(self, message)
@classmethod
def same_context(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
if ctx is not None:
channel = channel or ctx.channel
user = user or ctx.author
return cls(
lambda self, m: (user is None or user.id == m.author.id)
and (channel is None or channel.id == m.channel.id)
)
@classmethod
def cancelled(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
return cls(
lambda self, m: (same_context(m) and m.content.lower() == f"{ctx.prefix}cancel")
)
@classmethod
def yes_or_no(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
content = m.content.lower()
if content in ("yes", "y"):
self.result = True
elif content in ("no", "n"):
self.result = False
else:
return False
return True
return cls(predicate)
@classmethod
def valid_int(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = int(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def valid_float(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = float(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def positive(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
number = float(m.content)
except ValueError:
return False
else:
if number > 0:
self.result = number
return True
else:
return False
return cls(predicate)
@classmethod
def valid_role(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
role = self._find_role(guild, m.content)
if role is None:
return False
self.result = role
return True
return cls(predicate)
@classmethod
def valid_member(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
match = _ID_RE.match(m.content) or _USER_MENTION_RE.match(m.content)
if match:
result = guild.get_member(int(match.group(1)))
else:
result = guild.get_member_named(m.content)
if result is None:
return False
self.result = result
return True
return cls(predicate)
@classmethod
def valid_text_channel(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
match = _ID_RE.match(m.content) or _CHAN_MENTION_RE.match(m.content)
if match:
result = guild.get_channel(int(match.group(1)))
else:
result = discord.utils.get(guild.text_channels, name=m.content)
if not isinstance(result, discord.TextChannel):
return False
self.result = result
return True
return cls(predicate)
@classmethod
def has_role(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
if user is None:
if ctx is None:
raise TypeError(
"One of `user` or `ctx` must be supplied to `MessagePredicate.has_role`."
)
user = ctx.author
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
role = self._find_role(guild, m.content)
if role is None or role not in user.roles:
return False
self.result = role
return True
return cls(predicate)
@classmethod
def equal_to(
cls,
value: str,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and m.content == value)
@classmethod
def lower_equal_to(
cls,
value: str,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and m.content.lower() == value)
@classmethod
def less(
cls,
value: Union[int, float],
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
valid_int = cls.valid_int(ctx, channel, user)
valid_float = cls.valid_float(ctx, channel, user)
return cls(lambda self, m: (valid_int(m) or valid_float(m)) and float(m.content) < value)
@classmethod
def greater(
cls,
value: Union[int, float],
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
valid_int = cls.valid_int(ctx, channel, user)
valid_float = cls.valid_float(ctx, channel, user)
return cls(lambda self, m: (valid_int(m) or valid_float(m)) and float(m.content) > value)
@classmethod
def length_less(
cls,
length: int,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and len(m.content) <= length)
@classmethod
def length_greater(
cls,
length: int,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and len(m.content) >= length)
@classmethod
def contained_in(
cls,
collection: Sequence[str],
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = collection.index(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def lower_contained_in(
cls,
collection: Sequence[str],
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = collection.index(m.content.lower())
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def regex(
cls,
pattern: Union[Pattern[str], str],
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
if isinstance(pattern, str):
pattern_obj = re.compile(pattern)
else:
pattern_obj = pattern
match = pattern_obj.search(m.content)
if match:
self.result = match
return True
return False
return cls(predicate)
@staticmethod
def _find_role(guild: discord.Guild, argument: str) -> Optional[discord.Role]:
match = _ID_RE.match(argument) or _ROLE_MENTION_RE.match(argument)
if match:
result = guild.get_role(int(match.group(1)))
else:
result = discord.utils.get(guild.roles, name=argument)
return result
@staticmethod
def _get_guild(
ctx: commands.Context, channel: discord.TextChannel, user: discord.Member
) -> discord.Guild:
if ctx is not None:
return ctx.guild
elif channel is not None:
return channel.guild
elif user is not None:
return user.guild
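# Illustrative sketch (not part of this module): the usual way these predicates
# are consumed inside a discord.py / Red command, assuming `bot` and `ctx` are
# provided by the surrounding command framework.
#
#   pred = MessagePredicate.yes_or_no(ctx)
#   await ctx.send("Are you sure? (yes/no)")
#   await bot.wait_for("message", check=pred, timeout=30)
#   if pred.result:
#       ...  # the user answered yes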
class ReactionPredicate(Callable[[discord.Reaction, discord.abc.User], bool]):
YES_OR_NO_EMOJIS: ClassVar[Tuple[str, str]] = (
"\N{WHITE HEAVY CHECK MARK}",
"\N{NEGATIVE SQUARED CROSS MARK}",
)
"""Tuple[str, str] : A tuple containing the tick emoji and cross emoji, in that order."""
ALPHABET_EMOJIS: ClassVar[List[str]] = [
chr(code)
for code in range(
ord("\N{REGIONAL INDICATOR SYMBOL LETTER A}"),
ord("\N{REGIONAL INDICATOR SYMBOL LETTER Z}") + 1,
)
]
"""List[str] : A list of all 26 alphabetical letter emojis."""
NUMBER_EMOJIS: ClassVar[List[str]] = [
chr(code) + "\N{COMBINING ENCLOSING KEYCAP}" for code in range(ord("0"), ord("9") + 1)
]
"""List[str] : A list of all single-digit number emojis, 0 through 9."""
def __init__(
self, predicate: Callable[["ReactionPredicate", discord.Reaction, discord.abc.User], bool]
) -> None:
self._pred: Callable[
["ReactionPredicate", discord.Reaction, discord.abc.User], bool
] = predicate
self.result = None
def __call__(self, reaction: discord.Reaction, user: discord.abc.User) -> bool:
return self._pred(self, reaction, user)
# noinspection PyUnusedLocal
@classmethod
def same_context(
cls, message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None
) -> "ReactionPredicate":
# noinspection PyProtectedMember
me_id = message._state.self_id
return cls(
lambda self, r, u: u.id != me_id
and (message is None or r.message.id == message.id)
and (user is None or u.id == user.id)
)
@classmethod
def with_emojis(
cls,
emojis: Sequence[Union[str, discord.Emoji, discord.PartialEmoji]],
message: Optional[discord.Message] = None,
user: Optional[discord.abc.User] = None,
) -> "ReactionPredicate":
same_context = cls.same_context(message, user)
def predicate(self: ReactionPredicate, r: discord.Reaction, u: discord.abc.User):
if not same_context(r, u):
return False
try:
self.result = emojis.index(r.emoji)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def yes_or_no(
cls, message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None
) -> "ReactionPredicate":
same_context = cls.same_context(message, user)
def predicate(self: ReactionPredicate, r: discord.Reaction, u: discord.abc.User) -> bool:
if not same_context(r, u):
return False
try:
self.result = not bool(self.YES_OR_NO_EMOJIS.index(r.emoji))
except ValueError:
return False
else:
return True
return cls(predicate)
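# Illustrative sketch (not part of this module): wiring ReactionPredicate up to
# a yes/no prompt, again assuming `bot` and `ctx` come from the surrounding
# discord.py / Red command.
#
#   msg = await ctx.send("Proceed?")
#   for emoji in ReactionPredicate.YES_OR_NO_EMOJIS:
#       await msg.add_reaction(emoji)
#   pred = ReactionPredicate.yes_or_no(msg, ctx.author)
#   await bot.wait_for("reaction_add", check=pred, timeout=30)
#   if pred.result:
#       ...  # the tick emoji was clicked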
| 33.072727 | 98 | 0.580233 | ["MIT"] | Codin-Nerds/HotWired-Bot | bot/utils/messagepredicate.py | 16,371 | Python |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self, num_trees=100, max_nodes=10000, bagging_fraction=1.0,
max_depth=0, num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0, split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1, **kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.max_depth = max_depth
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Allow each tree to be unbalanced by up to a factor of 2.
self.max_depth = (self.max_depth or
int(2 * math.ceil(math.log(self.max_nodes, 2))))
# The Random Forest literature recommends sqrt(# features) for
# classification problems, and p/3 for regression problems.
# TODO(thomaswc): Consider capping this for large number of features.
self.num_splits_to_consider = (
self.num_splits_to_consider or
max(10, int(math.ceil(math.sqrt(self.num_features)))))
    # max_fertile_nodes doesn't affect performance, only training speed.
# We therefore set it primarily based upon space considerations.
    # Each fertile node takes up num_splits_to_consider times as much
    # space as a non-fertile node. We want the fertile nodes in total
    # to take up only as much space as the non-fertile nodes, so
num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))
# But always use at least 1000 accumulate slots.
num_fertile = max(num_fertile, 1000)
self.max_fertile_nodes = self.max_fertile_nodes or num_fertile
# But it also never needs to be larger than the number of leaves,
# which is max_nodes / 2.
self.max_fertile_nodes = min(self.max_fertile_nodes,
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
num_split_initializiations_per_input = max(1, int(math.floor(
self.num_splits_to_consider / self.split_after_samples)))
self.split_initializations_per_input = getattr(
self, 'split_initializations_per_input',
num_split_initializiations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
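# Worked example (not part of the original module) of the defaults that fill()
# computes for ForestHParams(num_classes=2, num_features=40) with the default
# max_nodes=10000 and split_after_samples=250:
#
#   num_splits_to_consider          = max(10, ceil(sqrt(40)))          = 10
#   max_depth                       = 2 * ceil(log2(10000))            = 28
#   max_fertile_nodes               = min(max(ceil(10000 / 10), 1000),
#                                         ceil(10000 / 2))             = 1000
#   split_initializations_per_input = max(1, floor(10 / 250))          = 1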
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.tree_depths = variable_scope.get_variable(
name=self.get_tree_name('tree_depths', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(1))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = tf_variables.Variable(
[0] * (params.max_nodes), name='start_epoch')
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
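# Illustrative sketch (not part of the original module): a round-robin assigner
# of the kind the docstring above suggests. The device strings are assumptions
# for illustration only.
#
#   class RoundRobinDeviceAssigner(RandomForestDeviceAssigner):
#
#     def __init__(self, devices):
#       super(RoundRobinDeviceAssigner, self).__init__()
#       self.devices = devices  # e.g. ['/cpu:0', '/gpu:0', '/gpu:1']
#
#     def get_device(self, tree_num):
#       return self.devices[tree_num % len(self.devices)]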
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat(
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
def training_graph(self, input_data, input_labels, data_spec=None,
epoch=None, **tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.slice(array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
epoch=([0] if epoch is None else epoch),
**tree_kwargs))
return control_flow_ops.group(*tree_graphs)
def inference_graph(self, input_data, data_spec=None):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random forest inference graph.
"""
data_spec = ([constants.DATA_FLOAT] * self.params.num_features
if data_spec is None else data_spec)
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(tree_data,
data_spec))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.reduce_sum(all_predict, 0) / self.params.num_trees
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(array_ops.pack(sizes))
def training_loss(self):
return math_ops.neg(self.average_size())
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [1, 1])), -2),
_init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
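  # Worked example (not part of the original module) of the three scores above,
  # using made-up counts. A node_sums row is [example_count, class_0, class_1],
  # e.g. [10, 4, 6]:
  #
  #   smoothed      = 1 + [4, 6] = [5, 7]
  #   gini          = 1 - (5^2 + 7^2) / (5 + 7)^2     ~= 0.486
  #   weighted gini = (5 + 7) - (5^2 + 7^2) / (5 + 7) ~= 5.83
  #
  # For regression, with sums = [4, 10] (4 examples, output sum 10) and squares
  # whose output column is 30, the per-output variance is
  #
  #   E[x] = 10 / 4 = 2.5,  E[x^2] = 30 / 4 = 7.5,  V = 7.5 - 2.5^2 = 1.25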
def training_graph(self, input_data, input_labels, random_seed,
data_spec, epoch=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
epoch: A tensor or placeholder for the epoch the training data comes from.
Returns:
The last op in the random tree training graph.
"""
epoch = [0] if epoch is None else epoch
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums,
splits_squares, totals_indices, totals_sums,
totals_squares, input_leaves) = (
self.training_ops.count_extremely_random_stats(
input_data, sparse_indices, sparse_values, sparse_shape,
data_spec, input_labels, self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch, epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data, sparse_indices, sparse_values, sparse_shape,
self.variables.node_to_accumulator_map,
input_leaves, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
finished, stale = self.training_ops.finished_nodes(
leaves, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch, epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples)
# Update leaf scores.
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates,
tree_threshold_updates, tree_depth_updates, new_eot) = (
self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.node_to_accumulator_map, finished, split_indices,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
depth_update_op = state_ops.scatter_update(
self.variables.tree_depths, tree_update_indices, tree_depth_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_depth_updates)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([depth_update_op]):
(node_map_updates, accumulators_cleared, accumulators_allocated) = (
self.training_ops.update_fertile_slots(
finished, non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree, self.variables.tree_depths,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
max_depth=self.params.max_depth,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple([new_eot],
control_inputs=[node_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
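    # The following assumes node_map_updates is a 2 x n tensor (inferred from
    # the slices below): row 0 holds the node indices to update and row 1 the
    # new accumulator ids to write into node_to_accumulator_map.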
updates.append(state_ops.scatter_update(
self.variables.node_to_accumulator_map,
array_ops.squeeze(array_ops.slice(node_map_updates, [0, 0], [1, -1]),
squeeze_dims=[0]),
array_ops.squeeze(array_ops.slice(node_map_updates, [1, 0], [1, -1]),
squeeze_dims=[0])))
cleared_and_allocated_accumulators = array_ops.concat(
0, [accumulators_cleared, accumulators_allocated])
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
    # reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, ops.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.shape
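      # For sparse input the dense tensor is passed down as an empty list; the
      # inference op presumably distinguishes the two cases by whether the
      # sparse components are non-empty.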
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
| 41.420616 | 80 | 0.696788 | [ "Apache-2.0" ] | AdityaPai2398/tensorflow | tensorflow/contrib/tensor_forest/python/tensor_forest.py | 34,959 | Python |
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class NeutronCreateFloatingIpRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'floatingip': 'CreateFloatingIpOption'
}
attribute_map = {
'floatingip': 'floatingip'
}
def __init__(self, floatingip=None):
"""NeutronCreateFloatingIpRequestBody - a model defined in huaweicloud sdk"""
self._floatingip = None
self.discriminator = None
self.floatingip = floatingip
@property
def floatingip(self):
"""Gets the floatingip of this NeutronCreateFloatingIpRequestBody.
:return: The floatingip of this NeutronCreateFloatingIpRequestBody.
:rtype: CreateFloatingIpOption
"""
return self._floatingip
@floatingip.setter
def floatingip(self, floatingip):
"""Sets the floatingip of this NeutronCreateFloatingIpRequestBody.
:param floatingip: The floatingip of this NeutronCreateFloatingIpRequestBody.
:type: CreateFloatingIpOption
"""
self._floatingip = floatingip
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NeutronCreateFloatingIpRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
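# Hedged usage sketch (not part of the generated model; the constructor arguments of
# CreateFloatingIpOption and the import path are assumptions -- check the generated
# CreateFloatingIpOption class referenced in openapi_types above for its real signature):
#
#   from huaweicloudsdkeip.v2 import CreateFloatingIpOption
#   option = CreateFloatingIpOption(floating_network_id="<external-network-id>")
#   body = NeutronCreateFloatingIpRequestBody(floatingip=option)
#   print(body.to_str())  # serialized via to_dict() and sanitize_for_serialization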
| 27.8125 | 85 | 0.575281 | [ "Apache-2.0" ] | huaweicloud/huaweicloud-sdk-python-v3 | huaweicloud-sdk-eip/huaweicloudsdkeip/v2/model/neutron_create_floating_ip_request_body.py | 3,115 | Python |
#!/usr/bin/env python
# Copyright (c) 2020, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
log_forwarding_profile.py
==========================
Ensure that all security rules have the same log forwarding profile assigned.
This script checks if any rules are missing the specified log forwarding profile
and applies the profile if it is missing. This is done with as few API calls as
possible.
Environment variables required:
PAN_HOSTNAME: The hostname or IP of the Firewall
PAN_USERNAME: The username of a firewall admin
PAN_PASSWORD: The password of a firewall admin
PAN_LOG_PROFILE: The name of the log forwarding profile to apply
"""
import os
from panos.firewall import Firewall
from panos.policies import Rulebase, SecurityRule
HOSTNAME = os.environ["PAN_HOSTNAME"]
USERNAME = os.environ["PAN_USERNAME"]
PASSWORD = os.environ["PAN_PASSWORD"]
LOG_PROFILE = os.environ["PAN_LOG_PROFILE"]
def main():
# Create a connection to a firewall and a rulebase to work inside
fw = Firewall(HOSTNAME, USERNAME, PASSWORD)
rulebase = fw.add(Rulebase())
# Fetch all the security rules from the firewall into a list
rules = SecurityRule.refreshall(rulebase, add=False)
print(f"Checking {len(rules)} rules...")
    # Iterate over the list and update any rules that are
    # missing the log forwarding profile
for rule in rules:
if rule.log_setting != LOG_PROFILE:
print(f"{rule.name}")
rule.log_setting = LOG_PROFILE
rule.log_start = 0
rule.log_end = 1
rule.apply()
    # At this point, every rule that didn't have the right log forwarding
    # profile has been updated directly on the firewall.
    # Now, trigger a commit
# In this case, we'll wait for the commit to finish and trigger an exception
# if the commit finished with any errors.
print("Starting commit")
fw.commit(sync=True, exception=True)
print("Commit finished successfully")
if __name__ == "__main__":
main()
| 34.493671 | 80 | 0.728073 | [ "ISC" ] | AnthoBalitrand/pan-os-python | examples/log_forwarding_profile.py | 2,725 | Python |
from mtree.tests.fixtures.generator import ADD, REMOVE, QUERY
"""
actions = '16a16r16a16r'
dimensions = 2
remove_chance = 0.1
"""
DIMENSIONS = 2
def PERFORM(callback):
callback(ADD((56, 1), QUERY((64, 81), 51.97930568373785, 1)))
callback(ADD((87, 41), QUERY((52, 7), 30.190749870740614, 5)))
callback(ADD((88, 28), QUERY((17, 35), 67.08029578831348, 8)))
callback(ADD((96, 12), QUERY((68, 33), 74.63129337618862, 7)))
callback(ADD((77, 28), QUERY((69, 19), 71.44090699195228, 2)))
callback(ADD((85, 13), QUERY((13, 68), 49.40532609344623, 9)))
callback(ADD((89, 19), QUERY((16, 45), 8.029243653157412, 1)))
callback(ADD((46, 36), QUERY((53, 7), 63.75551369709334, 13)))
callback(ADD((28, 79), QUERY((34, 21), 26.456815885225716, 7)))
callback(ADD((17, 5), QUERY((70, 7), 54.46473535481833, 7)))
callback(ADD((11, 46), QUERY((64, 73), 68.97573608900954, 9)))
callback(ADD((68, 17), QUERY((14, 92), 25.839497640736255, 14)))
callback(ADD((95, 76), QUERY((65, 2), 60.34034160799483, 8)))
callback(ADD((51, 19), QUERY((50, 21), 60.21071456061972, 6)))
callback(ADD((81, 11), QUERY((72, 64), 21.04820410609335, 17)))
callback(ADD((60, 16), QUERY((78, 22), 34.206961896855105, 17)))
callback(REMOVE((11, 46), QUERY((78, 45), 31.252374030599206, 22)))
callback(REMOVE((28, 79), QUERY((45, 58), 68.43253328646372, 3)))
callback(REMOVE((89, 19), QUERY((47, 63), 14.99964801938054, 9)))
callback(REMOVE((87, 41), QUERY((67, 36), 62.19227908413332, 7)))
callback(REMOVE((56, 1), QUERY((14, 57), 4.440807680148424, 8)))
callback(REMOVE((51, 19), QUERY((25, 27), 7.651374923350458, 3)))
callback(REMOVE((96, 12), QUERY((51, 3), 67.32194556967877, 15)))
callback(REMOVE((60, 16), QUERY((54, 14), 3.837232462603879, 4)))
callback(REMOVE((77, 28), QUERY((1, 91), 34.38605910436729, 1)))
callback(REMOVE((68, 17), QUERY((47, 62), 18.341821615576404, 8)))
callback(REMOVE((95, 76), QUERY((59, 74), 24.077321729856862, 11)))
callback(REMOVE((88, 28), QUERY((92, 20), 56.955622608836656, 4)))
callback(REMOVE((46, 36), QUERY((39, 29), 62.89158647814607, 2)))
callback(REMOVE((81, 11), QUERY((98, 83), 18.288322424781704, 5)))
callback(REMOVE((17, 5), QUERY((42, 92), 56.769437012179324, 2)))
callback(REMOVE((85, 13), QUERY((26, 76), 48.28027186168586, 3)))
callback(ADD((91, 73), QUERY((76, 40), 34.020714910648344, 5)))
callback(ADD((39, 81), QUERY((30, 40), 29.155255079371738, 3)))
callback(ADD((48, 58), QUERY((79, 46), 13.066598526112276, 6)))
callback(ADD((56, 9), QUERY((37, 73), 43.30235362029393, 3)))
callback(ADD((62, 86), QUERY((100, 53), 52.66463458846595, 4)))
callback(ADD((68, 72), QUERY((36, 62), 49.45151195189656, 5)))
callback(ADD((24, 35), QUERY((43, 51), 13.339453943524173, 9)))
callback(ADD((88, 33), QUERY((63, 30), 76.58326004110971, 8)))
callback(ADD((75, 64), QUERY((25, 3), 1.1680470754746342, 2)))
callback(ADD((69, 56), QUERY((59, 87), 42.15278911997811, 9)))
callback(ADD((69, 57), QUERY((92, 30), 39.627621965620534, 4)))
callback(ADD((27, 65), QUERY((33, 88), 34.795324676392674, 4)))
callback(ADD((45, 40), QUERY((52, 54), 65.54197829034334, 14)))
callback(ADD((36, 57), QUERY((19, 76), 4.61911404860988, 13)))
callback(ADD((81, 52), QUERY((83, 38), 1.2237590444023105, 15)))
callback(ADD((83, 55), QUERY((98, 68), 74.0099200692048, 2)))
callback(REMOVE((68, 72), QUERY((0, 97), 69.99925382725154, 22)))
callback(REMOVE((69, 57), QUERY((33, 18), 37.627376098986545, 4)))
callback(REMOVE((24, 35), QUERY((9, 48), 20.959028967258604, 5)))
callback(REMOVE((75, 64), QUERY((46, 89), 5.3143035769238, 2)))
callback(REMOVE((56, 9), QUERY((88, 52), 28.372717502810147, 13)))
callback(REMOVE((81, 52), QUERY((79, 10), 76.63738416013686, 11)))
callback(REMOVE((27, 65), QUERY((50, 13), 47.384165011926896, 12)))
callback(REMOVE((62, 86), QUERY((57, 88), 39.67711841067303, 5)))
callback(REMOVE((36, 57), QUERY((2, 58), 25.435201121891573, 2)))
callback(REMOVE((91, 73), QUERY((83, 68), 27.478308852931708, 10)))
callback(REMOVE((45, 40), QUERY((47, 43), 26.549629815241815, 11)))
callback(REMOVE((88, 33), QUERY((53, 8), 27.959641649919114, 2)))
callback(REMOVE((39, 81), QUERY((93, 49), 20.972481057234063, 4)))
callback(REMOVE((83, 55), QUERY((100, 92), 36.24177843446844, 5)))
callback(REMOVE((69, 56), QUERY((14, 56), 14.701304755915015, 5)))
callback(REMOVE((48, 58), QUERY((60, 78), 40.380192469613306, 0)))
| 58.506667 | 68 | 0.654284 | [ "MIT" ] | erdavila/M-Tree | py/mtree/tests/fixtures/f16.py | 4,388 | Python |
#!/usr/bin/env python
# coding: utf-8
# ## sircat
#
# Makes a catalog of solar wind stream interaction regions (SIRs) and high speed solar wind streams (HSS) for the Wind, STEREO and MAVEN spacecraft since 2007.
#
# Authors: [C. Möstl](https://www.iwf.oeaw.ac.at/en/user-site/christian-moestl/) (twitter @chrisoutofspace), A. J. Weiss, R. L. Bailey, IWF Graz, Austria; Lan Jian, NASA, USA, Maxim Grandin, University of Helsinki, Finland; Hui Huang, Beijing University, China.
#
#
# **current status: work in progress**
#
# If you want to use parts of this code for generating results for peer-reviewed scientific publications, please contact us per email ([email protected], [email protected], [email protected]) for co-authorships.
#
#
# part of https://github.com/cmoestl/heliocats, last update June 2020
#
# ---
#
# ### Installation
# In a command line, do: "git clone https://github.com/cmoestl/heliocats".
#
# Install a specific conda environment to run this code, see README at https://github.com/cmoestl/heliocats
#
# Download the files from https://doi.org/10.6084/m9.figshare.11973693 and place them in the /data folder.
#
#
#
# ### Updates
#
# Adding a new SIR event: change the source files, or add the sir and hss times in section 2 before the master file sircat/HELIO4CAST_SIRCAT_v10_master.xlsx is produced. Then delete the file for the respective spacecraft under sircat/indices_sircat, and run this notebook or script.
#
# Convert this notebook to a script with "jupyter nbconvert --to script sircat.ipynb" in a command line
#
# ---
#
#
# ### Data sources
#
#
# **PSP SIR list**: Allen et al. 2021: https://www.aanda.org/articles/aa/full_html/2021/06/aa39833-20/aa39833-20.html, list at https://sppgway.jhuapl.edu/event_list
#
#
# **STEREO SIR list**: Lan Jian, https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level3/
# published in: L. K. Jian et al. https://doi.org/10.1007/s11207-019-1416-8, 2019.
#
# This catalog contains the SIR start and end times, as well as the Pt max time for the stream interface. We use their SIR start and end times as our *sir_start_time* and *sir_end_time*, and set the *hss_start_time* to the Pt max time. For 4 Pt max times that were nan in the Jian et al. list, the *hss_start_time* has been set similar to the *sir_end_time*.
#
# **To do**: create our own *hss_end_time* by setting it as the first time when the total bulk speed drops below 450 km/s after *sir_end_time*. Lan: For the STEREO HSS catalog, you can opt to list only the events with the fastest speed reaching at least 500 km/s, to be consistent with Grandin et al. (2019)."
#
#
# **Earth SIR/HSS list**: Maxim Grandin et al., 2018, https://doi.org/10.1029/2018JA026396
#
# This catalog directly gives the *hss_start_time* and the *hss_end_time*. This list was determined by an algorithm and there are no specifics about the SIR times; instead, the start time is determined as the start of the increasing speed and is thus likely closer to an SIR start time than to a stream interface time, which we use as a *hss_start_time*. For simplicity, we have nevertheless taken the given start time as the hss_start_time.
# The times in the Earth SIR/HSS list have been modified to 1 hour earlier as these times were originally given for the magnetopause, but the Wind spacecraft is located at the L1 point. One hour is practically equivalent to the propagation time of a 400 km/s slow solar wind from the L1 point to the magnetopause.
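# (As a quick check, assuming an Earth-L1 distance of roughly 1.5 million km: 1.5e6 km / 400 km/s ≈ 3750 s ≈ 1 hour.)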
#
# **To do**: In future updates, we may change hss_start_time to the sir_start_time and add a proper hss_start_time by searching for ptmax after a new sir_start_time. The Grandin et al. (2019) catalogue only contains events for which the solar wind speed reached at least 500 km/s. Lan: "For Grandin et al. (2019), you can use the peak of total pressure to approximate the stream interface time."
#
#
# **MARS SIR/HSS list**: Hui Huang et al., 2019, https://doi.org/10.3847/1538-4357/ab25e9 (open access not available).
# This catalog gives the sir_start_time, hss_start_time (=stream interface time) and the sir_end_time.
#
# **To do**: Similar to the STEREO list, we have added the hss_end_time.
#
#
# All other parameters are calculated from scratch from the spacecraft data via this notebook or script.
#
# ---
#
# ### Other resourcess
#
#
# **Great review on SIRs** by Ian G. Richardson: https://link.springer.com/article/10.1007/s41116-017-0011-z
#
#
# ---
#
#
#
#
#
#
# start with importing packages, get paths from config.py file and make directories
# In[405]:
last_update='2021-July-13'
# In[11]:
import numpy as np
import scipy.io
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from datetime import timedelta
import seaborn as sns
import datetime
import astropy
import astropy.constants as const
from sunpy.time import parse_time
import time
import pickle
import sys
import os
import urllib
import json
import importlib
import pandas as pd
import copy
import openpyxl
import h5py
from heliocats import plot as hp
importlib.reload(hp) #reload again while debugging
from heliocats import data as hd
importlib.reload(hd) #reload again while debugging
from heliocats import cats as hc
importlib.reload(hc) #reload again while debugging
from heliocats import stats as hs
importlib.reload(hs) #reload again while debugging
#where the in situ data files are located is read
#from config.py
import config
importlib.reload(config)
from config import data_path
from config import data_path_ML
########### make directories first time if not there
resdir='results'
if os.path.isdir(resdir) == False: os.mkdir(resdir)
datadir='data'
if os.path.isdir(datadir) == False: os.mkdir(datadir)
indexdir='sircat/indices_sircat'
if os.path.isdir(indexdir) == False: os.mkdir(indexdir)
catdir='sircat'
if os.path.isdir(catdir) == False: os.mkdir(catdir)
sirplotsdir='sircat/plots_sircat/'
if os.path.isdir(sirplotsdir) == False: os.mkdir(sirplotsdir)
#Convert this notebook to a script with jupyter nbconvert --to script icmecat.ipynb
os.system('jupyter nbconvert --to script sircat.ipynb')
#in situ data files are updated via the icmecat.ipynb notebook
## (1) load data
# ## (1) load data from STEREO-B, STEREO-A, Wind, PSP, and MAVEN
#
# In[2]:
load_data=1
if load_data > 0:
#print('load Ulysses RTN') #made with heliocats.data.save_ulysses_data
#fileuly='ulysses_1990_2009_rtn.p'
#[uly,huly]=pickle.load(open(data_path+fileuly, "rb" ) )
print('load STEREO-B data SCEQ') #yearly magplasma files from stereo science center, conversion to SCEQ
filestb='stereob_2007_2014_sceq.p'
[stb,hstb]=pickle.load(open(data_path+filestb, "rb" ) )
########### CURRENT ACTIVE SPACECRAFT
# ADD BepiColombo
# ADD Solar Orbiter
print('load MAVEN data MSO') #removed magnetosphere by C. Simon Wedlund, 1 data point per orbit, MSO
#filemav='maven_2014_2018.p'
#[mav,hmav]=pickle.load(open(filemav, 'rb' ) )
#filemav='maven_2014_2018_removed.p'
#[mav,hmav]=pickle.load(open(filemav, 'rb' ) )
filemav='maven_2014_2018_removed_smoothed.p'
[mav,hmav]=pickle.load(open(data_path+filemav, 'rb' ) )
#print('load MSL RAD')
#MSL RAD
#rad=hd.load_msl_rad()#, rad.time,rad.dose_sol
##############################################
print('load PSP data SCEQ') #from heliosat, converted to SCEQ similar to STEREO-A/B
filepsp='psp_2018_2021_sceq.p'
[psp,hpsp]=pickle.load(open(data_path+filepsp, "rb" ) )
########### STA
print('load and merge STEREO-A data SCEQ') #yearly magplasma files from stereo science center, conversion to SCEQ
filesta1='stereoa_2007_2020_sceq.p'
sta1=pickle.load(open(data_path+filesta1, "rb" ) )
#beacon data
#filesta2="stereoa_2019_2020_sceq_beacon.p"
#filesta2='stereoa_2019_2020_sept_sceq_beacon.p'
#filesta2='stereoa_2019_now_sceq_beacon.p'
#filesta2="stereoa_2020_august_november_sceq_beacon.p"
filesta2='stereoa_2020_now_sceq_beacon.p'
[sta2,hsta2]=pickle.load(open(data_path+filesta2, "rb" ) )
#cutoff with end of science data
sta2=sta2[np.where(sta2.time >= parse_time('2020-Aug-01 00:00').datetime)[0]]
#make array
sta=np.zeros(np.size(sta1.time)+np.size(sta2.time),dtype=[('time',object),('bx', float),('by', float), ('bz', float),('bt', float),('vt', float),('np', float),('tp', float), ('x', float),('y', float),('z', float), ('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
sta.time=np.hstack((sta1.time,sta2.time))
sta.bx=np.hstack((sta1.bx,sta2.bx))
sta.by=np.hstack((sta1.by,sta2.by))
sta.bz=np.hstack((sta1.bz,sta2.bz))
sta.bt=np.hstack((sta1.bt,sta2.bt))
sta.vt=np.hstack((sta1.vt,sta2.vt))
sta.np=np.hstack((sta1.np,sta2.np))
sta.tp=np.hstack((sta1.tp,sta2.tp))
sta.x=np.hstack((sta1.x,sta2.x))
sta.y=np.hstack((sta1.y,sta2.y))
sta.z=np.hstack((sta1.z,sta2.z))
sta.r=np.hstack((sta1.r,sta2.r))
sta.lon=np.hstack((sta1.lon,sta2.lon))
sta.lat=np.hstack((sta1.lat,sta2.lat))
print('STA Merging done')
########### Wind
print('load and merge Wind data HEEQ')
#from HELCATS HEEQ until 2018 1 1 + new self-processed data with heliosat and hd.save_wind_data
filewin="wind_2007_2018_heeq_helcats.p"
[win1,hwin1]=pickle.load(open(data_path+filewin, "rb" ) )
filewin2="wind_2018_now_heeq.p"
[win2,hwin2]=pickle.load(open(data_path+filewin2, "rb" ) )
#function for spike removal, see list with times in that function
win2=hd.remove_wind_spikes_gaps(win2)
#merge Wind old and new data
#cut off HELCATS data at end of 2017, win2 begins exactly after this
win1=win1[np.where(win1.time < parse_time('2018-Jan-01 00:00').datetime)[0]]
#make array
win=np.zeros(np.size(win1.time)+np.size(win2.time),dtype=[('time',object),('bx', float),('by', float), ('bz', float),('bt', float),('vt', float),('np', float),('tp', float), ('x', float),('y', float),('z', float), ('r', float),('lat', float),('lon', float)])
#convert to recarray
win = win.view(np.recarray)
win.time=np.hstack((win1.time,win2.time))
win.bx=np.hstack((win1.bx,win2.bx))
win.by=np.hstack((win1.by,win2.by))
win.bz=np.hstack((win1.bz,win2.bz))
win.bt=np.hstack((win1.bt,win2.bt))
win.vt=np.hstack((win1.vt,win2.vt))
win.np=np.hstack((win1.np,win2.np))
win.tp=np.hstack((win1.tp,win2.tp))
win.x=np.hstack((win1.x,win2.x))
win.y=np.hstack((win1.y,win2.y))
win.z=np.hstack((win1.z,win2.z))
win.r=np.hstack((win1.r,win2.r))
win.lon=np.hstack((win1.lon,win2.lon))
win.lat=np.hstack((win1.lat,win2.lat))
print('Wind merging done')
print()
print()
print('time ranges of the in situ data: ')
print()
print('active spacecraft:')
print('Wind ',str(win.time[0])[0:10],str(win.time[-1])[0:10])
print('STEREO-A ',str(sta.time[0])[0:10],str(sta.time[-1])[0:10])
print('Parker Solar Probe ',str(psp.time[0])[0:10],str(psp.time[-1])[0:10])
print('MAVEN ',str(mav.time[0])[0:10],str(mav.time[-1])[0:10])
#print('MSL/RAD ',str(rad.time[0])[0:10],str(rad.time[-1])[0:10])
print()
print('missions finished:')
#print('VEX ',str(vex.time[0])[0:10],str(vex.time[-1])[0:10])
#print('MESSENGER ',str(mes.time[0])[0:10],str(mes.time[-1])[0:10])
print('STEREO-B ',str(stb.time[0])[0:10],str(stb.time[-1])[0:10])
#print('Ulysses ',str(uly.time[0])[0:10],str(uly.time[-1])[0:10])
print()
# print('catalogs:')
# print()
# print('HELCATS HIGeoCAT ',str(higeocat_time[0])[0:10],str(higeocat_time[-1])[0:10])
print('done')
# ## (2) make SIRCAT masterfile from STEREO and Wind catalogs
# Here we read raw STEREO SIR and Earth SIR catalogs from Robert Allen, Lan Jian, Maxim Grandin, and Hui Huang et al. and convert to master catalog xlsx file that contains all times in a consistent way.
# In[302]:
#make list for all basic times, ids etc. for master file
rows_list = []
def convert_time(p_time):
#from Allen catalog format to datetime object
p_time_obj=[]
for i in np.arange(0,len(p_time)):
p_str=p_time[i][0:10]+'T'+p_time[i][11:16]+'Z'
p_time_obj.append(parse_time(p_str).datetime)
#print(p_time_obj[i])
#dates with year 1 set to nan:
if mdates.date2num(p_time_obj[i])< 10: p_time_obj[i]=np.nan
return p_time_obj
#read all Allen catalogs
psp_sir_file='sircat/sources/SIR_CIR_List_PSP.csv'
psp_l1_sir_file='sircat/sources/SIR_CIR_List_L1_corr_PSP.csv'
psp_sta_sir_file='sircat/sources/SIR_CIR_List_STA_corr_PSP.csv'
#psp
p_raw=pd.read_csv(psp_sir_file, header=49)
#wind
pw_raw=pd.read_csv(psp_l1_sir_file, header=51)
#sta
pa_raw=pd.read_csv(psp_sta_sir_file, header=51)
print(p_raw.keys())
print()
#################################
############ PSP
print()
p_raw['Start time']=convert_time(p_raw['Start time'])
p_raw['End time']=convert_time(p_raw['End time'])
p_raw['Time of max P']=convert_time(p_raw['Time of max P'])
#print(p_raw['Start time'])
#print(p_raw['End time'])
#print(p_raw['Time of max P'])
for i in np.arange(0,len(p_raw)):
#make id for event
id_time=parse_time(p_raw['Start time'][i]).isot
sc_idstring='SIR_PSP_ALLEN_'
sc_string='PSP'
sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'
#put all data for this event in a list
list1 = [sircat_id,sc_string,np.nan,parse_time(p_raw['Start time'][i]).isot, np.nan, parse_time(p_raw['End time'][i]).isot, np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]
#print(list1)
#append to full list
rows_list.append(list1)
print(rows_list[1])
############ Wind
print()
pw_raw['Start time']=convert_time(pw_raw['Start time'])
pw_raw['End time']=convert_time(pw_raw['End time'])
pw_raw['Time of max P']=convert_time(pw_raw['Time of max P'])
#print(pw_raw['Start time'])
#print(pw_raw['End time'])
#print(pw_raw['Time of max P'])
for i in np.arange(0,len(pw_raw)):
#make id for event
id_time=parse_time(pw_raw['Start time'][i]).isot
sc_idstring='SIR_WIND_ALLEN_'
sc_string='Wind'
sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'
#put all data for this event in a list
list2 = [sircat_id,sc_string,np.nan,parse_time(pw_raw['Start time'][i]).isot, np.nan, parse_time(pw_raw['End time'][i]).isot, np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]
#print(list1)
#append to full list
rows_list.append(list2)
print(rows_list[-1])
############STA
print()
pa_raw['Start time']=convert_time(pa_raw['Start time'])
pa_raw['End time']=convert_time(pa_raw['End time'])
pa_raw['Time of max P']=convert_time(pa_raw['Time of max P'])
#print(pa_raw['Start time'])
#print(pa_raw['End time'])
#print(pa_raw['Time of max P'])
for i in np.arange(0,len(pa_raw)):
#make id for event
id_time=parse_time(pa_raw['Start time'][i]).isot
sc_idstring='SIR_STEREO_A_ALLEN_'
sc_string='STEREO-A'
sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'
#put all data for this event in a list
list3 = [sircat_id,sc_string,np.nan,parse_time(pa_raw['Start time'][i]).isot, np.nan, parse_time(pa_raw['End time'][i]).isot, np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]
#print(list1)
#append to full list
rows_list.append(list3)
print(rows_list[-1])
#
#pw_raw['Start time']
#ptime=parse_time(p_raw['Start time']).datetime
###################### read raw STEREO SIR catalog
file='sircat/sources/STEREO_Level3_SIR_data.xlsx'
print('load Jian STEREO catalog from excel file:', file)
sraw=pd.read_excel(file)
#get 2 times: HSS start (equivalent to SIR start as defined in the L. Jian catalog), HSS end (where speed again < 450km/s)
print('Events in STEREO SIR cat:', sraw.shape[0])
print()
sc=sraw.loc[:,'spacecraft']
year_start=sraw.loc[:,'year_start']
stime=sraw.loc[:,'start_time']
year_end=sraw.loc[:,'year_end']
etime=sraw.loc[:,'end_time']
year_pt=sraw.loc[:,'year_pt']
ptime=sraw.loc[:,'pt_time']
for i in np.arange(0,sraw.shape[0]):
s=stime[i]
y=year_start[i]
doy=int(s[0:3])
hour=int(s[-5:-3])
minute=int(s[-2:])
#print(y,doy,hour, min)
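    #day-of-year to date: Jan 1 of that year plus (doy-1) days, e.g. doy=32 -> Feb 1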
sir_start_time=datetime.datetime(y,1,1)+timedelta(days=doy-1)+timedelta(hours=hour)+timedelta(minutes=minute)
e=etime[i]
y=year_end[i]
doy=int(e[0:3])
hour=int(e[-5:-3])
minute=int(e[-2:])
#print(y,doy,hour, min)
sir_end_time=datetime.datetime(y,1,1)+timedelta(days=doy-1)+timedelta(hours=hour)+timedelta(minutes=minute)
#print(i)
p=ptime[i]
#print(ptime[i])
y=year_pt[i]
doy=int(p[0:3])
hour=int(p[-5:-3])
minute=int(p[-2:])
#print(y,doy,hour, min)
hss_start_time=datetime.datetime(y,1,1)+timedelta(days=doy-1)+timedelta(hours=hour)+timedelta(minutes=minute)
#make id for event
id_time=parse_time(hss_start_time).isot
if sc[i]=='A': sc_idstring='SIR_STEREO_A_JIAN_'
if sc[i]=='B': sc_idstring='SIR_STEREO_B_JIAN_'
if sc[i]=='A': sc_string='STEREO-A'
if sc[i]=='B': sc_string='STEREO-B'
sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'
#put all data for this event in a list
list4 = [sircat_id,sc_string,parse_time(sir_start_time).isot,parse_time(hss_start_time).isot, parse_time(sir_end_time).isot,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]
#print(list1)
#append to full list
rows_list.append(list4)
########################## read raw Wind catalog
#Grandin et al. 2018 - OMNI
#removed 2 SIRs due to data gap of Wind in oct 2014
filewin='sircat/sources/grandin_2018_list_modified.txt'
wraw=np.loadtxt(filewin,skiprows=9)
print('load Grandin Earth HSS catalog from:', filewin)
print('Events in Wind SIR/HSS cat:', wraw.shape[0])
print()
#2 times: SIR/HSS start, HSS end (where speed again < 450km/s)
#begin with 2007
begin2007=np.where(wraw[:,1]>=2007)[0][0]
for i in np.arange(begin2007,len(wraw),1):
#SIR HSS start time y,m,d,h,m - minus 1 hour for Wind at L1, not magnetopause
wstart=datetime.datetime(wraw[i,1].astype(int),wraw[i,2].astype(int), wraw[i,3].astype(int),wraw[i,4].astype(int), 0)-datetime.timedelta(hours=1)
#SIR HSS end time y,m,d,h,m - minus 1 hour for Wind at L1, not magnetopause
wend=datetime.datetime(wraw[i,11].astype(int),wraw[i,12].astype(int), wraw[i,13].astype(int),wraw[i,14].astype(int), 0)-datetime.timedelta(hours=1)
sc_idstring='SIR_WIND_GRANDIN_'
id_time=parse_time(wstart).isot
sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'
sc_string='Wind'
list5 = [sircat_id,sc_string,np.nan,parse_time(wstart).isot,np.nan,parse_time(wend).isot,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]
#print(list2)
rows_list.append(list5)
########################## read MAVEN catalog
from heliocats import data as hd
importlib.reload(hd) #reload again while debugging
#this is a recarray
mavsir_all=hd.load_maven_sir_huang()
#check which events overlap with the available MAVEN data
mavsir_ind=np.where(mavsir_all.start < mav.time[-1])[0]
mavsir=mavsir_all[mavsir_ind]
print('Events in MAVEN SIR/HSS cat:', mavsir.shape[0])
print()
#go through all events
for i in mavsir_ind:
sc_idstring='SIR_MAVEN_HUANG_'
id_time=parse_time(mavsir.start[i][0]).isot
sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'
sc_string='MAVEN'
list6 = [sircat_id,sc_string,parse_time(mavsir.start[i][0]).isot,parse_time(mavsir.si[i][0]).isot,parse_time(mavsir.end[i][0]).isot,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]
#print(list3)
rows_list.append(list6)
################################### add new events **** to be done
#for measuring new events use this function from heliocats.plot
#plt.close('all')
#works in jupyter notebooks
#works in scripts
#matplotlib.use('qt5agg')
#plt.ion()
#STEREO-A
#hp.plot_insitu_measure(sta, '2018-Jan-01 12:00','2018-Feb-01 12:00', 'STEREO-A', 'results/')
#Wind
#hp.plot_insitu_measure(win, '2019-Jan-29','2019-Feb-28', 'Wind', 'results/')
################ make pandas data frame for master file
parameters =['sircat_id','sc_insitu','sir_start_time','hss_start_time','sir_end_time', 'hss_end_time','hss_vtmax_time','sc_heliodistance', 'sc_long_heeq', 'sc_lat_heeq',
'hss_vtmax','hss_vtmean','hss_vtstd','hss_btmax','hss_btmean',\
'hss_btstd','hss_bzmin', 'hss_bzmean','hss_bzstd','hss_duration',\
'sir_vtmax','sir_vtmean', 'sir_vtstd','sir_btmax','sir_btmean',\
'sir_btstd','sir_bzmin', 'sir_bzmean','sir_bzstd','sir_duration']
master=pd.DataFrame(rows_list,columns=parameters)
#sort by spacecraft indicator and start time
master=master.sort_values(['sc_insitu','hss_start_time'])
master = master.reset_index(drop=True) #drop extra index value
master
#save master file as Excel
file='sircat/HELIO4CAST_SIRCAT_v10_master.xlsx'
master.to_excel(file,sheet_name='SIRCATv1.0')
print()
print('SIRCAT master saved as '+file)
print('total events', master.shape[0])
print('done')
# ## (3) make SIRCAT
# In[418]:
from heliocats import cats as hc
importlib.reload(hc) #reload again while debugging
from heliocats import plot as hp
importlib.reload(hp) #reload again while debugging
#load master file
scat=hc.load_helio4cast_sircat_master_from_excel('sircat/HELIO4CAST_SIRCAT_v10_master.xlsx')
scat
####### 3a get indices for all spacecraft
wini=np.where(scat.sc_insitu == 'Wind')[:][0]
pspi=np.where(scat.sc_insitu == 'PSP')[:][0]
stai=np.where(scat.sc_insitu == 'STEREO-A')[:][0]
stbi=np.where(scat.sc_insitu == 'STEREO-B')[:][0]
mavi=np.where(scat.sc_insitu == 'MAVEN')[:][0]
print('done')
####### 3b get parameters for all spacecraft one after another
# remove indices if the events in the master file have changed
#os.system('rm sircat/indices_sircat/SIRCAT_indices_Wind.p')
#os.system('rm sircat/indices_sircat/SIRCAT_indices_STEREO-A.p')
#os.system('rm sircat/indices_sircat/SIRCAT_indices_STEREO-B.p')
#os.system('rm sircat/indices_sircat/SIRCAT_indices_MAVEN.p')
#os.system('rm sircat/indices_sircat/SIRCAT_indices_PSP.p')
#hss times
scat=hc.get_sircat_parameters(psp,pspi,scat,'PSP')
scat=hc.get_sircat_parameters(win,wini,scat,'Wind')
#sir times
scat=hc.get_sircat_parameters(mav,mavi,scat,'MAVEN')
scat=hc.get_sircat_parameters(stb,stbi,scat,'STEREO-B')
#both allen and jian cats
scat=hc.get_sircat_parameters(sta,stai,scat,'STEREO-A')
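#Hedged sketch (not used by get_sircat_parameters above) of the hss_end_time criterion
#described in the header to-do: the first time after a given start time where the bulk
#speed drops below 450 km/s. 'data' is one of the in situ recarrays from section 1
#(e.g. win or sta); the 450 km/s threshold follows the comments above, while the function
#name and interface are our own assumption.
def first_time_below_speed(data, start_time, threshold=450):
    after = np.where(data.time > start_time)[0]
    below = after[np.where(data.vt[after] < threshold)[0]]
    if len(below) == 0:
        return np.nan
    return data.time[below[0]]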
# ###### 3c make all plots if wanted
#matplotlib.use('Agg')
#hp.plot_sircat_events(sta,stai,scat,'STEREO-A',sirplotsdir)
#hp.plot_sircat_events(stb,stbi,scat,'STEREO-B',sirplotsdir)
#hp.plot_sircat_events(win,wini,scat,'Wind',sirplotsdir)
#hp.plot_sircat_events(mav,mavi,scat,'MAVEN',sirplotsdir)
print('done')
#kick out MAVEN events without data
############### sort SIRCAT by date
scat = scat.sort_values(by='hss_start_time',ascending=False)
scat = scat.reset_index(drop=True)
# ### (4) save SIRCAT
# ### 4a save header
# In[410]:
#save header and parameters as text file and prepare for html website
header='SIR CATALOGUE v1.0 \n\nThis is the HELIO4CAST stream interaction region (SIR) and high speed stream (HSS) catalog,\nbased on in situ magnetic field and bulk plasma observations in the heliosphere. \nIt is a merged catalog created from individual ones made by Robert Allen et al., Lan Jian et al., Maxim Grandin et al. and Hui Huang et al. (see references).\n\nThis is version 1.0, released 2020-06-10, updated '+last_update+' doi: 10.6084/m9.figshare.12416906 \n\nThe catalog is available as python pandas dataframe (pickle), json, csv, xlsx, txt, html at \nhttps://helioforecast.space/sircat \n\nNumber of events in SIRCAT: '+str(len(scat))+' \nICME observatories: Parker Solar Probe, Wind, STEREO-A, STEREO-B, MAVEN \nTime ranges: Parker Solar Probe: Oct 2018 - May 2020, Wind: Jan 2007 - Sep 2019, STEREO-A/B: Jan 2007 - Sep 2019, MAVEN: Dec 2014 - Jan 2018. \n\nAuthors: Christian Moestl, Andreas J. Weiss, R. L. Bailey, Martin A. Reiss, Space Research Institute, Austrian Academy of Sciences, Graz, Austria. \nRobert Allen, JHU/APL, USA; Lan Jian, NASA, USA; Maxim Grandin, University of Helsinki, Finland; Hui Huang, Beijing University, China. \n\nRules: If results are produced with this catalog for peer-reviewed scientific publications, \nplease contact [email protected], [email protected], [email protected], [email protected] for possible co-authorships. \n\nThis catalog has been made by getting the start and end times of each high speed stream from the \nindividual catalogs, and then calculating all parameters again consistently from the data by us. \nThe in situ data that were used for the creating catalog, with a size of 8 GB in total, including extra data \nfiles with magnetic field components in RTN coordinates and other spacecrat that are not used for producing this catalog, \ncan be downloaded in python pickle format as recarrays from https://doi.org/10.6084/m9.figshare.11973693.v7 \nThe python code for producing this catalog is available at https://github.com/cmoestl/heliocats sircat.ipynb \n\nEach sircat_id has a tag in it that indicates from which catalog the ICME times were taken: \n\nParker Solar Probe: Allen et al. 2021, tag: ALLEN, \nWind: Grandin et al. (2019), tag: GRANDIN \nSTEREO-A: Jian et al. (2019), tag: JIAN. \nSTEREO-B: Jian et al. (2019), tag: JIAN. \nMAVEN: Huang et al. (2019), tag: HUANG. \n\nReferences \nAllen et al. (2021), https://doi.org/10.1051/0004-6361/202039833 \nGrandin, M. et al. (2019), https://doi.org/10.1029/2018JA026396 \nJian, L. et al. (2019), https://doi.org/10.1007/s11207-019-1416-8 \nHuang, H. et al. (2019), https://doi.org/10.3847/1538-4357/ab25e9 \n\nComments: \n- The STEREO catalog contains the SIR start, stream interface and SIR end times. We use their stream interface time as our hss_start_time. \n- The MAVEN catalog has similar times as the STEREO catalog.\n- Earth SIR/HSS list: This catalog directly gives the hss_start_time and the hss_end_time, but no SIR times. \n- The times in the Earth SIR/HSS list have been modified to 1 hour earlier as these times were \noriginally given for the magnetopause, but the Wind spacecraft is located at the L1 point. \nOne hour is practically equivalent to the propagation time of a 400 km/s slow solar wind \nfrom the L1 point to the magnetopause.\n- Spacecraft positions are given in Heliocentric Earth Equatorial Coordinates (HEEQ) coordinates. \n- The coordinate system for all magnetic field components is SCEQ, except for Wind (HEEQ, which is the equivalent for SCEQ for Earth). 
\n Definition of SpaceCraft Equatorial Coordinates (SCEQ): \n Z is the solar rotation axis. \n Y is the cross product of Z and R, with R being the vector that points from the Sun to the spacecraft.\n X completes the right handed triad (and points away from the Sun). \nThis system is thus like HEEQ but centered on the respective in situ spacecraft, so the SCEQ X and Y \nbase vectors are rotated by the HEEQ longitude of the in situ spacecraft from HEEQ X and Y.\nThe Y vector is similar to the T vector in an RTN system for each spacecraft, but the X and Z vectors \nare rotated around Y compared to an RTN system. The differences between RTN and SCEQ for spacecraft within \na few degrees of the solar equatorial plane are very small (within a few 0.1 nT usually).\nWe choose SCEQ because it has the advantage that a comparison between multipoint CME events \nand for comparison to simulations there is always a similar reference plane (the solar equatorial plane). \n\n '
parameters_text='Parameters:\n00: sircat_id: The unique identifier for the observed stream interaction region (SIR). unit: string. \n01: sc insitu: The name of the in situ observing spacecraft. unit: string. \n02: sir_start_time: Stream interaction region start time. unit: UTC. \n03: hss_start_time: High speed stream start time, equal to the stream interface time (for STEREO, MAVEN catalogs). unit: UTC. \n04: sir_end_time: End time of the stream interaction region. unit: UTC. \n05: hss_end_time: High speed stream end time, criterion at Wind: speed < 450 km/s. unit: UTC. \n06: hss_vtmax_time: High speed stream maxmimum speed time. unit: UTC. \n07: sc_heliodistance: Heliocentric distance of the spacecraft at hss_start_time. unit: AU.\n08: sc_long_heeq: Heliospheric longitude of the spacecraft at hss_start_time, range [-180,180]. unit: degree (HEEQ).\n09: sc_lat_heeq: Heliospheric latitude of the spacecraft at hss_start_time, range [-90,90]. unit: degree (HEEQ).\n10: hss_vt_max: Maximum proton speed from hss_start_time to hss_end_time. unit: km/s.\n11: hss_vt_mean: Mean proton speed from hss_start_time to hss_end_time. unit: km/s.\n12: hss_vt_std: Standard deviation of proton speed from hss_start_time to hss_end_time. unit: km/s.\n13: hss_vt_mean: Mean proton speed from hss_start_time to hss_end_time. unit: km/s.\n14: hss_bt_max: Maximum total magnetic field from hss_start_time to hss_end_time. unit: nT.\n15: hss_bt_mean: Mean total magnetic field from hss_start_time to hss_end_time. unit: nT.\n16: hss_bt_std: Standard deviation of total magnetic field from hss_start_time to hss_end_time. unit: nT.\n17: hss_bz_min: Minimum Bz component (SCEQ) from hss_start_time to hss_end_time. unit: nT.\n18: hss_bz_mean: Mean Bz component (SCEQ) from hss_start_time to hss_end_time. unit: nT.\n19: hss_bz_std: Standard deviation of Bz component (SCEQ) from hss_start_time to hss_end_time. unit: nT.\n20: hss_duration: Duration of high speed stream from hss_start_time to hss_end_time. unit: hours.\n21: sir_vt_mean: Mean proton speed from hss_start_time to sir_end_time. unit: km/s.\n22: sir_vt_std: Standard deviation of proton speed from sir_start_time to hss_end_time. unit: km/s.\n23: sir_vt_mean: Mean proton speed from hss_start_time to sir_end_time. unit: km/s.\n24: sir_bt_max: Maximum total magnetic field from sir_start_time to hss_end_time. unit: nT.\n25: sir_bt_mean: Mean total magnetic field from sir_start_time to sir_end_time. unit: nT.\n26: sir_bt_std: Standard deviation of total magnetic field from sir_start_time to sir_end_time. unit: nT.\n27: sir_bz_min: Minimum Bz component (SCEQ) from sir_start_time to sir_end_time. unit: nT.\n28: sir_bz_mean: Mean Bz component (SCEQ) from sir_start_time to sir_end_time. unit: nT.\n29: sir_bz_std: Standard deviation of Bz component (SCEQ) from sir_start_time to sir_end_time. unit: nT.\n30: sir_duration: Duration of stream interaction region from sir_start_time to sir_end_time. unit: hours.\n\n\n'
print(header)
print(parameters_text)
#make header file
file='sircat/HELIO4CAST_SIRCAT_v10_header.txt'
with open(file, "w") as text_file:
text_file.write(header)
text_file.write(parameters_text)
print()
print('header saved as '+file)
print()
#Convert to html regarding line breaks, paragraph beginning and spaces
header_spaces=header.replace(" ", " ")
header_html= "<p>" +header_spaces.replace('\n', '<br>')+ "</p>"
parameters_spaces=parameters_text.replace(" ", " ")
parameters_html= "<p>" +parameters_text.replace('\n', '<br>')+ "</p>"
print('header converted to HTML')
print()
print()
# ### 4b save into different formats
# In[413]:
########## python formats
# save SIRCAT as pandas dataframe with times as datetime objects as pickle
file='sircat/HELIO4CAST_SIRCAT_v10_pandas.p'
pickle.dump([scat,header,parameters], open(file, 'wb'))
print('SIRCAT saved as '+file)
#load sircat as pandas dataframe
file='sircat/HELIO4CAST_SIRCAT_v10_pandas.p'
[scat_pandas,h,p]=pickle.load( open(file, 'rb'))
scat.keys()
scat
# # save SIRCAT as numpy array with times as matplotlib datetime as pickle
# scat_num=copy.deepcopy(scat)
# scat_num.icme_start_time=parse_time(scat_num.icme_start_time).plot_date
# scat_num.mo_start_time=parse_time(scat_num.mo_start_time).plot_date
# scat_num.mo_end_time=parse_time(scat_num.mo_end_time).plot_date
# #convert to recarray
# scat_num_rec=scat_num.to_records()
# #create structured array
# dtype1=[('index','i8'),('icmecat_id', '<U30'),('sc_insitu', '<U20')] +[(i, '<f8') for i in ic.keys()[2:len(ic.keys())]]
# scat_num_struct=np.array(scat_num_rec,dtype=dtype1)
# file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.p'
# pickle.dump([scat_num,scat_num_struct,header,parameters], open(file, 'wb'))
# print('ICMECAT saved as '+file)
################ save to different formats
#get beginning of tags for STA to identify allen and jian events
tag_list=[]
for i in np.arange(0,len(scat)):
    tag_list.append(scat.sircat_id[i][13]) #character 13 is 'J' for Jian ids and 'A' for Allen ids
stai_jian=np.where(np.logical_and(scat.sc_insitu == 'STEREO-A',np.array(tag_list)=='J'))[:][0]
stai_allen=np.where(np.logical_and(scat.sc_insitu == 'STEREO-A',np.array(tag_list)=='A'))[:][0]
#get indices of all SIR spacecraft in SIRCAT
sir_sc=np.hstack([stai_jian,stbi,mavi])
#get indices of all HSS spacecraft in SIRCAT
hss_sc=np.hstack([pspi,wini,stai_allen])
#copy pandas dataframe first to change time format consistent with HELIO4CAST
scat_copy=copy.deepcopy(scat)
scat_copy.at[sir_sc,'sir_start_time']=parse_time(scat.sir_start_time[sir_sc]).isot
scat_copy.hss_start_time=parse_time(scat.hss_start_time).isot
scat_copy.at[sir_sc,'sir_end_time']=parse_time(scat.sir_end_time[sir_sc]).isot
scat_copy.at[hss_sc,'hss_end_time']=parse_time(scat.hss_end_time[hss_sc]).isot
#scat_copy.at[hss_sc,'hss_vtmax_time']=parse_time(scat.hss_vtmax_time[hss_sc]).isot
#change time format for sir
for i in sir_sc:
dum=scat_copy.sir_start_time[i]
scat_copy.at[i,'sir_start_time']=dum[0:16]+'Z'
dum=scat_copy.hss_start_time[i]
scat_copy.at[i,'hss_start_time']=dum[0:16]+'Z'
dum=scat_copy.sir_end_time[i]
scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z'
for i in hss_sc:
dum=scat_copy.hss_start_time[i]
scat_copy.at[i,'hss_start_time']=dum[0:16]+'Z'
dum=scat_copy.hss_end_time[i]
scat_copy.at[i,'hss_end_time']=dum[0:16]+'Z'
#dum=scat_copy.hss_vtmax_time[i]
#scat_copy.at[i,'hss_vtmax_time']=dum[0:16]+'Z'
# for i in stbi:
# dum=scat_copy.sir_end_time[i]
# scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z'
# for i in stai:
# dum=scat_copy.sir_end_time[i]
# scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z'
#save as Excel
file='sircat/HELIO4CAST_SIRCAT_v10.xlsx'
scat_copy.to_excel(file,sheet_name='SIRCATv1.0')
print('SIRCAT saved as '+file)
#save as json
file='sircat/HELIO4CAST_SIRCAT_v10.json'
scat_copy.to_json(file)
print('SIRCAT saved as '+file)
#save as csv
file='sircat/HELIO4CAST_SIRCAT_v10.csv'
scat_copy.to_csv(file)
print('SIRCAT saved as '+file)
#save as txt
file='sircat/HELIO4CAST_SIRCAT_v10.txt'
np.savetxt(file, scat_copy.values.astype(str), fmt='%s' )
print('SIRCAT saved as '+file)
# In[415]:
#########################
# #########save into hdf5 format , use S for strings http://docs.h5py.org/en/stable/strings.html#what-about-numpy-s-u-type
# dtype2=[('index','i8'),('icmecat_id', 'S30'),('sc_insitu', 'S20')] +[(i, '<f8') for i in ic.keys()[2:len(ic.keys())]]
# ich5=np.array(scat_num_rec,dtype=dtype2)
# file='icmecat/HELIO4CAST_ICMECAT_v20.h5'
# f=h5py.File(file,mode='w')
# f["icmecat"]= ich5
# #add attributes
# #************************
# #***********************
# print('ICMECAT saved as '+file)
# f.close()
# #reading h5py files http://docs.h5py.org/en/latest/quick.html
# #fr = h5py.File('icmecat/HELIO4CAST_ICMECAT_v20.h5', 'r')
# #list(fr.keys())
# #ich5=fr['icmecat']
# #ich5['mo_bstd']
# #ich5.dtype
# #fr.close()
# ##################
# #save as .npy without pickle
# file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.npy'
# np.save(file,ich5, allow_pickle=False)
# print('ICMECAT saved as '+file)
# #for loading do:
# #icnpy=np.load(file)
# #decode strings:
# #icnpy['icmecat_id'][0].decode()
#copy pandas dataframe first to change time format consistent with HELIO4CAST
scat_copy2=copy.deepcopy(scat)
scat_copy2.at[sir_sc,'sir_start_time']=parse_time(scat.sir_start_time[sir_sc]).iso
scat_copy2.hss_start_time=parse_time(scat.hss_start_time).iso
scat_copy2.at[sir_sc,'sir_end_time']=parse_time(scat.sir_end_time[sir_sc]).iso
scat_copy2.at[hss_sc,'hss_end_time']=parse_time(scat.hss_end_time[hss_sc]).iso
#scat_copy2.at[hss_sc,'hss_vtmax_time']=parse_time(scat.hss_vtmax_time[hss_sc]).iso
#change time format for sir
for i in sir_sc:
dum=scat_copy2.sir_start_time[i]
scat_copy2.at[i,'sir_start_time']=dum[0:16]
dum=scat_copy2.hss_start_time[i]
scat_copy2.at[i,'hss_start_time']=dum[0:16]
dum=scat_copy2.sir_end_time[i]
scat_copy2.at[i,'sir_end_time']=dum[0:16]
for i in hss_sc:
dum=scat_copy2.hss_start_time[i]
scat_copy2.at[i,'hss_start_time']=dum[0:16]
dum=scat_copy2.hss_end_time[i]
scat_copy2.at[i,'hss_end_time']=dum[0:16]
#dum=scat_copy2.hss_vtmax_time[i]
#scat_copy2.at[i,'hss_vtmax_time']=dum[0:16]
#save as json for webpage with different time format
file='sircat/HELIO4CAST_SIRCAT_v10_isot.json'
scat_copy2.to_json(file)
print('SIRCAT saved as '+file)
#save as html no header
file='sircat/HELIO4CAST_SIRCAT_v10_simple.html'
scat_copy.to_html(file)
print('SIRCAT saved as '+file)
############ save as html file with header
#save as html
file='sircat/HELIO4CAST_SIRCAT_v10.html'
#ic.to_html(file,justify='center')
#ichtml='{% extends "_base.html" %} \n \n {% block content %} \n \n \n '
ichtml = header_html
ichtml += parameters_html
ichtml += scat_copy.to_html()
#ichtml +='\n \n {% endblock %}'
with open(file,'w') as f:
f.write(ichtml)
f.close()
print('SIRCAT saved as '+file)
# ## 4c load SIRCAT pickle files
# In[416]:
#load sircat as pandas dataframe
file='sircat/HELIO4CAST_SIRCAT_v10_pandas.p'
[scat_pandas,h,p]=pickle.load( open(file, 'rb'))
scat.keys()
scat
#load icmecat as numpy array
# file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.p'
# [ic_nprec,ic_np,h,p]=pickle.load( open(file, 'rb'))
# In[417]:
scat_pandas
scat_pandas.keys()
# In[ ]:
# In[ ]:
# In[ ]:
| 38.94227 | 4,576 | 0.698761 | [ "MIT" ] | cmoestl/heliocats | sircat.py | 39,800 | Python |
import pandas as pd
import numpy as np
from scipy import stats
def columns_views(player_1_df, player_2_df):
columns = list(player_1_df.columns)
if list(player_1_df.columns) == list(player_2_df.columns):
columns = list(player_1_df.columns)
player_1 = list(player_1_df.values[0])
player_2 = list(player_2_df.values[0])
views = []
for column, player1, player2 in zip(columns, player_1, player_2):
print('column : {} _ player1-{} , player2-{} < diff : {} >'.format(
column, player1, player2, abs(player1 - player2)
))
views.append(abs(player1 - player2))
print(views)
def convert_preferred_foot(df):
df['preferred_foot'] = df['preferred_foot'].replace('Right', 1)
df['preferred_foot'] = df['preferred_foot'].replace('Left', 2)
return df
def convert_work_rate(df):
convert = {
'High': 3,
'Medium': 2,
'Low': 1
}
work_rate = df['work_rate'].values[0].split('/')
attack = work_rate[0]
defense = work_rate[1]
df['attack'] = convert[attack]
df['defense'] = convert[defense]
    # drop the work_rate column
df = df.drop(columns='work_rate')
return df
def euclidean_distance(v1, v2):
    # compute the Euclidean distance
    # https://qiita.com/shim0mura/items/64918dad83d162ef2ac2#ユークリッド距離
    # both of the following lines return the same value
# distance = np.linalg.norm(v1 - v2)
distance = np.sqrt(np.power(v1 - v2, 2).sum())
    # We want an easy-to-interpret similarity value between 0 and 1, where more similar vectors are closer to 1.
    # Adding 1 to the distance d and taking the reciprocal avoids division by zero and gives exactly such a value.
# 1/(1+d)
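    # e.g. for np.array([1, 2]) and np.array([4, 6]): distance = 5.0, so the returned similarity is 1/6 ≈ 0.167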
# print('distance', distance)
return 1 / (1 + distance)
def cos_similarity(v1, v2):
    # alternative: compute cosine similarity with SciPy (note: dis.cosine returns the cosine distance, i.e. 1 - similarity)
# import scipy.spatial.distance as dis
# print(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
# print(dis.cosine(v1, v2))
# return dis.cosine(v1, v2)
    # compute the cosine similarity
return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
# Pearson product-moment correlation coefficient
def pearson_product_moment_correlation_coefficien(v1, v2):
# corr = np.corrcoef(v1, v2)[0, 1]
corr = stats.pearsonr(v1, v2)
return corr
# Spearman rank correlation coefficient
def spearman_rank_correlation_coefficient(v1, v2):
corr = stats.spearmanr(v1, v2)
return corr
# Kendall rank correlation coefficient
def kendalltau_rank_correlation_coefficient(v1, v2):
corr = stats.kendalltau(v1, v2)
return corr
def similarity(v1_df, v2_df):
v1_value = v1_df.values[0]
v2_value = v2_df.values[0]
print('v1_value', v1_value)
print('v2_value', v2_value)
    # convert the lists to pd.Series
s1 = pd.Series(list(v1_value))
s2 = pd.Series(list(v2_value))
    # compute the correlation coefficient
res = s1.corr(s2)
print(res)
corr = pearson_product_moment_correlation_coefficien(
v1_value, v2_value
)
print('pearson_product_moment_correlation_coefficien', corr)
corr = spearman_rank_correlation_coefficient(
v1_value, v2_value
)
print('spearman_rank_correlation_coefficient', corr)
corr = kendalltau_rank_correlation_coefficient(
v1_value, v2_value
)
print('kendalltau_rank_correlation_coefficient', corr)
e_distance = euclidean_distance(v1_value, v2_value)
print('e_distance', e_distance)
# return euclidean_distance(v1_value, v2_value)
# return res
return cos_similarity(v1_value, v2_value)
# normalize numeric string values (handles forms like "64+2" and "64-2")
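# e.g. shaping_num("64+2") -> 66, shaping_num("64-2") -> 62, shaping_num("64") -> "64" (unchanged)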
def shaping_num(value):
if '+' in value:
value = str(value).split('+')
value = int(value[0]) + int(value[1])
return value
if '-' in value:
value = str(value).split('-')
value = int(value[0]) - int(value[1])
return value
return value
def need_columns(df):
columns = [
'height_cm',
'weight_kg',
'preferred_foot',
'weak_foot',
'skill_moves',
'work_rate',
'player_tags',
'pace',
'shooting',
'passing',
'dribbling',
'defending',
'physic',
'player_traits',
'attacking_crossing',
'attacking_finishing',
'attacking_heading_accuracy',
'attacking_short_passing',
'attacking_volleys',
'skill_dribbling',
'skill_curve',
'skill_fk_accuracy',
'skill_long_passing',
'skill_ball_control',
'movement_acceleration',
'movement_sprint_speed',
'movement_agility',
'movement_reactions',
'movement_balance',
'power_shot_power',
'power_jumping',
'power_stamina',
'power_strength',
'power_long_shots',
'mentality_aggression',
'mentality_interceptions',
'mentality_positioning',
'mentality_vision',
'mentality_penalties',
'mentality_composure',
'defending_marking',
'defending_standing_tackle',
'defending_sliding_tackle'
]
columns += [
'ls', 'st', 'rs',
'lw', 'lf', 'cf', 'rf', 'rw',
'lam', 'cam', 'ram',
'lm', 'lcm', 'cm', 'rcm', 'rm',
'lwb', 'ldm', 'cdm', 'rdm', 'rwb',
'lb', 'lcb', 'cb', 'rcb', 'rb'
]
# ls,st,rs,lw,lf,cf,rf,rw,
# lam,cam,ram,lm,lcm,cm,rcm,rm,
# lwb,ldm,cdm,rdm,rwb,lb,lcb,cb,rcb,rb
return df[columns]
def convert_num_values(player_1_df, player_2_df):
num_values = [
'pace',
'shooting',
'passing',
'dribbling',
'defending',
'physic',
'attacking_crossing',
'attacking_finishing',
'attacking_heading_accuracy',
'attacking_short_passing',
'attacking_volleys',
'skill_dribbling',
'skill_curve',
'skill_fk_accuracy',
'skill_long_passing',
'skill_ball_control',
'movement_acceleration',
'movement_sprint_speed',
'movement_agility',
'movement_reactions',
'movement_balance',
'power_shot_power',
'power_jumping',
'power_stamina',
'power_strength',
'power_long_shots',
'mentality_aggression',
'mentality_interceptions',
'mentality_positioning',
'mentality_vision',
'mentality_penalties',
'mentality_composure',
'defending_marking',
'defending_standing_tackle',
'defending_sliding_tackle'
]
num_values += [
'ls', 'st', 'rs',
'lw', 'lf', 'cf', 'rf', 'rw',
'lam', 'cam', 'ram',
'lm', 'lcm', 'cm', 'rcm', 'rm',
'lwb', 'ldm', 'cdm', 'rdm', 'rwb',
'lb', 'lcb', 'cb', 'rcb', 'rb'
]
for v in num_values:
        # Normalize player 1's numeric values
value = player_1_df[v].values.astype(str)[0]
value = shaping_num(str(value))
# player_1_df[v] = float(value) * 0.01
player_1_df[v] = float(value)
        # Normalize player 2's numeric values
value = player_2_df[v].values.astype(str)[0]
value = shaping_num(str(value))
# player_2_df[v] = float(value) * 0.01
player_2_df[v] = float(value)
return player_1_df, player_2_df
def convert_traits(player_1_df, player_2_df):
    # Handle player traits
traits_list = [
        'Backs Into Player', # FIFA 18 only
'Bicycle Kicks',
'Chip Shot',
'Dives Into Tackles',
'Early Crosser',
'Fancy Passes',
'Finesse Shot',
'Flair',
'Giant Throw-In',
'GK Cautious With Crosses',
'GK Comes For Crosses',
'GK Flat Kick',
'GK Long Thrower',
'GK Save With Foot',
'Injury Prone',
'Leadership',
'Long Passer',
'Long Shot Taker',
'Long Throw-In',
'One Club Player',
'Outside Foot Shot',
'Play Maker',
'Power Header',
'Rushes Out Of Goal',
'Second Wind',
'Set Play Specialist',
'Solid Player',
'Speed Dribbler',
'Swerve',
'Takes Powerful Driven Free Kicks',
'Team Player',
'Technical Dribbler'
]
player_1_df_player_traits = player_1_df['player_traits']
player_2_df_player_traits = player_2_df['player_traits']
player_1_df = player_1_df.drop(columns='player_traits')
player_2_df = player_2_df.drop(columns='player_traits')
for trait in traits_list:
trait_value = 0
for p_trait in player_1_df_player_traits.values[0].split(','):
if trait in p_trait:
trait_value = 1
break
player_1_df[trait] = trait_value
trait_value = 0
for p_trait in player_2_df_player_traits.values[0].split(','):
if trait in p_trait:
trait_value = 1
break
player_2_df[trait] = trait_value
return player_1_df, player_2_df
def players_comparison(player_1, player_2):
df = pd.read_csv('data/players_18.csv')
player_1_df = df.query('sofifa_id == {}'.format(player_1))
player_2_df = df.query('sofifa_id == {}'.format(player_2))
# david_silva = df.query('sofifa_id == 189881')
player_1_df = need_columns(player_1_df)
player_2_df = need_columns(player_2_df)
    # Convert the num_values columns
player_1_df, player_2_df = convert_num_values(player_1_df, player_2_df)
    # Handle player traits
player_1_df, player_2_df = convert_traits(player_1_df, player_2_df)
    # Handle player tags
player_1_df = player_1_df.drop(columns='player_tags')
player_2_df = player_2_df.drop(columns='player_tags')
    # Convert the preferred foot
player_1_df = convert_preferred_foot(player_1_df)
player_2_df = convert_preferred_foot(player_2_df)
    # Convert the attack/defense work rate
player_1_df = convert_work_rate(player_1_df)
player_2_df = convert_work_rate(player_2_df)
# print(player_1_df.values[0])
# print(player_2_df.values)
cos = similarity(player_1_df, player_2_df)
print('cos', cos)
    # Display the columns
# columns_views(player_1_df, player_2_df)
shinji_kagawa = 189358
david_silva = 178088
# david_silva = 41
# Shinji Kagawa : 189358
# Keisuke Honda : 186581
# Hiroshi Kiyotake : 210126
# Iniesta : 41
# Smalling : 189881
# Sergio Ramos : 155862
# Mario Gotze : 192318
# Julian Weigl : 222028
# Juan Mata : 178088
# Isco : 197781
# David Silva : 168542
# Marc Bartra : 198141
# Romelu Lukaku : 192505
# De Bruyne : 192985
# Modric : 177003
# Kroos : 182521
# Rakitic : 168651
# Ousmane Dembele : 231443
# Lionel Messi : 158023
# Hummels : 178603
# Pique : 152729
# Boateng : 183907
# Mesut Ozil : 176635
# Marco Reus : 188350
# Ivan Perisic : 181458
# Thomas Muller : 189596
# Oscar : 188152
# Yarmolenko : 194794
# Eden Hazard : 183277
# Neymar : 190871
# Robben : 9014
# Salah : 209331
# Harry Kane : 202126
# Mbappe : 231747
# Griezmann : 194765
# Gerard Pique : 152729
players_comparison(shinji_kagawa, david_silva)
# columns_views(shinji_kagawa, david_silva)
# Weak Foot
# https://www.fifplay.com/encyclopedia/weak-foot/
# Work Rate
# https://www.fifplay.com/encyclopedia/work-rate/
# Euclidean distance vs. cosine similarity
# https://enjoyworks.jp/tech-blog/2242
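# Minimal toy check of the two similarity measures defined above. The vectors
# are made-up example attribute values, not real player data; numpy may already
# be imported at the top of this script, so re-importing it is harmless.
import numpy as np

_v1 = np.array([80.0, 75.0, 90.0])
_v2 = np.array([78.0, 70.0, 92.0])
# euclidean_distance() returns 1 / (1 + d), so identical vectors score exactly 1.0
print('toy euclidean similarity (identical):', euclidean_distance(_v1, _v1))
print('toy euclidean similarity (different):', euclidean_distance(_v1, _v2))
# cos_similarity() compares only direction, ignoring vector magnitude
print('toy cosine similarity:', cos_similarity(_v1, _v2))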
| 24.175947 | 79 | 0.609857 | [
"Apache-2.0"
] | shimakaze-git/football-data-analysis | playstyle_similar/playstyle_similar2.py | 11,887 | Python |
import virtualbox, json, pprint, configparser, time, psutil, sys
from pypresence import Presence
class RichPresence:
def __init__(self):
# Initialize the VirtualBox instance, config, and assets.
self.virtualbox = virtualbox.VirtualBox()
self.config = configparser.ConfigParser()
self.config.read("config.ini")
self.assets = json.load(open("assets.json", "r"))
# Initialize the Rich Presence.
client_id = self.config["Rich Presence"]["client_id"]
self.RPC = Presence(client_id)
self.RPC.connect()
# Initialize format dictionary.
self.format_dict = {"start": time.time()}
while True:
# Check if VirtualBox is running, and that the current OS is Windows.
# [TODO] Add support for other operating systems.
if (
"VirtualBox.exe" in (p.name() for p in psutil.process_iter())
or "VirtualBoxVM.exe" in (p.name() for p in psutil.process_iter())
) and (sys.platform.startswith("win32")):
# Generate the list of machines.
self.machine_list = self.generate_machine_list()
# Set the previous format dictionary, and then update the current one.
self.previous_format_dict = self.format_dict
# Generate the format dictionary using the list of machines.
self.format_dict = self.generate_format_dict(
machine_list=self.machine_list,
previous_format_dict=self.previous_format_dict,
)
                # Generate the presence dictionary using the format dictionary.
self.presence_dict = self.generate_presence_dict(
format_dict=self.format_dict
)
# Update the Rich Presence.
self.RPC.update(**self.presence_dict)
# Print the current presence to the terminal.
# [TODO] Print the presence dictionary more neatly
pprint.pprint(self.presence_dict)
print("--------------------")
# Stop updating the Rich Presence if VirtualBox is not running.
elif sys.platform.startswith("win32"):
print("VirtualBox is not running")
self.RPC.clear()
# Exit the program if the user is not on Windows.
else:
print("Sorry, your platform is not supported.")
exit()
time.sleep(15)
def generate_machine_list(self):
# Initialize list to store machine information.
machine_list = []
# Get information for each machine.
machines = self.virtualbox.machines
machine_names = [machine.name for machine in machines]
machine_states = [machine.state for machine in machines]
machine_operating_systems = [machine.os_type_id for machine in machines]
# Iterate through the machines and store information about them in the machine list.
for machine_index in range(len(machines)):
# Initialize dictionary to store machine information.
machine_list.append({})
# Obtain OS and architecture information.
            os_version, architecture = self.generate_os_and_architecture(
                machine_operating_systems[machine_index]
            )
            # Assign the corresponding information to the keys in the dictionary
            machine_list[-1]["name"] = machine_names[machine_index]
            machine_list[-1]["architecture"] = architecture
            machine_list[-1]["state"] = str(machine_states[machine_index])
# Iterate through assets and find the correct OS.
for os in self.assets["operating systems"]:
# If the OS version is found in any of the OS dictionaries, set the version to that key.
if os_version in self.assets["operating systems"][os]["versions"]:
machine_list[-1]["os"] = os
machine_list[-1]["os version"] = os_version
return machine_list
def generate_os_and_architecture(self, os_type_id: str):
# Split OS type ID to obtain the OS and architecture.
if "_" in os_type_id:
self.oa_operating_system, self.oa_architecture = os_type_id.split("_", 1)
# If an architecture is not stated, it is 32-bit.
else:
self.oa_operating_system = os_type_id
self.oa_architecture = "32"
return self.oa_operating_system, self.oa_architecture
def generate_format_dict(
self, machine_list: list[dict], previous_format_dict: dict
):
        # Store the previous start time and remove it from the dictionary, so the
        # rest of the dictionary can be compared against the new one below.
previous_start = previous_format_dict.pop("start")
# Initialize dictionary to store Rich Presence formatting.
format_dict = {}
# Assume there is no machine active.
format_dict["machine active"] = False
# Iterate through machine dictionary and find a machine that is online.
for machine in machine_list:
if machine["state"] == "FirstOnline":
# Recognize that the user is in a machine.
format_dict["machine active"] = True
# Fill the rest of the formatting dictionary with information from the machine dictionary.
format_dict["os name"] = machine["os"]
format_dict["os version"] = machine["os version"]
format_dict["os version name"] = self.assets["operating systems"][
machine["os"]
]["versions"][machine["os version"]]["name"]
format_dict["os version image"] = self.assets["operating systems"][
machine["os"]
]["versions"][machine["os version"]]["image"]
format_dict["architecture"] = machine["architecture"]
format_dict["architecture image"] = machine["architecture"]
# End the loop now that we have found the active machine.
break
format_dict["icon"] = "icon"
# If the format dictionary has not changed, then use the same start time as last time.
if format_dict == previous_format_dict:
format_dict["start"] = previous_start
# If the format dictionary has changed since the last loop, then reset the timer.
else:
# Set the start time of the Rich Presence to now.
format_dict["start"] = time.time()
return format_dict
def generate_presence_dict(self, format_dict: dict):
# Initialize dictionary to store the Rich Presence.
presence_dict = {}
# If there is an active machine, display it on the presence.
if format_dict["machine active"] == True:
# For each field in the config, set the Rich Presence to show that the user is in a machine.
for field in self.config["In Machine"]:
presence_dict[field] = self.config["In Machine"][field].format(
**format_dict
)
# Set the start time using the format dictionary.
presence_dict["start"] = format_dict["start"]
# For each field in the config, set the Rich Presence to show that the user is in the menu.
else:
# Fill each presence dictionary field with the corresponding formatting set by the user in the config.
for field in self.config["In Menu"]:
presence_dict[field] = self.config["In Menu"][field].format(
**format_dict
)
# If the user is in the menu, there is no need to show the time elapsed.
presence_dict["start"] = None
# Set all empty strings or empty lists to None.
for field in presence_dict:
if presence_dict[field] == "" or presence_dict[field] == []:
presence_dict[field] = None
return presence_dict
RichPresence()
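# A minimal sketch of the config.ini this script expects. Only the section
# names ([Rich Presence], [In Machine], [In Menu]) come from the code above;
# the field names and values below are assumptions -- any keyword accepted by
# pypresence's Presence.update() should work, formatted with the keys produced
# by generate_format_dict() such as {os name}, {os version name},
# {architecture} and {icon}.
#
# [Rich Presence]
# client_id = 000000000000000000
#
# [In Machine]
# details = Running {os name} {os version name}
# state = {architecture}-bit virtual machine
# large_image = {os version image}
#
# [In Menu]
# details = In the VirtualBox menu
# large_image = {icon}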
| 38.786047 | 115 | 0.584483 | [
"MIT"
] | Nitrodium/virtualbox-rich-presence | main.py | 8,339 | Python |
import cv2
import numpy as np
import definitions
from playgrounds.core.features import Feature
from playgrounds.keras_models.features.girl_boy.workers.custom_workers import CustomWorker1
from playgrounds.opencv import face_detection
from playgrounds.utilities import opencv_utilities
class GenderClassifier(Feature):
def __init__(self):
super().__init__()
self.workers = {
"custom1": CustomWorker1()
}
def runFeature(self, worker, inputData, inType ="image"):
self.worker = self.workers.get(worker)
func = self.inputTypes.get(inType)
func(worker, inputData)
def trainFeature(self, worker):
self.worker = self.workers.get(worker)
self.worker.train()
def runOnVideo(self, worker, inputData):
self.worker = self.workers.get(worker)
self.worker.buildModel()
cap = cv2.VideoCapture(inputData)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
path = definitions.ROOT_DIR + "/outputs/tiny_yolo/" + opencv_utilities.getFileNameFromPath(inputData)
out = cv2.VideoWriter(path, fourcc, 20.0, (854, 356))
while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                # Stop when the stream ends or a frame cannot be read
                break
            self.getGender(frame)
# out.write(frame)
cv2.imshow("V", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def getGender(self, img):
faces = opencv_utilities.getFaceFromImage(img)
for (x, y, width, height) in faces:
if x > 40 and y > 40:
x = x-40
y = y-40
width += 40
height += 40
crop_img = img[y:y + height, x:x + width]
crop_img = cv2.resize(crop_img, (64, 64))
crop_img = crop_img / 255
crop_img = np.expand_dims(crop_img, axis=0)
text = self.worker.predict(crop_img)
if text > 0.6:
text = "Man"
            else:
                text = "Woman"
cv2.rectangle(img, (x, y), (x + width, y + height), (0, 255, 0), 1)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, text, (x, y), font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
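# Hedged usage sketch: the video path is a placeholder and the "custom1" worker
# is assumed to have its trained Keras model and face-detection assets in place.
if __name__ == "__main__":
    classifier = GenderClassifier()
    # classifier.trainFeature("custom1")  # optionally train the worker first
    classifier.runOnVideo("custom1", "path/to/example_video.mp4")  # hypothetical input file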
| 33.470588 | 109 | 0.577329 | [
"Apache-2.0"
] | enohoxha/AxonPy | playgrounds/keras_models/features/girl_boy/girl_boy_feature.py | 2,276 | Python |
#!/usr/bin/env python3
from . import util
import json
from electrum_civx.network import filter_protocol
peers = filter_protocol(util.get_peers())
results = util.send_request(peers, 'blockchain.estimatefee', [2])
print(json.dumps(results, indent=4))
| 31.125 | 65 | 0.787149 | [
"MIT"
] | checho1989/electrum-civx | electrum/scripts/estimate_fee.py | 249 | Python |
'''
Experiment: Ethernet MQTT communication
Version: v1.0
Date: 2020.12
Author: 01Studio
Description: Ethernet MQTT communication via socket programming, subscriber (subscribe) side.
'''
import network,usocket,time
from simple import MQTTClient
from tftlcd import LCD43M
# Define common colours
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
BLACK = (0,0,0)
# Initialize the 4.3-inch LCD
d = LCD43M(portrait=1)
d.fill((255,255,255)) # fill with white
# Socket data-receive interrupt flag
socket_node = 0
# Initialize Ethernet
nic = network.Ethernet()
nic.active(True)
nic.ifconfig('dhcp')
# MQTT callback function, executed when a message is received
def MQTT_callback(topic, msg):
print('topic: {}'.format(topic))
print('msg: {}'.format(msg))
# Check whether the network connected successfully
if nic.isconnected():
    print(nic.ifconfig()) # print the IP information
    # Show the title
    d.printStr('01Studio Network', 40, 10, BLACK, size=4)
    # Show the IP information
d.printStr('IP: ' + nic.ifconfig()[0], 10, 100, BLACK, size=3)
d.printStr('Subnet: ' + nic.ifconfig()[1], 10, 150, BLACK, size=3)
d.printStr('Gateway: ' + nic.ifconfig()[2], 10, 200, BLACK, size=3)
# MQTT configuration
SERVER = 'mqtt.p2hp.com'
PORT = 1883
CLIENT_ID = '01Studio-pyBoard' # client ID
TOPIC = '/public/01Studio/1' # topic name
client = MQTTClient(CLIENT_ID, SERVER, PORT)
client.set_callback(MQTT_callback) # register the callback
client.connect()
client.subscribe(TOPIC) # subscribe to the topic
while (True):
    client.check_msg() # Check for incoming messages; the callback prints them when received
    time.sleep_ms(300) # polling interval
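# Hedged companion sketch: a publisher for the same topic would reuse the
# MQTTClient class imported from simple.py; the payload below is an arbitrary
# example and is not part of this subscriber script.
#
# pub = MQTTClient('01Studio-pyBoard-pub', SERVER, PORT)
# pub.connect()
# pub.publish(TOPIC, 'hello from 01Studio')  # delivered to MQTT_callback above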
| 20.169231 | 71 | 0.656751 | [
"MIT"
] | 01studio-lab/MicroPython-Examples | 哥伦布(STM32F407)/3.通讯实验/2.以太网/3.MQTT通信/2.订阅者(subscribe)/main.py | 1,599 | Python |
from math import ceil
import pytest
from scipy.stats import norm, randint
import numpy as np
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.experimental import enable_halving_search_cv # noqa
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import HalvingGridSearchCV
from sklearn.model_selection import HalvingRandomSearchCV
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.svm import LinearSVC
from sklearn.model_selection._search_successive_halving import (
_SubsampleMetaSplitter,
_top_k,
)
class FastClassifier(DummyClassifier):
"""Dummy classifier that accepts parameters a, b, ... z.
    These parameters don't affect the predictions and are useful for fast
grid searching."""
def __init__(
self, strategy="stratified", random_state=None, constant=None, **kwargs
):
super().__init__(
strategy=strategy, random_state=random_state, constant=constant
)
def get_params(self, deep=False):
params = super().get_params(deep=deep)
for char in range(ord("a"), ord("z") + 1):
params[chr(char)] = "whatever"
return params
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
"aggressive_elimination,"
"max_resources,"
"expected_n_iterations,"
"expected_n_required_iterations,"
"expected_n_possible_iterations,"
"expected_n_remaining_candidates,"
"expected_n_candidates,"
"expected_n_resources,",
[
# notice how it loops at the beginning
# also, the number of candidates evaluated at the last iteration is
# <= factor
(True, "limited", 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]),
# no aggressive elimination: we end up with less iterations, and
# the number of candidates at the last iter is > factor, which isn't
# ideal
(False, "limited", 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]),
# # When the amount of resource isn't limited, aggressive_elimination
# # has no effect. Here the default min_resources='exhaust' will take
# # over.
(True, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
(False, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
],
)
def test_aggressive_elimination(
Est,
aggressive_elimination,
max_resources,
expected_n_iterations,
expected_n_required_iterations,
expected_n_possible_iterations,
expected_n_remaining_candidates,
expected_n_candidates,
expected_n_resources,
):
# Test the aggressive_elimination parameter.
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
base_estimator = FastClassifier()
if max_resources == "limited":
max_resources = 180
else:
max_resources = n_samples
sh = Est(
base_estimator,
param_grid,
aggressive_elimination=aggressive_elimination,
max_resources=max_resources,
factor=3,
)
sh.set_params(verbose=True) # just for test coverage
if Est is HalvingRandomSearchCV:
# same number of candidates as with the grid
sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
sh.fit(X, y)
assert sh.n_iterations_ == expected_n_iterations
assert sh.n_required_iterations_ == expected_n_required_iterations
assert sh.n_possible_iterations_ == expected_n_possible_iterations
assert sh.n_resources_ == expected_n_resources
assert sh.n_candidates_ == expected_n_candidates
assert sh.n_remaining_candidates_ == expected_n_remaining_candidates
assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
"min_resources,"
"max_resources,"
"expected_n_iterations,"
"expected_n_possible_iterations,"
"expected_n_resources,",
[
# with enough resources
("smallest", "auto", 2, 4, [20, 60]),
# with enough resources but min_resources set manually
(50, "auto", 2, 3, [50, 150]),
# without enough resources, only one iteration can be done
("smallest", 30, 1, 1, [20]),
# with exhaust: use as much resources as possible at the last iter
("exhaust", "auto", 2, 2, [333, 999]),
("exhaust", 1000, 2, 2, [333, 999]),
("exhaust", 999, 2, 2, [333, 999]),
("exhaust", 600, 2, 2, [200, 600]),
("exhaust", 599, 2, 2, [199, 597]),
("exhaust", 300, 2, 2, [100, 300]),
("exhaust", 60, 2, 2, [20, 60]),
("exhaust", 50, 1, 1, [20]),
("exhaust", 20, 1, 1, [20]),
],
)
def test_min_max_resources(
Est,
min_resources,
max_resources,
expected_n_iterations,
expected_n_possible_iterations,
expected_n_resources,
):
# Test the min_resources and max_resources parameters, and how they affect
# the number of resources used at each iteration
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": [1, 2], "b": [1, 2, 3]}
base_estimator = FastClassifier()
sh = Est(
base_estimator,
param_grid,
factor=3,
min_resources=min_resources,
max_resources=max_resources,
)
if Est is HalvingRandomSearchCV:
sh.set_params(n_candidates=6) # same number as with the grid
sh.fit(X, y)
expected_n_required_iterations = 2 # given 6 combinations and factor = 3
assert sh.n_iterations_ == expected_n_iterations
assert sh.n_required_iterations_ == expected_n_required_iterations
assert sh.n_possible_iterations_ == expected_n_possible_iterations
assert sh.n_resources_ == expected_n_resources
if min_resources == "exhaust":
assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh.n_resources_)
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
@pytest.mark.parametrize(
"max_resources, n_iterations, n_possible_iterations",
[
("auto", 5, 9), # all resources are used
(1024, 5, 9),
(700, 5, 8),
(512, 5, 8),
(511, 5, 7),
(32, 4, 4),
(31, 3, 3),
(16, 3, 3),
(4, 1, 1), # max_resources == min_resources, only one iteration is
# possible
],
)
def test_n_iterations(Est, max_resources, n_iterations, n_possible_iterations):
# test the number of actual iterations that were run depending on
# max_resources
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=1)
param_grid = {"a": [1, 2], "b": list(range(10))}
base_estimator = FastClassifier()
factor = 2
sh = Est(
base_estimator,
param_grid,
cv=2,
factor=factor,
max_resources=max_resources,
min_resources=4,
)
if Est is HalvingRandomSearchCV:
sh.set_params(n_candidates=20) # same as for HalvingGridSearchCV
sh.fit(X, y)
assert sh.n_required_iterations_ == 5
assert sh.n_iterations_ == n_iterations
assert sh.n_possible_iterations_ == n_possible_iterations
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
def test_resource_parameter(Est):
# Test the resource parameter
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": [1, 2], "b": list(range(10))}
base_estimator = FastClassifier()
sh = Est(base_estimator, param_grid, cv=2, resource="c", max_resources=10, factor=3)
sh.fit(X, y)
assert set(sh.n_resources_) == set([1, 3, 9])
for r_i, params, param_c in zip(
sh.cv_results_["n_resources"],
sh.cv_results_["params"],
sh.cv_results_["param_c"],
):
assert r_i == params["c"] == param_c
with pytest.raises(
ValueError, match="Cannot use resource=1234 which is not supported "
):
sh = HalvingGridSearchCV(
base_estimator, param_grid, cv=2, resource="1234", max_resources=10
)
sh.fit(X, y)
with pytest.raises(
ValueError,
match=(
"Cannot use parameter c as the resource since it is part "
"of the searched parameters."
),
):
param_grid = {"a": [1, 2], "b": [1, 2], "c": [1, 3]}
sh = HalvingGridSearchCV(
base_estimator, param_grid, cv=2, resource="c", max_resources=10
)
sh.fit(X, y)
@pytest.mark.parametrize(
"max_resources, n_candidates, expected_n_candidates",
[
(512, "exhaust", 128), # generate exactly as much as needed
(32, "exhaust", 8),
(32, 8, 8),
(32, 7, 7), # ask for less than what we could
(32, 9, 9), # ask for more than 'reasonable'
],
)
def test_random_search(max_resources, n_candidates, expected_n_candidates):
# Test random search and make sure the number of generated candidates is
# as expected
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": norm, "b": norm}
base_estimator = FastClassifier()
sh = HalvingRandomSearchCV(
base_estimator,
param_grid,
n_candidates=n_candidates,
cv=2,
max_resources=max_resources,
factor=2,
min_resources=4,
)
sh.fit(X, y)
assert sh.n_candidates_[0] == expected_n_candidates
if n_candidates == "exhaust":
# Make sure 'exhaust' makes the last iteration use as much resources as
# we can
assert sh.n_resources_[-1] == max_resources
@pytest.mark.parametrize(
"param_distributions, expected_n_candidates",
[
({"a": [1, 2]}, 2), # all lists, sample less than n_candidates
({"a": randint(1, 3)}, 10), # not all list, respect n_candidates
],
)
def test_random_search_discrete_distributions(
param_distributions, expected_n_candidates
):
# Make sure random search samples the appropriate number of candidates when
# we ask for more than what's possible. How many parameters are sampled
    # depends on whether the distributions are 'all lists' or not (see
# ParameterSampler for details). This is somewhat redundant with the checks
# in ParameterSampler but interaction bugs were discovered during
    # development of SH
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=0)
base_estimator = FastClassifier()
sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10)
sh.fit(X, y)
assert sh.n_candidates_[0] == expected_n_candidates
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
"params, expected_error_message",
[
({"scoring": {"accuracy", "accuracy"}}, "Multimetric scoring is not supported"),
(
{"resource": "not_a_parameter"},
"Cannot use resource=not_a_parameter which is not supported",
),
(
{"resource": "a", "max_resources": 100},
"Cannot use parameter a as the resource since it is part of",
),
({"max_resources": "not_auto"}, "max_resources must be either"),
({"max_resources": 100.5}, "max_resources must be either"),
({"max_resources": -10}, "max_resources must be either"),
({"min_resources": "bad str"}, "min_resources must be either"),
({"min_resources": 0.5}, "min_resources must be either"),
({"min_resources": -10}, "min_resources must be either"),
(
{"max_resources": "auto", "resource": "b"},
"max_resources can only be 'auto' if resource='n_samples'",
),
(
{"min_resources": 15, "max_resources": 14},
"min_resources_=15 is greater than max_resources_=14",
),
({"cv": KFold(shuffle=True)}, "must yield consistent folds"),
({"cv": ShuffleSplit()}, "must yield consistent folds"),
({"refit": "whatever"}, "refit is expected to be a boolean"),
],
)
def test_input_errors(Est, params, expected_error_message):
base_estimator = FastClassifier()
param_grid = {"a": [1]}
X, y = make_classification(100)
sh = Est(base_estimator, param_grid, **params)
with pytest.raises(ValueError, match=expected_error_message):
sh.fit(X, y)
@pytest.mark.parametrize(
"params, expected_error_message",
[
(
{"n_candidates": "exhaust", "min_resources": "exhaust"},
"cannot be both set to 'exhaust'",
),
({"n_candidates": "bad"}, "either 'exhaust' or a positive integer"),
({"n_candidates": 0}, "either 'exhaust' or a positive integer"),
],
)
def test_input_errors_randomized(params, expected_error_message):
# tests specific to HalvingRandomSearchCV
base_estimator = FastClassifier()
param_grid = {"a": [1]}
X, y = make_classification(100)
sh = HalvingRandomSearchCV(base_estimator, param_grid, **params)
with pytest.raises(ValueError, match=expected_error_message):
sh.fit(X, y)
@pytest.mark.parametrize(
"fraction, subsample_test, expected_train_size, expected_test_size",
[
(0.5, True, 40, 10),
(0.5, False, 40, 20),
(0.2, True, 16, 4),
(0.2, False, 16, 20),
],
)
def test_subsample_splitter_shapes(
fraction, subsample_test, expected_train_size, expected_test_size
):
# Make sure splits returned by SubsampleMetaSplitter are of appropriate
# size
n_samples = 100
X, y = make_classification(n_samples)
cv = _SubsampleMetaSplitter(
base_cv=KFold(5),
fraction=fraction,
subsample_test=subsample_test,
random_state=None,
)
for train, test in cv.split(X, y):
assert train.shape[0] == expected_train_size
assert test.shape[0] == expected_test_size
if subsample_test:
assert train.shape[0] + test.shape[0] == int(n_samples * fraction)
else:
assert test.shape[0] == n_samples // cv.base_cv.get_n_splits()
@pytest.mark.parametrize("subsample_test", (True, False))
def test_subsample_splitter_determinism(subsample_test):
# Make sure _SubsampleMetaSplitter is consistent across calls to split():
# - we're OK having training sets differ (they're always sampled with a
# different fraction anyway)
# - when we don't subsample the test set, we want it to be always the same.
# This check is the most important. This is ensured by the determinism
# of the base_cv.
# Note: we could force both train and test splits to be always the same if
# we drew an int seed in _SubsampleMetaSplitter.__init__
n_samples = 100
X, y = make_classification(n_samples)
cv = _SubsampleMetaSplitter(
base_cv=KFold(5), fraction=0.5, subsample_test=subsample_test, random_state=None
)
folds_a = list(cv.split(X, y, groups=None))
folds_b = list(cv.split(X, y, groups=None))
for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b):
assert not np.all(train_a == train_b)
if subsample_test:
assert not np.all(test_a == test_b)
else:
assert np.all(test_a == test_b)
assert np.all(X[test_a] == X[test_b])
@pytest.mark.parametrize(
"k, itr, expected",
[
(1, 0, ["c"]),
(2, 0, ["a", "c"]),
(4, 0, ["d", "b", "a", "c"]),
(10, 0, ["d", "b", "a", "c"]),
(1, 1, ["e"]),
(2, 1, ["f", "e"]),
(10, 1, ["f", "e"]),
(1, 2, ["i"]),
(10, 2, ["g", "h", "i"]),
],
)
def test_top_k(k, itr, expected):
results = { # this isn't a 'real world' result dict
"iter": [0, 0, 0, 0, 1, 1, 2, 2, 2],
"mean_test_score": [4, 3, 5, 1, 11, 10, 5, 6, 9],
"params": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
got = _top_k(results, k=k, itr=itr)
assert np.all(got == expected)
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
def test_cv_results(Est):
# test that the cv_results_ matches correctly the logic of the
# tournament: in particular that the candidates continued in each
# successive iteration are those that were best in the previous iteration
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(0)
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
base_estimator = FastClassifier()
# generate random scores: we want to avoid ties, which would otherwise
# mess with the ordering and make testing harder
def scorer(est, X, y):
return rng.rand()
sh = Est(base_estimator, param_grid, factor=2, scoring=scorer)
if Est is HalvingRandomSearchCV:
# same number of candidates as with the grid
sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
sh.fit(X, y)
# non-regression check for
# https://github.com/scikit-learn/scikit-learn/issues/19203
assert isinstance(sh.cv_results_["iter"], np.ndarray)
assert isinstance(sh.cv_results_["n_resources"], np.ndarray)
cv_results_df = pd.DataFrame(sh.cv_results_)
# just make sure we don't have ties
assert len(cv_results_df["mean_test_score"].unique()) == len(cv_results_df)
cv_results_df["params_str"] = cv_results_df["params"].apply(str)
table = cv_results_df.pivot(
index="params_str", columns="iter", values="mean_test_score"
)
# table looks like something like this:
# iter 0 1 2 3 4 5
# params_str
# {'a': 'l2', 'b': 23} 0.75 NaN NaN NaN NaN NaN
# {'a': 'l1', 'b': 30} 0.90 0.875 NaN NaN NaN NaN
# {'a': 'l1', 'b': 0} 0.75 NaN NaN NaN NaN NaN
# {'a': 'l2', 'b': 3} 0.85 0.925 0.9125 0.90625 NaN NaN
# {'a': 'l1', 'b': 5} 0.80 NaN NaN NaN NaN NaN
# ...
# where a NaN indicates that the candidate wasn't evaluated at a given
# iteration, because it wasn't part of the top-K at some previous
# iteration. We here make sure that candidates that aren't in the top-k at
# any given iteration are indeed not evaluated at the subsequent
# iterations.
nan_mask = pd.isna(table)
n_iter = sh.n_iterations_
for it in range(n_iter - 1):
already_discarded_mask = nan_mask[it]
# make sure that if a candidate is already discarded, we don't evaluate
# it later
assert (
already_discarded_mask & nan_mask[it + 1] == already_discarded_mask
).all()
# make sure that the number of discarded candidate is correct
discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1]
kept_mask = ~already_discarded_mask & ~discarded_now_mask
assert kept_mask.sum() == sh.n_candidates_[it + 1]
# make sure that all discarded candidates have a lower score than the
# kept candidates
discarded_max_score = table[it].where(discarded_now_mask).max()
kept_min_score = table[it].where(kept_mask).min()
assert discarded_max_score < kept_min_score
# We now make sure that the best candidate is chosen only from the last
# iteration.
# We also make sure this is true even if there were higher scores in
# earlier rounds (this isn't generally the case, but worth ensuring it's
# possible).
last_iter = cv_results_df["iter"].max()
idx_best_last_iter = cv_results_df[cv_results_df["iter"] == last_iter][
"mean_test_score"
].idxmax()
idx_best_all_iters = cv_results_df["mean_test_score"].idxmax()
assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter]["params"]
assert (
cv_results_df.iloc[idx_best_last_iter]["mean_test_score"]
< cv_results_df.iloc[idx_best_all_iters]["mean_test_score"]
)
assert (
cv_results_df.iloc[idx_best_last_iter]["params"]
!= cv_results_df.iloc[idx_best_all_iters]["params"]
)
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
def test_base_estimator_inputs(Est):
# make sure that the base estimators are passed the correct parameters and
# number of samples at each iteration.
pd = pytest.importorskip("pandas")
passed_n_samples_fit = []
passed_n_samples_predict = []
passed_params = []
class FastClassifierBookKeeping(FastClassifier):
def fit(self, X, y):
passed_n_samples_fit.append(X.shape[0])
return super().fit(X, y)
def predict(self, X):
passed_n_samples_predict.append(X.shape[0])
return super().predict(X)
def set_params(self, **params):
passed_params.append(params)
return super().set_params(**params)
n_samples = 1024
n_splits = 2
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
base_estimator = FastClassifierBookKeeping()
sh = Est(
base_estimator,
param_grid,
factor=2,
cv=n_splits,
return_train_score=False,
refit=False,
)
if Est is HalvingRandomSearchCV:
# same number of candidates as with the grid
sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
sh.fit(X, y)
assert len(passed_n_samples_fit) == len(passed_n_samples_predict)
passed_n_samples = [
x + y for (x, y) in zip(passed_n_samples_fit, passed_n_samples_predict)
]
# Lists are of length n_splits * n_iter * n_candidates_at_i.
# Each chunk of size n_splits corresponds to the n_splits folds for the
# same candidate at the same iteration, so they contain equal values. We
# subsample such that the lists are of length n_iter * n_candidates_at_it
passed_n_samples = passed_n_samples[::n_splits]
passed_params = passed_params[::n_splits]
cv_results_df = pd.DataFrame(sh.cv_results_)
assert len(passed_params) == len(passed_n_samples) == len(cv_results_df)
uniques, counts = np.unique(passed_n_samples, return_counts=True)
assert (sh.n_resources_ == uniques).all()
assert (sh.n_candidates_ == counts).all()
assert (cv_results_df["params"] == passed_params).all()
assert (cv_results_df["n_resources"] == passed_n_samples).all()
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
def test_groups_support(Est):
# Check if ValueError (when groups is None) propagates to
# HalvingGridSearchCV and HalvingRandomSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=50, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 50)
clf = LinearSVC(random_state=0)
grid = {"C": [1]}
group_cvs = [
LeaveOneGroupOut(),
LeavePGroupsOut(2),
GroupKFold(n_splits=3),
GroupShuffleSplit(random_state=0),
]
error_msg = "The 'groups' parameter should not be None."
for cv in group_cvs:
gs = Est(clf, grid, cv=cv)
with pytest.raises(ValueError, match=error_msg):
gs.fit(X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit(random_state=0)]
for cv in non_group_cvs:
gs = Est(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
@pytest.mark.parametrize("SearchCV", [HalvingRandomSearchCV, HalvingGridSearchCV])
def test_min_resources_null(SearchCV):
"""Check that we raise an error if the minimum resources is set to 0."""
base_estimator = FastClassifier()
param_grid = {"a": [1]}
X = np.empty(0).reshape(0, 3)
search = SearchCV(base_estimator, param_grid, min_resources="smallest")
err_msg = "min_resources_=0: you might have passed an empty dataset X."
with pytest.raises(ValueError, match=err_msg):
search.fit(X, [])
@pytest.mark.parametrize("SearchCV", [HalvingGridSearchCV, HalvingRandomSearchCV])
def test_select_best_index(SearchCV):
"""Check the selection strategy of the halving search."""
results = { # this isn't a 'real world' result dict
"iter": np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]),
"mean_test_score": np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]),
"params": np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"]),
}
# we expect the index of 'i'
best_index = SearchCV._select_best_index(None, None, results)
assert best_index == 8
| 35.587819 | 88 | 0.648358 | [
"BSD-3-Clause"
] | 3021104750/scikit-learn | sklearn/model_selection/tests/test_successive_halving.py | 25,125 | Python |
# -*- coding: utf-8 -*-
# Copyright 2018 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Given two dates and a region, download N Sentinel collection scenes from the
ESA Sentinel DataHub.
The downloaded Sentinel collection scenes are compatible with:
S2MSI1C: Top-of-atmosphere reflectances in cartographic geometry
or S2MSI2A: Bottom-of-atmosphere reflectance in cartographic geometry
Parameters
----------
inidate: datetime.strptime("YYYY-MM-dd", "%Y-%m-%d")
enddate: datetime.strptime("YYYY-MM-dd", "%Y-%m-%d")
region: name of one reservoir saved in the "coord_reservoirs.json" file
coordinates : dict. Coordinates of the region to search.
Example: {"W": -2.830, "S": 41.820, "E": -2.690, "N": 41.910}}
platform : str. Satellite to use from the Sentinel family
producttype : str. Dataset type.
cloud: int
path : path
Author: Daniel García Díaz
Email: [email protected]
Institute of Physics of Cantabria (IFCA)
Advanced Computing and e-Science
Date: Sep 2018
"""
# API imports
import requests
import os
# Subfunctions
from wq_sat.utils import config
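
# Hedged usage sketch (example values only): the region name and dates below
# are assumptions, the coordinate dict mirrors the docstring example, and a
# real run needs valid SciHub credentials in the wq_sat configuration, e.g.:
#
# downloader = download_sentinel(inidate='2020-01-01T00:00:00.000Z',
#                                enddate='2020-01-31T00:00:00.000Z',
#                                region='example_reservoir',
#                                coordinates={"W": -2.830, "S": 41.820, "E": -2.690, "N": 41.910},
#                                producttype='S2MSI1C', cloud=30)
# tiles = downloader.download()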
class download_sentinel:
def __init__(self, inidate, enddate, region, coordinates=None, platform='Sentinel-2', producttype="S2MSI1C", cloud=100,
output_path=None):
self.session = requests.Session()
#Search parameters
self.inidate = inidate
self.enddate = enddate
self.coord = coordinates
self.producttype = producttype
self.platform = platform
self.region = region
self.cloud = int(cloud)
#work path
if output_path:
self.output_path = os.path.join(output_path, self.region)
else:
path = config.get_data_path()
self.output_path = os.path.join(path, self.region)
if not os.path.isdir(self.output_path):
os.mkdir(self.output_path)
#ESA APIs
self.api_url = 'https://scihub.copernicus.eu/apihub/'
self.credentials = config.load_credentials()['sentinel']
def search(self, omit_corners=True):
# Post the query to Copernicus
query = {'footprint': '"Intersects(POLYGON(({0} {1},{2} {1},{2} {3},{0} {3},{0} {1})))"'.format(self.coord['W'],
self.coord['S'],
self.coord['E'],
self.coord['N']),
'producttype': self.producttype,
'platformname': self.platform,
'beginposition': '[{} TO {}]'.format(self.inidate, self.enddate),
'cloudcoverpercentage': '[0 TO {}]'.format(self.cloud)
}
data = {'format': 'json',
'start': 0, # offset
'rows': 100,
'limit': 100,
'orderby': '',
'q': ' '.join(['{}:{}'.format(k, v) for k, v in query.items()])
}
response = self.session.post(self.api_url + 'search?',
data=data,
auth=(self.credentials['user'], self.credentials['password']),
headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'})
response.raise_for_status()
# Parse the response
json_feed = response.json()['feed']
if 'entry' in json_feed.keys():
results = json_feed['entry']
if isinstance(results, dict): # if the query returns only one product, products will be a dict not a list
results = [results]
else:
results = []
# Remove results that are mainly corners
def keep(r):
for item in r['str']:
if item['name'] == 'size':
units = item['content'].split(' ')[1]
mult = {'KB': 1, 'MB': 1e3, 'GB': 1e6}[units]
size = float(item['content'].split(' ')[0]) * mult
break
if size > 0.5e6: # 500MB
return True
else:
return False
results[:] = [r for r in results if keep(r)]
print('Found {} results from Sentinel'.format(json_feed['opensearch:totalResults']))
print('Retrieving {} results'.format(len(results)))
return results
def download(self):
#results of the search
results = self.search()
if not isinstance(results, list):
results = [results]
downloaded_data = [f for f in os.listdir(self.output_path) if os.path.isfile(os.path.join(self.output_path, f))]
s2_tiles = []
for r in results:
url, tile_id = r['link'][0]['href'], r['title']
save_dir = os.path.join(self.output_path, '{}.zip'.format(tile_id))
if '{}.tif'.format(tile_id) in downloaded_data:
print ('File {} already downloaded'.format(tile_id))
continue
print('Downloading {} ...'.format(tile_id))
s2_tiles.append(tile_id)
response = self.session.get(url, stream=True, allow_redirects=True, auth=(self.credentials['user'],
self.credentials['password']))
with open(save_dir, 'wb') as f:
f.write(response.content)
return s2_tiles | 37.969512 | 123 | 0.546009 | [
"Apache-2.0"
] | garciadd/wq_sat | wq_sat/satellites/sentinel_download.py | 6,229 | Python |
from autosar.writer.writer_base import ElementWriter
import autosar.constant
class XMLConstantWriter(ElementWriter):
def __init__(self,version, patch):
super().__init__(version, patch)
def getSupportedXML(self):
return ['Constant']
def getSupportedCode(self):
return []
def writeElementXML(self, elem):
if type(elem).__name__ == 'Constant':
return self.writeConstantXML(elem)
else:
return None
def writeElementCode(self, elem, localvars):
raise NotImplementedError('writeElementCode')
def writeConstantXML(self,elem):
lines = []
assert(isinstance(elem,autosar.constant.Constant))
lines.append('<CONSTANT-SPECIFICATION>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1))
if elem.adminData is not None:
lines.extend(self.indent(self.writeAdminDataXML(elem.adminData),1))
if self.version>=4.0:
lines.extend(self.indent(self._writeValueXMLV4(elem.value),1))
else:
lines.extend(self.indent(self._writeValueXMLV3(elem.value),1))
lines.append('</CONSTANT-SPECIFICATION>')
return lines
def _writeValueXMLV3(self,elem):
lines=[]
lines.append('<VALUE>')
lines.extend(self.indent(self._writeLiteralValueXML(elem),1))
lines.append('</VALUE>')
return lines
def _writeLiteralValueXML(self,elem):
if isinstance(elem,autosar.constant.IntegerValue):
return self._writeIntegerLiteralXML(elem)
elif isinstance(elem,autosar.constant.RecordValue):
return self._writeRecordSpecificationXML(elem)
elif isinstance(elem,autosar.constant.StringValue):
return self._writeStringLiteralXML(elem)
elif isinstance(elem,autosar.constant.BooleanValue):
return self._writeBooleanLiteralXML(elem)
elif isinstance(elem,autosar.constant.ArrayValue):
return self._writeArraySpecificationXML(elem)
else:
raise NotImplementedError(type(elem))
def _writeIntegerLiteralXML(self,elem):
assert(isinstance(elem,autosar.constant.IntegerValue))
lines=[]
lines.append('<INTEGER-LITERAL>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1))
tag = elem.rootWS().find(elem.typeRef).tag(self.version)
lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(tag,elem.typeRef),1))
lines.append(self.indent('<VALUE>%d</VALUE>'%elem.value,1))
lines.append('</INTEGER-LITERAL>')
return lines
def _writeRecordSpecificationXML(self,elem):
assert(isinstance(elem,autosar.constant.RecordValue))
lines=[]
lines.append('<RECORD-SPECIFICATION>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1))
tag = elem.rootWS().find(elem.typeRef).tag(self.version)
lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(tag,elem.typeRef),1))
if len(elem.elements)==0: lines.append('<ELEMENTS/>')
else:
lines.append(self.indent('<ELEMENTS>',1))
for childElem in elem.elements:
lines.extend(self.indent(self._writeLiteralValueXML(childElem),2))
lines.append(self.indent('</ELEMENTS>',1))
lines.append('</RECORD-SPECIFICATION>')
return lines
def _writeStringLiteralXML(self,elem):
assert(isinstance(elem,autosar.constant.StringValue))
lines=[]
lines.append('<STRING-LITERAL>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1))
tag = elem.rootWS().find(elem.typeRef).tag(self.version)
lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(tag,elem.typeRef),1))
lines.append(self.indent('<VALUE>%s</VALUE>'%elem.value,1))
lines.append('</STRING-LITERAL>')
return lines
def _writeBooleanLiteralXML(self,elem):
assert(isinstance(elem,autosar.constant.BooleanValue))
lines=[]
lines.append('<BOOLEAN-LITERAL>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1))
tag = elem.rootWS().find(elem.typeRef).tag(self.version)
lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(tag,elem.typeRef),1))
lines.append(self.indent('<VALUE>%s</VALUE>'%('true' if elem.value is True else 'false'),1))
lines.append('</BOOLEAN-LITERAL>')
return lines
def _writeArraySpecificationXML(self,elem):
assert(isinstance(elem,autosar.constant.ArrayValue))
lines=[]
lines.append('<ARRAY-SPECIFICATION>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1))
tag = elem.rootWS().find(elem.typeRef).tag(self.version)
lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(tag,elem.typeRef),1))
if len(elem.elements)==0: lines.append('<ELEMENTS/>')
else:
lines.append(self.indent('<ELEMENTS>',1))
for childElem in elem.elements:
lines.extend(self.indent(self._writeLiteralValueXML(childElem),2))
lines.append(self.indent('</ELEMENTS>',1))
lines.append('</ARRAY-SPECIFICATION>')
return lines
def _writeValueXMLV4(self, value):
lines=[]
lines.append('<VALUE-SPEC>')
lines.extend(self.indent(self.writeValueSpecificationXML(value),1))
lines.append('</VALUE-SPEC>')
return lines
class CodeConstantWriter(ElementWriter):
def __init__(self,version, patch):
super().__init__(version, patch)
def getSupportedXML(self):
return []
def getSupportedCode(self):
return ['Constant']
def writeElementXML(self, elem):
raise NotImplementedError('writeElementXML')
def writeElementCode(self, elem, localvars):
if type(elem).__name__ == 'Constant':
return self.writeConstantCode(elem, localvars)
else:
return None
def writeConstantCode(self, constant, localvars):
lines=[]
ws=localvars['ws']
if not isinstance(constant, autosar.constant.Constant):
raise ValueError('expected type autosar.constant.Constant')
if constant.value is not None:
dataType = ws.find(constant.value.typeRef, role='DataType')
constructor=None
if dataType is None:
raise ValueError('invalid reference: '+constant.value.typeRef)
if isinstance(constant.value, autosar.constant.ArrayValue):
initValue = self._writeArrayValueConstantCode(constant.value, localvars)
elif isinstance(constant.value, autosar.constant.IntegerValue):
initValue = self._writeIntegerValueConstantCode(constant.value, localvars)
elif isinstance(constant.value, autosar.constant.StringValue):
initValue = self._writeStringValueConstantCode(constant.value, localvars)
elif isinstance(constant.value, autosar.constant.BooleanValue):
initValue = self._writeBooleanValueConstantCode(constant.value, localvars)
elif isinstance(constant.value, autosar.constant.RecordValue):
initValue = self._writeRecordValueConstantCode(constant.value, localvars)
else:
                raise ValueError('unknown value type: ' + str(type(constant.value)))
params=[repr(constant.name)]
if ws.roles['DataType'] is not None:
params.append(repr(dataType.name)) #use name only
else:
params.append(repr(dataType.ref)) #use full reference
if initValue is not None:
if isinstance(initValue, list):
lines.extend(self.writeDictCode('initValue', initValue))
params.append('initValue')
else:
params.append(initValue)
else:
print(constant.name)
if constant.adminData is not None:
param = self.writeAdminDataCode(constant.adminData, localvars)
assert(len(param)>0)
params.append('adminData='+param)
lines.append("package.createConstant(%s)"%(', '.join(params)))
return lines
def _writeArrayValueConstantCode(self, value, localvars):
ws=localvars['ws']
assert(isinstance(value, autosar.constant.ArrayValue))
params=[]
for elem in value.elements:
if isinstance(elem, autosar.constant.ArrayValue):
initValue = self._writeArrayValueConstantCode(elem, localvars)
elif isinstance(elem, autosar.constant.IntegerValue):
initValue = self._writeIntegerValueConstantCode(elem, localvars)
elif isinstance(elem, autosar.constant.StringValue):
initValue = self._writeStringValueConstantCode(elem, localvars)
elif isinstance(elem, autosar.constant.BooleanValue):
initValue = self._writeBooleanValueConstantCode(elem, localvars)
elif isinstance(elem, autosar.constant.RecordValue):
initValue = self._writeRecordValueConstantCode(elem, localvars)
if isinstance(initValue, list): initValue="{%s}"%(', '.join(initValue)) #join any inner record init values
else:
                raise ValueError('unknown value type: ' + str(type(elem)))
params.append(initValue)
if len(params)>0:
return "[%s]"%(', '.join(params))
return None
def _writeIntegerValueConstantCode(self, value, localvars):
return str(value.value)
def _writeStringValueConstantCode(self, value, localvars):
return repr(value.value)
def _writeBooleanValueConstantCode(self, value, localvars):
return str(value.value)
def _writeRecordValueConstantCode(self, value, localvars):
ws=localvars['ws']
assert(isinstance(value, autosar.constant.RecordValue))
params=[]
for elem in value.elements:
if isinstance(elem, autosar.constant.ArrayValue):
initValue = self._writeArrayValueConstantCode(elem, localvars)
elif isinstance(elem, autosar.constant.IntegerValue):
initValue = self._writeIntegerValueConstantCode(elem, localvars)
elif isinstance(elem, autosar.constant.StringValue):
initValue = self._writeStringValueConstantCode(elem, localvars)
elif isinstance(elem, autosar.constant.BooleanValue):
initValue = self._writeBooleanValueConstantCode(elem, localvars)
elif isinstance(elem, autosar.constant.RecordValue):
initValue = self._writeRecordValueConstantCode(elem, localvars)
if isinstance(initValue, list): initValue="{%s}"%(', '.join(initValue)) #join any inner record init values
else:
                raise ValueError('unknown value type: ' + str(type(elem)))
params.append('"%s": %s'%(elem.name, initValue))
if len(params)>0:
text = "{%s}"%(', '.join(params))
if len(text)>200: #line will be way too long
return params
else:
return text
return None | 44.522088 | 119 | 0.650731 | [
"MIT"
] | Amineahd/autosar | autosar/writer/constant_writer.py | 11,086 | Python |
def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'Reserved'},
{'abbr': 'sfc', 'code': 1, 'title': 'Ground or water surface'},
{'abbr': 2, 'code': 2, 'title': 'Cloud base level'},
{'abbr': 3, 'code': 3, 'title': 'Level of cloud tops'},
{'abbr': 4, 'code': 4, 'title': 'Level of 0 degree C isotherm'},
{'abbr': 5,
'code': 5,
'title': 'Level of adiabatic condensation lifted from the surface'},
{'abbr': 6, 'code': 6, 'title': 'Maximum wind level'},
{'abbr': 7, 'code': 7, 'title': 'Tropopause'},
{'abbr': 'sfc', 'code': 8, 'title': 'Nominal top of the atmosphere'},
{'abbr': 9, 'code': 9, 'title': 'Sea bottom'},
{'abbr': 10, 'code': 10, 'title': 'Entire atmosphere'},
{'abbr': 11, 'code': 11, 'title': 'Cumulonimbus (CB) base', 'units': 'm'},
{'abbr': 12, 'code': 12, 'title': 'Cumulonimbus (CB) top', 'units': 'm'},
{'abbr': 20, 'code': 20, 'title': 'Isothermal level', 'units': 'K'},
{'abbr': 'pl', 'code': 100, 'title': 'Isobaric surface', 'units': 'Pa'},
{'abbr': 'sfc', 'code': 101, 'title': 'Mean sea level'},
{'abbr': 102,
'code': 102,
'title': 'Specific altitude above mean sea level',
'units': 'm'},
{'abbr': 'sfc',
'code': 103,
'title': 'Specified height level above ground',
'units': 'm'},
{'abbr': 104, 'code': 104, 'title': 'Sigma level', 'units': 'sigma value'},
{'abbr': 'ml', 'code': 105, 'title': 'Hybrid level'},
{'abbr': 'sfc',
'code': 106,
'title': 'Depth below land surface',
'units': 'm'},
{'abbr': 'pt', 'code': 107, 'title': 'Isentropic (theta) level', 'units': 'K'},
{'abbr': 108,
'code': 108,
'title': 'Level at specified pressure difference from ground to level',
'units': 'Pa'},
{'abbr': 'pv',
'code': 109,
'title': 'Potential vorticity surface',
'units': 'K m2 kg-1 s-1'},
{'abbr': 110, 'code': 110, 'title': 'Reserved'},
{'abbr': 111, 'code': 111, 'title': 'Eta level'},
{'abbr': 112, 'code': 112, 'title': 'Reserved'},
{'abbr': 113, 'code': 113, 'title': 'Logarithmic hybrid coordinate'},
{'abbr': 117, 'code': 117, 'title': 'Mixed layer depth', 'units': 'm'},
{'abbr': 'hhl', 'code': 118, 'title': 'Hybrid height level'},
{'abbr': 'hpl', 'code': 119, 'title': 'Hybrid pressure level'},
{'abbr': 150, 'code': 150, 'title': 'Generalized vertical height coordinate'},
{'abbr': 160, 'code': 160, 'title': 'Depth below sea level m'},
{'abbr': 161, 'code': 161, 'title': 'Depth below water surface', 'units': 'm'},
{'abbr': 162, 'code': 162, 'title': 'Lake or river bottom'},
{'abbr': 163, 'code': 163, 'title': 'Bottom of sediment layer'},
{'abbr': 164,
'code': 164,
'title': 'Bottom of thermally active sediment layer'},
{'abbr': 165,
'code': 165,
'title': 'Bottom of sediment layer penetrated by thermal wave'},
{'abbr': 166, 'code': 166, 'title': 'Mixing layer'},
{'abbr': None, 'code': 255, 'title': 'Missing'})
| 55.412698 | 91 | 0.464337 | [
"Apache-2.0"
] | ecmwf/pyeccodes | pyeccodes/defs/grib2/tables/8/4_5_table.py | 3,491 | Python |
# INIT
data = []
numeric = []
normal = []
keyL = []
KeyL = []
key = input("Enter Key Value: ")
# File - Load Function
def load(file):
handle = open(file)
return handle.read()
# Text Format
def form(file):
format = load(file)
format = format.replace(' ', '')
format = format.replace(',', '')
format = format.replace('-', '')
format = format.replace('–', '')
format = format.replace('—', '')
format = format.replace('.', '')
format = format.replace(';', '')
format = format.replace('\n', '')
return format
#ADDS TO LIST
for letter in format:
data.append(letter)
#REMOVE NUM
for letter in data:
global numbers
numbers = ""
if not letter.isdigit():
normal.append(letter)
else:
numeric.append(letter)
numbers = ''.join(numeric)
return format, numbers
#Mod Inv
def modInverse(a, m):
a = a % m
for x in range(1, m):
if ((a * x) % m == 1):
return x
return 1
#Calc dif
def dif(a,b):
if a > b:
return a - b
if a < b:
return b - a
else:
return 0
#Key Creator
def getKey(key):
lenKey = len(key)
lenPtext = len(normal)
    difP = lenPtext/lenKey # ratio of plaintext length to key length
if difP % 1 == 0:
KEY = ""
        difP = int(difP)  # cast to int so the key string can be repeated
        KEY = key*difP
keyL.append(KEY)
else:
KEY = ""
difP = int(difP)+1
KEY = key*difP
keyL.append(KEY)
for word in keyL:
for letter in word:
KeyL.append(letter)
i = 0
for i in range(2):
print(i)
print("test")
form('project2plaintext.txt.txt')
#print(len(normal))
#print(numbers)
#print(dif(len(getKey(key)),len(normal)))
getKey(key)
print(KeyL)
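# Quick sanity check for modInverse (illustrative asserts, not part of the
# original exercise): 9 is the multiplicative inverse of 3 modulo 26,
# because (3 * 9) % 26 == 1.
assert modInverse(3, 26) == 9
assert (3 * modInverse(3, 26)) % 26 == 1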
| 19.084211 | 53 | 0.519581 | [
"MIT"
] | vandewinckel/Python | crypto/vigenere/crypto.py | 1,817 | Python |
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class SchoolConfig(AppConfig):
name = "school"
| 15.857143 | 33 | 0.675676 | [
"MIT"
] | ajaykrprajapati/django_orm | django_orm/school/apps.py | 111 | Python |
# Math Module Part 2
import math
# Factorial & Square Root
print(math.factorial(3))
print(math.sqrt(64))
# Greatest Common Divisor (GCD)
print(math.gcd(52, 8))
print(math.gcd(8, 52))
print(8/52)
print(2/13)
# Degrees and Radians
print(math.radians(360))
print(math.degrees(math.pi * 2))
| 17.222222 | 34 | 0.680645 | [
"MIT"
] | JeffreyAsuncion/LearningPythonStdLib | Chapter02/02_02.py | 310 | Python |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='wafer project using mlops',
author='monika',
license='MIT',
)
| 18.727273 | 44 | 0.660194 | [
"MIT"
] | monika2910/wafer_main | setup.py | 206 | Python |
# Small exercise using the tkinter graphics library
from tkinter import *
from random import randrange
# --- définition des fonctions gestionnaires d'événements : ---
def drawline():
"Tracé d'une ligne dans le canevas can1"
global x1, y1, x2, y2, coul
can1.create_line(x1,y1,x2,y2,width=2,fill=coul)
# modification des coordonnées pour la ligne suivante :
y2, y1 = y2+10, y1-10
def changecolor():
"Changement aléatoire de la couleur du tracé"
global coul
pal = ['purple', 'cyan', 'maroon', 'green', 'red', 'blue', 'orange', 'yellow']
c = randrange(8) # => génère un nombre aléatoire de 0 à 7
coul = pal[c]
# ------ Programme principal -------
# les variables suivantes seront utilisées de manière globale :
x1, y1, x2, y2 = 10, 190, 190, 10 # coordonnées de la ligne
coul = 'dark green' # couleur de la ligne
# Création du widget principal ("maître") :
fen1 = Tk()
# création des widgets "esclaves" :
can1 = Canvas(fen1, bg='dark grey', height=200, width=200)
can1.pack(side=LEFT)
bou1 = Button(fen1, text='Quitter', command=fen1.quit)
bou1.pack(side=BOTTOM)
bou2 = Button(fen1, text='Tracer une ligne', command=drawline)
bou2.pack()
bou3 = Button(fen1, text='Autre couleur', command=changecolor)
bou3.pack()
fen1.mainloop() # démarrage du réceptionnaire d’événements
fen1.destroy() # destruction (fermeture) de la fenêtre | 42.666667 | 83 | 0.682528 | [
"Apache-2.0"
] | geocot/coursPython | Exemples cours 4/TK_Line.py | 1,433 | Python |
# -*- coding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
SECRET_KEY = 'psst'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'hfut_auth'
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'hfut_auth.backends.HFUTBackend'
)
# default
HFUT_AUTH_CAMPUS = 'ALL'
| 19.965517 | 82 | 0.606218 | [
"MIT"
] | er1iang/django-hfut | test_settings.py | 579 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import os
from tabulate import tabulate # type: ignore
import onnx
from onnx import defs, helper
_all_schemas = defs.get_all_schemas()
class AttrCoverage(object):
def __init__(self):
self.name = None
self.values = set()
def add(self, attr):
assert self.name in [None, attr.name]
self.name = attr.name
value = helper.get_attribute_value(attr)
# Turn list into tuple so we can put it into set
# As value can be string, don't blindly turn `collections.Iterable`
# into tuple.
if isinstance(value, list):
value = tuple(value)
self.values.add(str(value))
class NodeCoverage(object):
def __init__(self):
self.op_type = None
self.attr_coverages = defaultdict(AttrCoverage)
def add(self, node):
assert self.op_type in [None, node.op_type]
if self.op_type is None:
self.op_type = node.op_type
self.schema = defs.get_schema(self.op_type)
for attr in node.attribute:
self.attr_coverages[attr.name].add(attr)
class Coverage(object):
def __init__(self):
self.buckets = {
'loaded': defaultdict(NodeCoverage),
'passed': defaultdict(NodeCoverage),
}
def add_node(self, node, bucket):
self.buckets[bucket][node.op_type].add(node)
def add_graph(self, graph, bucket):
for node in graph.node:
self.add_node(node, bucket)
def add_model(self, model, bucket):
self.add_graph(model.graph, bucket)
def add_proto(self, proto, bucket):
assert isinstance(proto, onnx.ModelProto)
self.add_model(proto, bucket)
def report_text(self, writer):
writer.write('---------- onnx coverage: ----------\n')
writer.write('Operators (passed/loaded/total): {}/{}/{}\n'.format(
len(self.buckets['passed']),
len(self.buckets['loaded']),
len(_all_schemas)))
writer.write('------------------------------------\n')
rows = []
for op_cov in self.buckets['passed'].values():
covered_attrs = [
'{}: {}'.format(attr_cov.name, len(attr_cov.values))
for attr_cov in op_cov.attr_coverages.values()]
uncovered_attrs = [
'{}: 0'.format(attr)
for attr in op_cov.schema.attributes
if attr not in op_cov.attr_coverages
]
attrs = sorted(covered_attrs) + sorted(uncovered_attrs)
if attrs:
attrs_column = os.linesep.join(attrs)
else:
attrs_column = 'No attributes'
rows.append([op_cov.op_type, attrs_column])
writer.write(tabulate(
rows,
headers=['Operator', 'Attributes\n(name: #values)'],
tablefmt='plain'))
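# A minimal usage sketch (added for illustration; not part of the original module):
# load a model, record which ops/attributes it exercises, and print the report.
if __name__ == '__main__':  # pragma: no cover
    import sys
    cov = Coverage()
    model = onnx.load(sys.argv[1])  # path to an .onnx file passed on the command line
    cov.add_proto(model, bucket='loaded')
    cov.add_proto(model, bucket='passed')
    cov.report_text(sys.stdout)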
| 30.989899 | 75 | 0.592243 | [
"MIT"
] | KeDengMS/onnx | onnx/backend/test/report/coverage.py | 3,068 | Python |
# -*- coding: utf-8 -*-
"""poc.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fTzz1aT2sb8oAXRO1-dr6O_IR6dof36e
A simple example for deep-learning-based non-rigid image registration
with the MNIST dataset.
**README:** If the below error occurs, run the whole notebook again (Ctrl+F9).
```
ValueError: tf.function-decorated function tried to create variables on non-first call.
```
"""
import tensorflow as tf
import tensorflow.keras.layers as layers
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
print(tf.keras.backend.image_data_format())
"""Loss functions"""
@tf.function
def mse_loss(static, moving):
"""Computes the mean squared error (MSE) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Mean squared error between the static and the moving images,
averaged over the batch.
"""
loss = tf.reduce_mean(tf.square(moving - static)) # shape ()
return loss
@tf.function
def ncc_loss(static, moving):
"""Computes the normalized cross-correlation (NCC) loss.
Currently, only 4-D inputs are supported.
Parameters
----------
static : tf.Tensor, shape (N, H, W, C)
The static image to which the moving image is aligned.
moving : tf.Tensor, shape (N, H, W, C)
The moving image, the same shape as the static image.
Returns
-------
loss : tf.Tensor, shape ()
Normalized cross-correlation loss between the static and the
moving images, averaged over the batch. Range is [-1.0, 1.0].
The best value is -1 (perfect match) and the worst is 1.
References
----------
.. [1] `Wikipedia entry for the Cross-correlation
<https://en.wikipedia.org/wiki/Cross-correlation>`_
"""
eps = tf.constant(1e-9, 'float32')
static_mean = tf.reduce_mean(static, axis=[1, 2], keepdims=True)
moving_mean = tf.reduce_mean(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_std = tf.math.reduce_std(static, axis=[1, 2], keepdims=True)
moving_std = tf.math.reduce_std(moving, axis=[1, 2], keepdims=True)
# shape (N, 1, 1, C)
static_hat = (static - static_mean)/(static_std + eps)
moving_hat = (moving - moving_mean)/(moving_std + eps)
# shape (N, H, W, C)
ncc = tf.reduce_mean(static_hat * moving_hat) # shape ()
loss = -ncc
return loss
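# Added sanity check (not in the original notebook): the NCC loss of an image with
# itself should be close to -1.0, the best (perfect-match) value.
_chk_img = tf.random.uniform((1, 32, 32, 1))
print('NCC(self, self) =', float(ncc_loss(_chk_img, _chk_img)))  # ~ -1.0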
"""Define the model """
def simple_cnn(input_shape=(32, 32, 2)):
"""Creates a 2-D convolutional encoder-decoder network.
Parameters
----------
input_shape : sequence of ints, optional
Input data shape of the form (H, W, C). Default is (32, 32, 2).
Returns
-------
model
An instance of Keras' Model class.
Notes
-----
Given a concatenated pair of static and moving images as input, the
CNN computes a dense displacement field that is used to warp the
moving image to match with the static image.
The number of channels in the output (displacement field) is equal
to the dimensionality of the input data. For 3-D volumes, it is 3,
and for 2-D images, it is 2. The first channel comprises
displacement in the x-direction and the second comprises
displacement in the y-direction.
"""
out_channels = 2
inputs = layers.Input(shape=input_shape)
# encoder
x = layers.Conv2D(32, kernel_size=3, strides=2, padding='same',
activation='relu')(inputs) # 32 --> 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.MaxPool2D(pool_size=2)(x) # 16 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.MaxPool2D(pool_size=2)(x) # 8 --> 4
x = layers.Conv2D(128, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 4
x = layers.BatchNormalization()(x) # 4
# decoder
x = layers.Conv2DTranspose(64, kernel_size=2, strides=2,
padding='same')(x) # 4 --> 8
x = layers.Conv2D(64, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 8
x = layers.BatchNormalization()(x) # 8
x = layers.Conv2DTranspose(32, kernel_size=2, strides=2,
padding='same')(x) # 8 --> 16
x = layers.Conv2D(32, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 16
x = layers.BatchNormalization()(x) # 16
x = layers.Conv2DTranspose(16, kernel_size=2, strides=2,
padding='same')(x) # 16 --> 32
x = layers.Conv2D(16, kernel_size=3, strides=1, padding='same',
activation='relu')(x) # 32
x = layers.BatchNormalization()(x) # 32
x = layers.Conv2D(out_channels, kernel_size=1, strides=1,
padding='same')(x) # 32
# Create the model.
model = tf.keras.Model(inputs, x, name='simple_cnn')
return model
"""
Differentiable image sampling
References:
1. https://github.com/tensorflow/models/blob/master/research/transformer/spatial_transformer.py
2. Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. https://arxiv.org/pdf/1506.02025.pdf
3. *Spatial* Transformer Networks by Kushagra Bhatnagar https://link.medium.com/0b2OrmqVO5
"""
@tf.function
def grid_sample(moving, grid):
"""Given a moving image and a sampling grid as input, computes the
transformed image by sampling the moving image at locations given by
the grid.
Currently, only 2-D images, i.e., 4-D inputs are supported.
Parameters
----------
moving : tf.Tensor, shape (N, H, W, C)
The moving image.
grid : tf.Tensor, shape (N, H, W, C)
A tensor of sampling points (x, y). The x and y values should be
normalized to [-1.0, 1.0] range.
Returns
-------
moved : tf.Tensor, shape (N, H, W, C)
The transformed image.
Notes
-----
Let M be the moving image of shape (H, W, C), T be the transformed
image of the same shape and G be the 2-D sampling grid of shape
(H, W, 2). The value of T at a location (x, y) is T[y, x, :] =
M[y', x', :] where [x', y'] = G[y, x, :].
Further, [x', y'] = [x + dx, y + dy] where [dx, dy] are the
displacements outputted by the CNN. When dx and dy are 0, the
sampling grid G is a regular grid and the transformed image is the
same as the moving image.
Since the sampling point (x + dx, y + dy) can be non-integral, the
value M[y', x'] is calculated using bi-linear interpolation.
References
----------
.. [1] `Jaderberg, Max, Karen Simonyan, and Andrew Zisserman. "Spatial
transformer networks." Advances in neural information processing
systems. 2015. <https://arxiv.org/abs/1506.02025>`_
.. [2] `TensorFlow implementation of spatial transformer networks.
<https://github.com/tensorflow/models/tree/master/research/transformer>`_
.. [3] `Spatial Transformer Networks by Kushagra Bhatnagar
<https://link.medium.com/0b2OrmqVO5>`_
"""
nb, nh, nw, nc = moving.shape
x = grid[..., 0] # shape (N, H, W)
y = grid[..., 1]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
# Scale x and y from [-1.0, 1.0] to [0, W] and [0, H] respectively.
x = (x + 1.0) * 0.5 * tf.cast(nw, 'float32')
y = (y + 1.0) * 0.5 * tf.cast(nh, 'float32')
y_max = tf.cast(nh - 1, 'int32')
x_max = tf.cast(nw - 1, 'int32')
zero = tf.constant(0, 'int32')
# The value at (x, y) is a weighted average of the values at the
# four nearest integer locations: (x0, y0), (x1, y0), (x0, y1) and
# (x1, y1) where x0 = floor(x), x1 = ceil(x).
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
# Make sure indices are within the boundaries of the image.
x0 = tf.clip_by_value(x0, zero, x_max)
x1 = tf.clip_by_value(x1, zero, x_max)
y0 = tf.clip_by_value(y0, zero, y_max)
y1 = tf.clip_by_value(y1, zero, y_max)
# Collect indices of the four corners.
b = tf.ones_like(x0) * tf.reshape(tf.range(nb), [nb, 1, 1])
idx_a = tf.stack([b, y0, x0], axis=-1) # all top-left corners
idx_b = tf.stack([b, y1, x0], axis=-1) # all bottom-left corners
idx_c = tf.stack([b, y0, x1], axis=-1) # all top-right corners
idx_d = tf.stack([b, y1, x1], axis=-1) # all bottom-right corners
# shape (N, H, W, 3)
# Collect values at the corners.
moving_a = tf.gather_nd(moving, idx_a) # all top-left values
moving_b = tf.gather_nd(moving, idx_b) # all bottom-left values
moving_c = tf.gather_nd(moving, idx_c) # all top-right values
moving_d = tf.gather_nd(moving, idx_d) # all bottom-right values
# shape (N, H, W, C)
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
# Calculate the weights.
wa = tf.expand_dims((x1_f - x) * (y1_f - y), axis=-1)
wb = tf.expand_dims((x1_f - x) * (y - y0_f), axis=-1)
wc = tf.expand_dims((x - x0_f) * (y1_f - y), axis=-1)
wd = tf.expand_dims((x - x0_f) * (y - y0_f), axis=-1)
# Calculate the weighted sum.
moved = tf.add_n([wa * moving_a, wb * moving_b, wc * moving_c,
wd * moving_d])
return moved
@tf.function
def regular_grid(shape):
"""Returns a batch of 2-D regular grids.
Currently, only 2-D regular grids are supported.
Parameters
----------
shape : sequence of ints, shape (3, )
The desired regular grid shape of the form (N, H, W).
Returns
-------
grid : tf.Tensor, shape (N, H, W, 2)
A batch of 2-D regular grids, values normalized to [-1.0, 1.0]
range.
Notes
-----
Sampling using the regular grid is an identity transformation, i.e.,
it results in the same input and output images.
References
----------
.. [1] `NumPy, "numpy.meshgrid"
<https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html>`_
.. [2] `NumPy, "numpy.indices"
<https://numpy.org/doc/stable/reference/generated/numpy.indices.html>`_
"""
nb, nh, nw = shape
x = tf.linspace(-1.0, 1.0, nw) # shape (W, )
y = tf.linspace(-1.0, 1.0, nh) # shape (H, )
X, Y = tf.meshgrid(x, y) # shape (H, W), both X and Y
grid = tf.stack([X, Y], axis=-1)
grid = tf.expand_dims(grid, axis=0) # shape (1, H, W, 2)
# Repeat the grids along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
grid = tf.tile(grid, multiples)
return grid
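# Added shape check (not in the original notebook): a regular grid holds one (x, y)
# pair per pixel, normalized to the [-1.0, 1.0] range.
_chk_grid = regular_grid([2, 32, 32])
print(_chk_grid.shape, float(tf.reduce_min(_chk_grid)), float(tf.reduce_max(_chk_grid)))
# -> (2, 32, 32, 2) -1.0 1.0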
"""Training and testing functions"""
@tf.function
def train_step(model, moving, static, criterion, optimizer):
"""A generic training procedure for one iteration.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
optimizer
        An optimizer.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Define the GradientTape context for automatic differentiation.
with tf.GradientTape() as tape:
# Get the deformation field
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
# Compute gradients.
grads = tape.gradient(loss, model.trainable_variables)
# Update the trainable parameters.
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
@tf.function
def test_step(model, moving, static, criterion):
"""A generic testing procedure.
Parameters
----------
model
A convolutional encoder-decoder network.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
criterion
The loss function.
Returns
-------
loss : tf.Tensor, shape ()
The average loss for the batch.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation field.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grid.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving image using the new sampling grid.
moved = grid_sample(moving, grid_new)
# Compute the loss.
loss = criterion(moved, static)
return loss
"""Data loading"""
def load_data(label=2):
"""Loads the MNIST dataset and preprocesses it: scales to [0.0, 1.0]
range, resizes the images from (28, 28) to (32, 32) and filters the
dataset to keep images of just one class.
Parameters
----------
label : {2, 0, 1, 3, 4, 5, 6, 7, 8, 9}, default 2
The class of images to train and test on.
Returns
-------
(x_train, x_test) : tuple of ndarrays
NumPy arrays of training and testing images.
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Discard digits which are not equal to label.
ids_train = np.where(y_train == label)
ids_test = np.where(y_test == label)
x_train = x_train[ids_train]
x_test = x_test[ids_test]
# Scale the image to [0, 1] range.
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
# Add the channel dim at the end. (N, H, W) --> (N, H, W, 1)
x_train = x_train[..., None]
x_test = x_test[..., None]
# Resize images from (28, 28) to (32, 32).
x_train = tf.image.resize(x_train, (32, 32))
x_test = tf.image.resize(x_test, (32, 32))
return x_train, x_test
"""Sample results"""
def plot_images(model, moving, static):
"""Visualize some images after training.
Parameters
----------
model
The trained model.
moving : tf.Tensor, shape (N, H, W, C)
A batch of moving images.
static : tf.Tensor, shape (1, H, W, C)
The static image.
"""
nb, nh, nw, nc = moving.shape
# Repeat the static image along the batch dim.
multiples = tf.constant([nb, 1, 1, 1], tf.int32)
static = tf.tile(static, multiples)
# Get the deformation fields for the batch.
inputs = tf.concat([moving, static], axis=-1)
deformation = model(inputs, training=False)
# Compute the new sampling grids.
grid = regular_grid([nb, nh, nw])
grid_new = grid + deformation
grid_new = tf.clip_by_value(grid_new, -1, 1)
# Sample the moving images using the new sampling grids.
moved = grid_sample(moving, grid_new)
# Convert the tensors to 8-bit images.
moved = moved.numpy().squeeze(axis=-1) * 255.0
moved = moved.astype(np.uint8)
moving = moving.numpy().squeeze(axis=-1) * 255.0
moving = moving.astype(np.uint8)
static = static.numpy().squeeze(axis=-1) * 255.0
static = static.astype(np.uint8)
# Plot images.
fig = plt.figure(figsize=(3 * 1.7, nb * 1.7))
titles_list = ['Static', 'Moved', 'Moving']
images_list = [static, moved, moving]
for i in range(nb):
for j in range(3):
ax = fig.add_subplot(nb, 3, i * 3 + j + 1)
if i == 0:
ax.set_title(titles_list[j], fontsize=20)
ax.set_axis_off()
ax.imshow(images_list[j][i], cmap='gray')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
class Args():
batch_size = 8
epochs = 50
lr = 0.004
label = 7 # which digit images to train on?
num_samples = 5 # number of sample results to show
save_model = False
args = Args()
# Load preprocessed training and testing data.
x_train, x_test = load_data(label=args.label)
# Randomly select an image as the static image from the test set.
# idx = np.random.randint(x_test.shape[0])
# static = tf.expand_dims(x_test[idx], axis=0)
static = tf.expand_dims(x_test[0], axis=0)
# Select some images from the test set to show sample results.
# ids = tf.constant(np.random.choice(x_test.shape[0], replace=False,
# size=args.num_samples))
# x_sample = tf.gather(x_test, ids)
x_sample = x_test[:args.num_samples]
# Shuffle and batch the dataset.
from_tensor_slices = tf.data.Dataset.from_tensor_slices
# x_train = from_tensor_slices(x_train).shuffle(10000).batch(args.batch_size)
# x_test = from_tensor_slices(x_test).shuffle(10000).batch(args.batch_size)
x_train = from_tensor_slices(x_train).batch(args.batch_size)
x_test = from_tensor_slices(x_test).batch(args.batch_size)
# Create a model instance.
model = simple_cnn(input_shape=(32, 32, 2))
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True, dpi=50)
# Select optimizer and loss function.
optimizer = tf.keras.optimizers.SGD(learning_rate=args.lr)
criterion = ncc_loss # normalized_cross_correlation_loss() # or mse_loss
# Define the metrics to track training and testing losses.
m_train = tf.keras.metrics.Mean(name='loss_train')
m_test = tf.keras.metrics.Mean(name='loss_test')
# Train and evaluate the model.
for epoch in range(args.epochs):
m_train.reset_states()
m_test.reset_states()
for i, moving in enumerate(x_train):
loss_train = train_step(model, moving, static, criterion,
optimizer)
m_train.update_state(loss_train)
for i, moving in enumerate(x_test):
loss_test = test_step(model, moving, static, criterion)
m_test.update_state(loss_test)
print('Epoch: %3d/%d\tTrain Loss: %.6f\tTest Loss: %.6f'
% (epoch + 1, args.epochs, m_train.result(), m_test.result()))
print('\n')
# Show sample results.
plot_images(model, x_sample, static)
# Save the trained model.
if args.save_model:
model.save('saved_models/simple_cnn')
| 33.92555 | 101 | 0.605237 | [
"Apache-2.0"
] | jerinka/voxelmorph_demo | register_basics.py | 20,050 | Python |
# import moonshine as ms
# from moonshine.curves import discount_factor
from .curves import get_discount_factor
from .instruments import price_cashflow
def egg(num_eggs: int) -> None:
"""prints the number of eggs.
Arguments:
num_eggs {int} -- The number of eggs
Returns:
None.
"""
print(f"We have {num_eggs} eggs")
def main() -> None:
discount_factor = get_discount_factor(0.02, 20)
price = price_cashflow(10, 0.02, 10)
bumped_price = 2.0 + price
bumped_price += price + 3
egg(123)
print(price)
print(discount_factor)
print(bumped_price)
if __name__ == "__main__":
main()
| 17.315789 | 51 | 0.651976 | [
"MIT"
] | CatchemAl/moonshine | src/moonshine/__main__.py | 658 | Python |
import pandas as pd
import streamlit as st
from awesome_table import AwesomeTable
from awesome_table.column import (Column, ColumnDType)
from sample import data as sample_data
st.set_page_config(page_title='AwesomeTable by @caiofaar', page_icon='📊', layout='wide')
st.title('AwesomeTable with Search')
AwesomeTable(pd.json_normalize(sample_data), columns=[
Column(name='id', label='ID'),
Column(name='name', label='Name'),
Column(name='job_title', label='Job Title'),
Column(name='avatar', label='Avatar'),
Column(name='_url.social_media', label='Social Media', dtype=ColumnDType.ICONBUTTON, icon='fa-solid fa-share-nodes'), ## From FontAwesome v6.0.0
Column(name='_url.document', label='Document', dtype=ColumnDType.DOWNLOAD),
], show_search=True) | 45.529412 | 148 | 0.745478 | [
"MIT"
] | caiodearaujo/streamlit-awesome-table | samples/with_search/__init__.py | 777 | Python |
import json
from decimal import Decimal
from django.core.paginator import Paginator
from django.db import transaction
from django.http import HttpResponseForbidden, JsonResponse
from django.shortcuts import render
# Create your views here.
from django.utils import timezone
from django.views import View
from django_redis import get_redis_connection
from goods.models import SKU
from meiduo_mall.utils.response_code import RETCODE
from meiduo_mall.utils.views import LoginRequiredMixin
from orders.models import OrderInfo, OrderGoods
from users.models import Address
import logging
logger = logging.getLogger('django')
class OrderSettlementView(LoginRequiredMixin,View):
"""结算订单"""
def get(self,request):
"""提供订单结算页面"""
# 获取登录用户
user = request.user
# 查询地址信息
try:
addresses = Address.objects.filter(user=user,is_deleted=False)
except Address.DoesNotExist:
# 如果地址为空,渲染模板时会判断,并跳转到地址编辑页面
addresses = None
# 从redis购物车中查询被勾选的商品信息
redis_conn = get_redis_connection('carts')
item_dict = redis_conn.hgetall('carts_%s' % user.id)
cart_selected = redis_conn.smembers('selected_%s' % user.id)
cart = {}
for sku_id in cart_selected:
cart[int(sku_id)] = int(item_dict[sku_id])
# 准备初始值
total_count = 0
total_amount = Decimal(0.00)
# 查询商品信息
skus = SKU.objects.filter(id__in=cart.keys())
for sku in skus:
sku.count = cart[sku.id]
sku.amount = sku.count * sku.price
# 计算总数量和总金额
total_count += sku.count
total_amount += sku.amount
# 补充运费
freight = Decimal('10.00')
# 渲染界面
context = {
'addresses':addresses,
'skus':skus,
'total_count': total_count,
'total_amount': total_amount,
'freight': freight,
'payment_amount': total_amount + freight
}
return render(request,'place_order.html',context)
def post(self,request):
"""保存订单信息和订单商品信息"""
# 获取当前要保存的订单数据
json_dict = json.loads(request.body)
address_id = json_dict.get('address_id')
pay_method = json_dict.get('pay_method')
# 校验参数
if not all([address_id, pay_method]):
return HttpResponseForbidden('缺少必传参数')
# 判断address_id是否合法
try:
address = Address.objects.get(id=address_id)
except Exception:
return HttpResponseForbidden('参数address_id错误')
# 判断pay_method是否合法
if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'],
OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:
return HttpResponseForbidden('参数pay_method错误')
# 获取登录用户
user = request.user
# 生成订单编号:年月日时分秒+用户编号
order_id = timezone.localtime().strftime('%Y%m%d%H%M%S') + ('%09d' % user.id)
# 显式的开启一个事务
with transaction.atomic():
# 创建事务保存点
save_id = transaction.savepoint()
# 暴力回滚
try:
# 保存订单基本信息OrderInfo
order = OrderInfo.objects.create(
order_id = order_id,
user = user,
address = address,
total_count=0,
total_amount=Decimal('0'),
freight=Decimal('10.00'),
pay_method=pay_method,
status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.ORDER_STATUS_ENUM['UNSEND']
)
# 从redis读取购物车中被勾选的商品
redis_conn = get_redis_connection('carts')
item_dict = redis_conn.hgetall('carts_%s' % user.id)
cart_selected = redis_conn.smembers('selected_%s' % user.id)
carts = {}
for sku_id in cart_selected:
carts[int(sku_id)] = int(item_dict[sku_id])
# 获取选中的商品id
sku_ids = carts.keys()
# 遍历购物车中被勾选的商品信息
for sku_id in sku_ids:
# TODO1: 增加的代码:增加一个死循环
while True:
# 查询SKU信息
sku = SKU.objects.get(id=sku_id)
# TODO2: 增加的代码:读取原始库存
origin_stock = sku.stock
origin_sales = sku.sales
# 判断SKU库存
sku_count = carts[sku_id]
# if sku_count > sku.stock:
if sku_count > origin_stock:
# 事务回滚
transaction.savepoint_rollback(save_id)
return JsonResponse({'code': RETCODE.STOCKERR,
'errmsg': '库存不足'})
# SKU减少库存,增加销量
# sku.stock -= sku_count
# sku.sales += sku_count
# sku.save()
# TODO3: 增加的代码:乐观锁更新库存和销量
# 计算差值
new_stock = origin_stock - sku_count
new_sales = origin_sales + sku_count
result = SKU.objects.filter(id=sku_id,stock=origin_stock).update(stock=new_stock,sales=new_sales)
#如果下单失败,但是库存足够时,继续下单,直到下单成功或者库存不足为止
if result == 0:
# 跳过当前循环的剩余语句,然后继续进行下一轮循环
continue
# 修改SPU销量
sku.goods.sales += sku_count
sku.goods.save()
# 保存订单商品信息OrderGoods
OrderGoods.objects.create(
order = order,
sku = sku,
count = sku_count,
price = sku.price
)
# 保存商品订单中总价和总数量
order.total_count += sku_count
order.total_amount += (sku_count * sku.price)
# TODO4:增加的代码:
# 下单成功或者失败就跳出循环
break
# 添加邮费和保存订单信息
order.total_amount += order.freight
order.save()
except Exception as e:
logger.error(e)
transaction.savepoint_rollback(save_id)
return JsonResponse({
'code': RETCODE.DBERR,
'errmsg': '下单失败'})
# 提交订单成功,显式的提交一次事务
transaction.savepoint_commit(save_id)
# 清楚购物车中已结算的商品
pl = redis_conn.pipeline()
pl.hdel('carts_%s' % user.id,*cart_selected)
pl.srem('selected_%s' % user.id,*cart_selected)
pl.execute()
# 响应提交订单结果
return JsonResponse({'code': RETCODE.OK,
'errmsg': '下单成功',
'order_id': order.order_id})
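# --- Added illustration (not part of the original project) ---
# The optimistic lock used above, in isolation: rather than locking the row, update
# it only while the stock still equals the value that was read. Django's
# QuerySet.update() returns the number of rows it changed, so 0 means another
# order modified the stock first and we simply re-read and retry.
def _optimistic_deduct_stock(sku_id, count):
    """Illustrative helper only; the real logic lives in OrderSettlementView.post."""
    while True:
        sku = SKU.objects.get(id=sku_id)
        if count > sku.stock:
            return False  # not enough stock left
        rows = SKU.objects.filter(id=sku_id, stock=sku.stock).update(
            stock=sku.stock - count, sales=sku.sales + count)
        if rows:
            return True  # the conditional update applied; 0 rows means retry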
class OrderSuccessView(LoginRequiredMixin,View):
def get(self, request):
order_id = request.GET.get('order_id')
payment_amount = request.GET.get('payment_amount')
pay_method = request.GET.get('pay_method')
context = {
'order_id': order_id,
'payment_amount': payment_amount,
'pay_method': pay_method
}
return render(request, 'order_success.html', context)
class UserOrderInfoView(LoginRequiredMixin,View):
    def get(self,request,page_num):
        # 1. Get all of the user's orders
        orders = OrderInfo.objects.filter(user=request.user).order_by('-create_time')
        # 2. Iterate over each order
        for order in orders:
            # 3. Attach extra attributes to each order: status_name, pay_method_name, sku_list
            order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status-1][1]
            order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.pay_method-1][1]
            order.sku_list = []
            # 4. Fill sku_list with the SKUs of this order
            # 5. Get all the order-goods rows of this order
            lines = order.skus.all()
            # 6. For each order-goods row, get the concrete SKU object (sku table)
            for line in lines:
                sku = line.sku
                # 7. Attach count and amount to the SKU
                sku.count = line.count
                sku.amount = sku.price * sku.count
                # 8. Append the SKU to sku_list
                order.sku_list.append(sku)
        # 9. Convert page_num to an integer before calling the paginator
        page_num = int(page_num)
        try:
            # 10. Build a paginator (orders is the object list, 2 is the page size)
            paginator = Paginator(orders, 2)
            # Fetch one page: returns the objects on that page
            page_orders = paginator.page(page_num)
            # Total number of pages
            total_page = paginator.num_pages
        except Exception as e:
            return HttpResponseForbidden('Pagination failed')
        # 11. Build the context
        context = {
            'total_page':total_page,
            'page_orders':page_orders,
            'page_num':page_num
        }
        # 12. Render the response
        return render(request,'user_center_order.html',context) | 31.847222 | 161 | 0.520279 | [
"MIT"
] | Gdavid123/md_project | meiduo_mall/meiduo_mall/apps/orders/views.py | 10,324 | Python |
import discord
from discord.ext import commands
class Mod:
"""Useful moderation commands to keep the server under control."""
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.guild_only()
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, user: discord.Member, *, reason: str = None):
"""Kicks a user from the server."""
if user == ctx.author:
return await ctx.send("Kicking yourself? smh.")
if user == self.bot.user:
return await ctx.send("I can't kick myself.")
res = f", for reason: `{reason}`" if reason else ""
try:
await user.kick(reason=reason)
await ctx.send(f"Kicked {user}{res}")
except discord.Forbidden:
await ctx.send("I don't have permissions to kick that user.")
except Exception as e:
raise e
@commands.command()
    @commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def purge(self, ctx, amount):
"""Purges X amount of messages from a channel"""
try:
amount = int(amount)
except ValueError:
return await ctx.send("Enter a number only!")
try:
await ctx.channel.purge(limit=amount+1)
await ctx.send(f"Purged **{amount}** messages", delete_after=3)
except discord.Forbidden:
await ctx.send(f"I need the `Manage Messages` permission to do this.")
def setup(bot):
bot.add_cog(Mod(bot)) | 36 | 82 | 0.605297 | [
"MIT"
] | bananaboy21/LadyBug-Bot | cogs/mod.py | 1,548 | Python |
from argparse import ArgumentParser
from google.cloud.speech import SpeechClient, types, enums
from pyaudio import PyAudio, paInt16, paContinue
from six.moves.queue import Queue, Empty
from sys import stdout
import socket
# from os import environ
# environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'C:/Users/kwea123/Downloads/MyProject-e85ed8c91456.json'
# Audio recording parameters
RATE = 44100
CHUNK = int(RATE / 1000)  # about 1 ms of audio per chunk
class MicrophoneStream(object):
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, rate, chunk):
self._rate = rate
self._chunk = chunk
# Create a thread-safe buffer of audio data
self._buff = Queue()
self.closed = True
def __enter__(self):
self._audio_interface = PyAudio()
self._audio_stream = self._audio_interface.open(
format=paInt16,
# The API currently only supports 1-channel (mono) audio
# https://goo.gl/z757pE
channels=1, rate=self._rate,
input=True, frames_per_buffer=self._chunk,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow while the calling thread makes network requests, etc.
stream_callback=self._fill_buffer,
)
self.closed = False
return self
def __exit__(self, type, value, traceback):
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
self._buff.put(in_data)
return None, paContinue
def generator(self):
while not self.closed:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except Empty:
break
yield b''.join(data)
def listen_print_loop(responses, print_locally=True, sock=None):
num_chars_printed = 0
for response in responses:
if not response.results:
continue
# The `results` list is consecutive. For streaming, we only care about
# the first result being considered, since once it's `is_final`, it
# moves on to considering the next utterance.
result = response.results[0]
if not result.alternatives:
continue
# Display the transcription of the top alternative.
transcript = result.alternatives[0].transcript
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = ' ' * (num_chars_printed - len(transcript))
if sock is not None:
sock.send(bytes(transcript, "utf-8"))
if print_locally: # print the result on the console.
if not result.is_final:
stdout.write(transcript + overwrite_chars + '\r')
stdout.flush()
num_chars_printed = len(transcript)
else:
print(transcript + overwrite_chars)
num_chars_printed = 0
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--debug", action="store_true",
help="show speech recognition result on the console",
default=False)
parser.add_argument("--connect", action="store_true",
help="connect to unity",
default=False)
parser.add_argument("--lang_code", type=str,
help="the language code of your language",
default="zh-tw")
args = parser.parse_args()
if args.connect:
address = ('127.0.0.1', 5067)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
else:
sock = None
client = SpeechClient()
config = types.RecognitionConfig(encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=RATE,
language_code=args.lang_code)
streaming_config = types.StreamingRecognitionConfig(config=config, interim_results=True)
print("%s recognition started!"%args.lang_code)
while True:
with MicrophoneStream(RATE, CHUNK) as stream:
audio_generator = stream.generator()
requests = (types.StreamingRecognizeRequest(audio_content=content)
for content in audio_generator)
try:
responses = client.streaming_recognize(streaming_config, requests)
listen_print_loop(responses, print_locally=args.debug, sock=sock)
except KeyboardInterrupt:
break
except: # ignore "400 Exceeded maximum allowed stream duration of 305 seconds."
continue
if sock is not None:
sock.close() | 37.681818 | 102 | 0.608478 | [
"MIT"
] | kwea123/Unity_live_caption | googlesr.py | 5,803 | Python |