| column | dtype | length / range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable ⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable ⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable ⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
f67969324b7b88047980838503dae8b978c474ed
|
1a86413d97fdbb3228ea3f00444881f723d73652
|
/land_ingest/psv_processor/generate-new-files.py
|
12054ecdecbdb716b7e7c277b1060c16ebf83ce7
|
[
"BSD-3-Clause"
] |
permissive
|
glamod/glamod-land-ingest
|
f7105f953660231fac434034a088c8118dcd9b63
|
a30c1030d39b5c5b1688d78db082c09167c2bd9c
|
refs/heads/master
| 2020-05-04T07:55:49.956203 | 2019-04-02T12:53:58 | 2019-04-02T12:53:58 | 179,037,431 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 16,820 |
py
|
import pandas as pd
import argparse
import sys
import os
import re
import datetime as dt
from collections import OrderedDict
from pandas.testing import assert_frame_equal
BASE_PATH = '/gws/nopw/j04/c3s311a_lot2/data/glamod_land_delivery_2018_12_31_Beta/'
FIXED_PATH = '/gws/nopw/j04/c3s311a_lot2/data/beta_fix7/'
STATION_FILE = BASE_PATH + 'station_configuration/station_configuration_Beta.psv'
TEST_MODE = False
def parse_args():
parser = argparse.ArgumentParser(description='Fix land data')
parser.add_argument('-index', dest='idx', type=int, required=True, help='Job index number')
parser.add_argument('-header_file_count', dest='header_file_count', type=int, required=True, help='Header files to process')
parser.add_argument('-n_parallel_jobs', dest='n_parallel_jobs', type = int, required=True, help='Number of parallel jobs to run on LOTUS')
args = parser.parse_args()
return args
def check_path(path):
if not os.path.isdir(path):
os.makedirs(path)
def get_station_configuration_table():
# Load station configuration file (all columns as object to avoid type promotion)
# Read in station config file, this only needs to be done once
station_configuration = pd.read_csv(STATION_FILE, sep='|', dtype=object)
if 'source_id ' in station_configuration.columns:
        station_configuration = station_configuration.rename(columns={'source_id ': 'source_id'})
station_configuration['source_id'] = station_configuration['source_id'].apply(lambda x: x.strip())
# rename record_number to station_record_number
station_configuration = station_configuration.rename(columns={
'record_number': 'station_record_number',
'primary_id':'primary_station_id'})
return station_configuration
def main():
station_configuration = get_station_configuration_table()
# TEST_MODE = True
# Run in test mode if file_list is defined
if TEST_MODE:
test_file = 'header_table/monthly/header_table_BETA_USW00003867_1.psv'
file_list = [test_file]
file_idx = [0]
else:
args = parse_args()
file_idx = range(args.idx - 1, args.header_file_count, args.n_parallel_jobs)
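        # For example (illustrative values): idx=2, header_file_count=10, n_parallel_jobs=4
        # gives range(1, 10, 4), so this job handles entries 1, 5 and 9 (0-based) of the file list.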
with open('header_table_files.txt') as f:
file_list = [i.strip() for i in f.readlines()]
COUNTER = 0
for idx in file_idx:
COUNTER += 1
        # Stop after the first iteration when running in test mode
if TEST_MODE and COUNTER > 1: return
header_file = file_list[idx]
# get ID from header file name
filename = os.path.basename(header_file)
        station_id = re.match(r'^header_table_BETA_(.+)_\d+\.psv', filename).groups()[0]
# Get observations file based on header file
observation_file = header_file.replace('header_table', 'observations_table')
observation_file = observation_file.replace('observations_table_BETA', 'observation_table_BETA')
print('Processing: ' + header_file)
# Load header table (all object)
# E.g.: header_file = 'header_table/daily/T3_protect/header_table_BETA_SWE00139284_1.psv'
header_path = BASE_PATH + header_file
header_table = pd.read_csv(header_path, sep='|', dtype=object)
if len(header_table) == 0:
print('[WARNING] No records in header table so exiting: {}'.format(header_path))
continue
print('Header file: {}'.format(header_path))
if 'source_id ' in header_table.columns:
header_table = header_table.rename(columns = {'source_id ': 'source_id'})
# header table has extraneous space at end of source_id field, remove
header_table['source_id'] = header_table['source_id'].apply(lambda x: x.strip())
if 'report_meaning_of_time_stamp' in header_table.columns:
header_table = header_table.rename(columns={'report_meaning_of_time_stamp': 'report_meaning_of_timestamp'})
# load observations table (all object)
# E.g.: observation_file = '/observations_table/daily/T3_protect/observation_table_BETA_SWE00139284_1.psv'
observations_path = BASE_PATH + observation_file
print('Observations file: {}'.format(observations_path))
observations_table = pd.read_csv(observations_path, sep='|', dtype=object)
if len(observations_table) == 0:
print('[WARNING] No records in observations table so exiting: {}'.format(observations_path))
continue
if 'source_id ' in observations_table.columns:
observations_table = observations_table.rename(columns={'source_id ': 'source_id'})
# observations table has extraneous space at end of source_id field, remove
observations_table['source_id'] = observations_table['source_id'].apply(lambda x: x.strip())
# Select row(s) from Source Configuration that match station id and record number
station_config_subset = station_configuration.loc[(station_configuration['primary_station_id'] == station_id)].copy()
if len(station_config_subset) == 0:
            raise Exception('Cannot find station ID in Station Configuration: {}'.format(station_id))
ot = observations_table
ht = header_table
scs = station_config_subset
# Check that 'source_id' is same in station_configuration, header_table and observations_table
# if len(header_table['source_id'].unique()) >
try:
scs_ids = set([int(src) for src in station_config_subset['source_id']])
ht_ids = set([int(src) for src in header_table['source_id'].unique()])
ot_ids = set([int(src) for src in observations_table['source_id'].unique()])
if (not ht_ids.issubset(scs_ids)) or (not ot_ids.issubset(scs_ids)):
assert(len(scs_ids) == 1)
                src_id = next(iter(scs_ids))  # scs_ids is a set with a single element
# Make an exception for monthly files - overwrite "source_id" using Station Config value
if "/monthly/" in header_path:
print('[INFO] Overwriting "source_id" using Station Configuration value in Header/Obs tables: {}'.format(src_id))
header_table['source_id'] = str(src_id)
observations_table['source_id'] = str(src_id)
else:
assert(ht_ids.issubset(scs_ids) and ot_ids.issubset(scs_ids))
except Exception as err:
print('[ERROR] Cannot match "source_id" in files:')
print(' {} :: {}'.format(scs_ids, STATION_FILE))
print(' {} :: {}'.format(ht_ids, header_path))
print(' {} :: {}'.format(ot_ids, observations_path))
continue
# Before merge of Header table and Station Configuration subset, check they will merge properly
if not set(header_table['station_record_number']).intersection(set(station_config_subset['station_record_number'])):
print('[WARNING] No valid merge between Header and Station Config tables on: "station_record_number". {}'.format(header_path))
continue
# merge tables
master_table = header_table.merge(station_config_subset, how='left',
on=['primary_station_id', 'station_record_number'],
suffixes=('_head','_station'))
mt = master_table
        if mt['longitude'].isnull().any() or mt['latitude'].isnull().any():
print('[WARNING] NANs in master table "longitude" or "latitude"! {}'.format(header_path))
continue
# Rename to 'source_id' so we can join on that and 'report_id'
master_table = master_table.rename(columns={'source_id_station': 'source_id'})
# merge observation and header table
master_table = master_table.merge(observations_table, how='outer',
on=['report_id'], suffixes=('', '_obs'))
# check we have data
if master_table.shape[0] == 0:
print('[WARNING] Master table has no content: {}'.format(header_path))
continue
# rename master columns
master_table = master_table.rename(columns={
# 'source_id_station': 'source_id',
'data_policy_licence_station': 'data_policy_licence',
'station_crs': 'crs',
'operating_territory': 'sub_region'
})
# Filter out any records without an observation_id - they will never work
print('[WARNING] Filtering out any master table records without an Observation ID')
master_table = master_table.loc[master_table['observation_id'].notnull()]
if 'sub_daily' in header_file:
report_type = 0 # SYNOP
elif 'daily' in header_file:
report_type = 3 # DAILY
elif 'monthly' in header_file:
report_type = 2 # CLIMAT
else:
            raise ValueError('Error, bad report_type')
master_table['report_type'] = report_type
# null columns to add
master_table = master_table.assign(station_speed = '')
master_table = master_table.assign(station_course = '')
master_table = master_table.assign(station_heading = '')
master_table = master_table.assign(source_record_id = '')
master_table = master_table.assign(secondary_variable = '')
master_table = master_table.assign(code_table = '')
master_table = master_table.assign(z_coordinate_method = '')
master_table = master_table.assign(sensor_id = '')
master_table = master_table.assign(sensor_automation_status = '')
master_table = master_table.assign(exposure_of_sensor = '')
master_table = master_table.assign(original_code_table = '')
master_table = master_table.assign(processing_code = '')
master_table = master_table.assign(adjustment_id = '' )
master_table = master_table.assign(traceability = '')
master_table = master_table.assign(advanced_qc = '')
master_table = master_table.assign(advanced_uncertainty = '')
master_table = master_table.assign(advanced_homogenisation = '')
master_table = master_table.assign(z_coordinate = '')
master_table = master_table.assign(z_coordinate_type = '')
master_table = master_table.assign(spatial_representativeness = '')
# add location column
location = master_table.apply(lambda x: 'SRID=4326;POINT({0} {1})'.format(x['longitude'], x['latitude']), axis = 1)
master_table = master_table.assign(location=location)
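        # e.g. longitude='-1.25', latitude='51.75' (illustrative values) yields the WKT string
        # 'SRID=4326;POINT(-1.25 51.75)' with an EPSG:4326 spatial reference prefix.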
# replace array fields with {} (all empty for beta release)
master_table['application_area'] = '{}'
master_table['observing_programme'] = '{}'
master_table['events_at_station'] = '{}'
master_table['duplicates'] = '{}'
master_table['processing_codes'] = '{}'
master_table = master_table.replace('', 'NULL')
master_table = master_table.fillna('NULL')
print('[INFO] Table sizes: Master = {}, Header = {}, Obs = {}'.format(len(master_table), len(ht), len(ot)))
# add location to header table
# write out new files
header_columns = [
'report_id','region', 'sub_region', 'application_area', 'observing_programme', 'report_type',
'station_name','station_type','platform_type','platform_sub_type','primary_station_id','station_record_number',
'primary_station_id_scheme','longitude','latitude','location_accuracy','location_method','location_quality',
'crs','station_speed','station_course','station_heading','height_of_station_above_local_ground',
'height_of_station_above_sea_level', 'height_of_station_above_sea_level_accuracy', 'sea_level_datum',
'report_meaning_of_timestamp','report_timestamp','report_duration','report_time_accuracy','report_time_quality',
'report_time_reference','profile_id','events_at_station','report_quality','duplicate_status','duplicates',
'record_timestamp','history','processing_level','processing_codes','source_id','source_record_id','location'
]
# add report_type, station_type and location to observations table
observation_columns = [
'observation_id','report_id','data_policy_licence','date_time','date_time_meaning','observation_duration',
'longitude','latitude','crs','z_coordinate','z_coordinate_type','observation_height_above_station_surface',
'observed_variable','secondary_variable','observation_value','value_significance','secondary_value','units',
'code_table','conversion_flag','location_method','location_accuracy','z_coordinate_method','bbox_min_longitude',
'bbox_max_longitude','bbox_min_latitude','bbox_max_latitude','spatial_representativeness','quality_flag',
'numerical_precision','sensor_id','sensor_automation_status','exposure_of_sensor','original_precision',
'original_units','original_code_table','original_value','conversion_method','processing_code','processing_level',
'adjustment_id','traceability','advanced_qc','advanced_uncertainty','advanced_homogenisation','source_id',
'report_type','station_type','location'
]
# Construct and write header file (by year)
header_table_out = master_table[header_columns].copy()
header_path_new = FIXED_PATH + header_file
write_outputs_by_year(header_table_out, header_path_new, input_file=header_path)
# Construct and write observations file (by year)
observations_table_out = master_table[observation_columns].copy()
observations_table_out = observations_table_out.rename(columns = {'location_accuracy': 'location_precision'})
observations_path_new = FIXED_PATH + observation_file
write_outputs_by_year(observations_table_out, observations_path_new, input_file=observations_path)
def write_outputs_by_year(df, output_file_base, input_file):
fdir, fname = os.path.split(output_file_base)
# Drop duplicates in table
df.drop_duplicates(inplace=True)
# Get year range and then split DataFrame into years and write each year
# to file: "<base_name>.<year>.psv"
# First: convert field to datetime
if fname.startswith('header_table_'):
time_field = 'report_timestamp'
sort_fields = ['report_id', time_field]
table_type = 'Header'
else:
time_field = 'date_time'
sort_fields = ['observation_id', time_field]
table_type = 'Observations'
try:
df[time_field] = pd.to_datetime(df[time_field], utc=True)
except Exception as err:
print('df[time_field] = pd.to_datetime(df[time_field], utc=True)')
# import pdb; pdb.set_trace()
raise Exception('Failed to find valid time field: {}\nReview file: {}'.format(time_field, input_file))
# Now work with time series from that field
time_series = df[time_field]
start_year = time_series.min().year
end_year = time_series.max().year
# Set up some temporary lists to check consistency
dfs = OrderedDict()
record_count = 0
for year in range(start_year, end_year + 1):
# print('[INFO] Filter by year: {}'.format(year))
_df = df[df[time_field].dt.year == year]
record_count += len(_df)
dfs[year] = _df
print('[INFO] Checking original Table matches those split by time...')
if record_count != len(df):
raise Exception('Split of records did not match original table size, for: {}'.format(output_file_base))
# Test that the original data matches the split data (when sorted by time field)
remade_df = pd.concat(dfs.values())
remade_df.sort_values(sort_fields, inplace=True)
df_sorted = df.sort_values(sort_fields)
print('[WARNING] Frame sizes: remade_df: {}, df_sorted (orig): {}'.format(len(remade_df), len(df_sorted)))
try:
assert_frame_equal(df_sorted, remade_df)
except Exception as err:
raise Exception('Remade data frame does not match original, for: {}'.format(output_file_base))
# Make sure output directory exists
output_dir = os.path.join(fdir, fname.replace('.psv', ''))
check_path(output_dir)
# Now loop through each and write out
for year, _df in dfs.items():
# Ignore empty years
if len(_df) == 0: continue
fname_year = fname.replace('.psv', '.{}.psv'.format(year))
out_path = os.path.join(output_dir, fname_year)
_df.to_csv(out_path, sep='|', index=False, date_format='%Y-%m-%d %H:%M:%S%z')
print('[INFO] {} file saved to: {}'.format(table_type, out_path))
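# Illustrative output layout: for the test header file header_table_BETA_USW00003867_1.psv,
# each year of data is written under FIXED_PATH as
# header_table/monthly/header_table_BETA_USW00003867_1/header_table_BETA_USW00003867_1.<year>.psv.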
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
1653c4111074a8ee0622a4f24e3279ad845f4ce0
|
5bd624a1c1d4834b49fe5e2d2bf9446d08a36161
|
/pytype/pytd/pytd.py
|
a8d781d94d18c8d3df397214701e996872e9cfe9
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
tharvik/pytype
|
ce3c7eca73082b047508df715ce7d179f28e15ba
|
df526720c96b08d328b2214d08eaa67ca342b01a
|
refs/heads/master
| 2020-12-01T09:32:24.823351 | 2016-07-12T00:29:39 | 2016-07-12T14:18:14 | 63,402,024 | 0 | 0 | null | 2016-07-15T07:41:27 | 2016-07-15T07:41:27 | null |
UTF-8
|
Python
| false | false | 14,304 |
py
|
# -*- coding:utf-8; python-indent:2; indent-tabs-mode:nil -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Our way of using namedtuple is confusing pylint.
# pylint: disable=no-member
"""AST representation of a pytd file."""
import itertools
from pytype.pytd.parse import node
class TypeDeclUnit(node.Node('name',
'constants', 'classes', 'functions', 'aliases')):
"""Module node. Holds module contents (constants / classes / functions).
Attributes:
name: Name of this module, or None for the top-level module.
constants: Iterable of module-level constants.
functions: Iterable of functions defined in this type decl unit.
classes: Iterable of classes defined in this type decl unit.
aliases: Iterable of aliases (or imports) for types in other modules.
"""
__slots__ = ()
def Lookup(self, name):
"""Convenience function: Look up a given name in the global namespace.
Tries to find a constant, function or class by this name.
Args:
name: Name to look up.
Returns:
A Constant, Function or Class.
Raises:
KeyError: if this identifier doesn't exist.
"""
# TODO(kramm): Put constants, functions, classes and aliases into a
# combined dict.
try:
return self._name2item[name]
except AttributeError:
self._name2item = {}
for x in self.constants + self.functions + self.classes + self.aliases:
self._name2item[x.name] = x
return self._name2item[name]
# The hash/eq/ne values are used for caching and speed things up quite a bit.
def __hash__(self):
return id(self)
def __eq__(self, other):
return id(self) == id(other)
def __ne__(self, other):
return id(self) != id(other)
def ASTeq(self, other):
# Used in tests.
return (self.constants == other.constants and
self.classes == other.classes and
self.functions == other.functions and
self.aliases == other.aliases)
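# Illustrative construction and lookup (a sketch; the field order follows the node.Node call above):
#   unit = TypeDeclUnit('mymod',
#                       constants=(Constant('x', NamedType('int')),),
#                       classes=(), functions=(), aliases=())
#   unit.Lookup('x')  # -> Constant(name='x', type=NamedType('int'))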
class Constant(node.Node('name', 'type')):
__slots__ = ()
class Alias(node.Node('name', 'type')):
"""An alias (symbolic link) for a class implemented in some other module.
  Unlike Constant, an Alias is the type itself, as opposed to an instance of that
  type. It is generated, among other things, from imports - e.g. "from x import y as z"
  will create a local alias "z" for "x.y".
"""
__slots__ = ()
class Class(node.Node('name', 'parents', 'methods', 'constants', 'template')):
"""Represents a class declaration.
Used as dict/set key, so all components must be hashable.
Attributes:
name: Class name (string)
parents: The super classes of this class (instances of pytd.TYPE).
methods: Tuple of methods, classmethods, staticmethods
(instances of pytd.Function).
constants: Tuple of constant class attributes (instances of pytd.Constant).
template: Tuple of pytd.TemplateItem instances.
"""
# TODO(kramm): Rename "parents" to "bases". "Parents" is confusing since we're
# in a tree.
__slots__ = ()
def Lookup(self, name):
"""Convenience function: Look up a given name in the class namespace.
Tries to find a method or constant by this name in the class.
Args:
name: Name to look up.
Returns:
A Constant or Function instance.
Raises:
KeyError: if this identifier doesn't exist in this class.
"""
# TODO(kramm): Remove this. Make methods and constants dictionaries.
try:
return self._name2item[name]
except AttributeError:
self._name2item = {}
for x in self.methods + self.constants:
self._name2item[x.name] = x
return self._name2item[name]
STATICMETHOD, CLASSMETHOD, METHOD = 'staticmethod', 'classmethod', 'method'
class Function(node.Node('name', 'signatures', 'kind')):
"""A function or a method, defined by one or more PyTD signatures.
Attributes:
name: The name of this function.
signatures: Tuple of possible parameter type combinations for this function.
kind: The type of this function. One of: STATICMETHOD, CLASSMETHOD, METHOD
"""
__slots__ = ()
class ExternalFunction(Function):
"""A function or a method, defined by PYTHONCODE (see parse/parser.py).
Attributes:
name: The name of this function.
signatures: Empty tuple of signatures.
"""
__slots__ = ()
class Signature(node.Node('params', 'return_type', 'exceptions', 'template',
'has_optional')):
"""Represents an individual signature of a function.
For overloaded functions, this is one specific combination of parameters.
For non-overloaded functions, there is a 1:1 correspondence between function
and signature.
Attributes:
name: The name of this function.
params: The list of parameters for this function definition.
return_type: The return type of this function.
exceptions: List of exceptions for this function definition.
template: names for bindings for bounded types in params/return_type
has_optional: Do we have optional parameters ("...")?
"""
__slots__ = ()
class Parameter(node.Node('name', 'type')):
"""Represents a parameter of a function definition.
Attributes:
name: The name of the parameter.
type: The type of the parameter.
"""
__slots__ = ()
class OptionalParameter(Parameter):
"""Represents an optional parameter of a function definition.
Can never be mutable.
Attributes:
name: The name of the parameter.
type: The type of the parameter.
"""
__slots__ = ()
# Conceptually, this is a subtype of Parameter:
class MutableParameter(node.Node('name', 'type', 'new_type')):
"""Represents a parameter that's modified by the function.
Can never be optional.
Attributes:
name: The name of the parameter.
type: The type of the parameter.
new_type: The type the parameter will have after the function is called.
"""
__slots__ = ()
class TypeParameter(node.Node('name')):
"""Represents a type parameter.
A type parameter is a bound variable in the context of a function or class
definition. It specifies an equivalence between types.
  For example, this defines an identity function:
def f<T>(x: T) -> T
"""
__slots__ = ()
class TemplateItem(node.Node('type_param')):
"""Represents template name for generic types.
This is used for classes and signatures. The 'template' field of both is
a list of TemplateItems. Note that *using* the template happens through
TypeParameters. E.g. in:
class A<T>:
def f(T x) -> T
both the "T"s in the definition of f() are using pytd.TypeParameter to refer
to the TemplateItem in class A's template.
Attributes:
type_param: the TypeParameter instance used. This is the actual instance
that's used wherever this type parameter appears, e.g. within a class.
"""
__slots__ = ()
@property
def name(self):
return self.type_param.name
# Types can be:
# 1.) NamedType:
# Specifies a type by name (i.e., a string)
# 2.) NativeType
# Points to a Python type. (int, float etc.)
# 3.) ClassType
# Points back to a Class in the AST. (This makes the AST circular)
# 4.) GenericType
# Contains a base type and parameters.
# 5.) UnionType / IntersectionType
# Can be multiple types at once.
# 6.) NothingType / AnythingType
# Special purpose types that represent nothing or everything.
# 7.) TypeParameter
# A placeholder for a type.
# 8.) Scalar
# A singleton type. Not currently used, but supported by the parser.
# 9.) ExternalType:
# A type in another module. We may only know the name.
# For 1-3, the file visitors.py contains tools for converting between the
# corresponding AST representations.
class NamedType(node.Node('name')):
"""A type specified by name and, optionally, the module it is in."""
__slots__ = ()
def __str__(self):
return self.name
class NativeType(node.Node('python_type')):
"""A type specified by a native Python type. Used during runtime checking."""
__slots__ = ()
class ClassType(node.Node('name')):
"""A type specified through an existing class node."""
# This type is different from normal nodes:
# (a) It's mutable, and there are functions
# (parse/visitors.py:InPlaceFillInClasses) that modify a tree in place.
# (b) Because it's mutable, it's not actually using the tuple/Node interface
# to store things (in particular, the pointer to the existing class).
# (c) Visitors will not process the "children" of this node. Since we point
# to classes that are back at the top of the tree, that would generate
# cycles.
__slots__ = ()
def __new__(pycls, name, cls=None): # pylint: disable=bad-classmethod-argument
self = super(ClassType, pycls).__new__(pycls, name)
# self.cls potentially filled in later (by visitors.InPlaceFillInClasses)
self.cls = cls
return self
# __eq__ is inherited (using tuple equality + requiring the two classes
# be the same)
def __str__(self):
return str(self.cls.name) if self.cls else self.name
def __repr__(self):
return '{type}{cls}({name})'.format(
type=type(self).__name__, name=self.name,
cls='<unresolved>' if self.cls is None else '')
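# Illustrative behaviour (a sketch): for t = ClassType('Foo') with t.cls still unresolved,
#   str(t)   -> 'Foo'
#   repr(t)  -> 'ClassType<unresolved>(Foo)'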
class FunctionType(node.Node('name', 'function')):
"""The type of a function. E.g. the type of 'x' in 'x = lambda y: y'."""
__slots__ = ()
class ExternalType(node.Node('name')):
"""A type specified by name and the module it is in."""
def __new__(pycls, name, module): # pylint: disable=bad-classmethod-argument
self = super(ExternalType, pycls).__new__(pycls, name)
self.module = module
return self
def __str__(self):
return self.module + '.' + self.name
def __repr__(self):
return 'ExternalType(%r, %r)' % (self.name, self.module)
class AnythingType(node.Node()):
"""A type we know nothing about yet ('?' in pytd)."""
__slots__ = ()
class NothingType(node.Node()):
"""An "impossible" type, with no instances ('nothing' in pytd).
Also known as the "uninhabited" type, or, in type systems, the "bottom" type.
For representing empty lists, and functions that never return.
"""
__slots__ = ()
class Scalar(node.Node('value')):
__slots__ = ()
class UnionType(node.Node('type_list')):
"""A union type that contains all types in self.type_list."""
__slots__ = ()
# NOTE: type_list is kept as a tuple, to preserve the original order
# even though in most respects it acts like a frozenset.
# It also flattens the input, such that printing without
# parentheses gives the same result.
def __new__(cls, type_list):
assert type_list # Disallow empty unions. Use NothingType for these.
flattened = itertools.chain.from_iterable(
t.type_list if isinstance(t, UnionType) else [t] for t in type_list)
return super(UnionType, cls).__new__(cls, tuple(flattened))
def __hash__(self):
# See __eq__ - order doesn't matter, so use frozenset
return hash(frozenset(self.type_list))
def __eq__(self, other):
if self is other:
return True
if isinstance(other, UnionType):
# equality doesn't care about the ordering of the type_list
return frozenset(self.type_list) == frozenset(other.type_list)
return NotImplemented
def __ne__(self, other):
return not self == other
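# Illustrative flattening and equality (a sketch):
#   u = UnionType((NamedType('int'), UnionType((NamedType('str'), NamedType('float')))))
#   u.type_list  # -> (NamedType('int'), NamedType('str'), NamedType('float'))
#   u == UnionType((NamedType('float'), NamedType('str'), NamedType('int')))  # True (order ignored)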
# TODO(kramm): Do we still need this?
class IntersectionType(node.Node('type_list')):
"""An intersection type that contains all types in self.type_list."""
__slots__ = ()
# NOTE: type_list is kept as a tuple, to preserve the original order
# even though in most respects it acts like a frozenset.
# It also flattens the input, such that printing without
# parentheses gives the same result.
def __new__(cls, type_list):
flattened = itertools.chain.from_iterable(
t.type_list if isinstance(t, IntersectionType) else [t]
for t in type_list)
return super(IntersectionType, cls).__new__(cls, tuple(flattened))
def __hash__(self):
# See __eq__ - order doesn't matter, so use frozenset
return hash(frozenset(self.type_list))
def __eq__(self, other):
if self is other:
return True
if isinstance(other, IntersectionType):
# equality doesn't care about the ordering of the type_list
return frozenset(self.type_list) == frozenset(other.type_list)
return NotImplemented
def __ne__(self, other):
return not self == other
class GenericType(node.Node('base_type', 'parameters')):
"""Generic type. Takes a base type and type paramters.
This corresponds to the syntax: type<type1,>, type<type1, type2> (etc.).
Attributes:
base_type: The base type. Instance of Type.
parameters: Type parameters. Tuple of instances of Type.
"""
__slots__ = ()
class HomogeneousContainerType(GenericType):
"""Special generic type for homogeneous containers. Only has one type param.
This differs from GenericType in that it assumes *all* items in a container
will be the same type. The syntax is type<t>. (Vs type<t,> for GenericType.)
"""
__slots__ = ()
@property
def element_type(self):
return self.parameters[0]
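# Illustrative usage of the generic types (a sketch):
#   GenericType(NamedType('dict'), (NamedType('str'), NamedType('int')))  # dict<str, int>
#   HomogeneousContainerType(NamedType('list'), (NamedType('int'),)).element_type  # -> NamedType('int')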
# So we can do "isinstance(node, pytd.TYPE)":
TYPE = (NamedType, NativeType, ClassType, AnythingType, UnionType,
NothingType, GenericType, TypeParameter, Scalar,
IntersectionType, ExternalType)
# Types that can be a base type of GenericType:
GENERIC_BASE_TYPE = (NamedType, ClassType, ExternalType)
def Print(n, print_format=None):
"""Convert a PYTD node to a string."""
# TODO(kramm): fix circular import
from pytype.pytd import utils # pylint: disable=g-import-not-at-top
return utils.Print(n, print_format)
|
[
"[email protected]"
] | |
feccd33264447c92abf14e4458ead9f06a2faa3f
|
d4f1bd5e52fe8d85d3d0263ede936928d5811bff
|
/Python/Problem Solving/ETC_algorithm_problem/5-11-2 max heap.py
|
d2ff607ea6054165b71bd00604e33c7b4fd62e91
|
[] |
no_license
|
ambosing/PlayGround
|
37f7d071c4402599995a50cac1e7f1a85c6d10dd
|
0d5262dbb2fa2128ecb3fd969244fa647b104928
|
refs/heads/master
| 2023-04-08T04:53:31.747838 | 2023-03-23T06:32:47 | 2023-03-23T06:32:47 | 143,112,370 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 181 |
py
|
import heapq
h = []
while True:
n = int(input())
if n == -1:
break
elif n == 0:
print(heapq.heappop(h)[1])
else:
heapq.heappush(h, (-n, n))
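# Example session (illustrative): inputs 3, 5, 0, -1 print 5 once, because heapq is a
# min-heap and pushing (-n, n) orders entries by -n, so heappop returns the largest n.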
|
[
"[email protected]"
] | |
db2d73da906d6f138497d0b5d139584dad651cf8
|
fd9cc4cdf9daca76b048b1697ba2aa3a51c17c76
|
/FlaskWebAPI/application/entities/countries/interface_test.py
|
fb5bbe432aa43c3ea02a30d987efb08b4173ef08
|
[] |
no_license
|
dangost/PythonWebService
|
cdb40ebce00060591c6ce00f4dafdd109427b8a8
|
37c1438b6c26df0c0a440dd3a637d2633e94f1b2
|
refs/heads/master
| 2022-12-12T20:45:52.817606 | 2020-08-31T14:27:46 | 2020-08-31T14:27:46 | 289,210,673 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 82 |
py
|
from application.entities.countries.interface import BaseCountriesRepository
|
[
"[email protected]"
] | |
d38db2fa69e1cec7a307c36300e70bf1c784d05c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04043/s722084447.py
|
63049b4fc35548c7e3243a14c504967fc3f86476
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 148 |
py
|
A, B, C = map(int, input().split())
num = [0]*11
num[A] += 1
num[B] += 1
num[C] += 1
ans = 'YES' if num[5] == 2 and num[7] == 1 else 'NO'
print(ans)
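# Example (illustrative): input '5 7 5' prints YES (two 5s and one 7); '5 7 4' prints NO.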
|
[
"[email protected]"
] | |
9f0b2d91751a39fb0928cb1695e4ef33be1ad02d
|
d260f1492f1d3cffb72bd4e8c67da7b0724fa5d5
|
/kubeflow/fairing/preprocessors/full_notebook.py
|
84ff8f31a9bf417bb912c022cc61bddfa05ca6e0
|
[
"Apache-2.0"
] |
permissive
|
wyw64962771/fairing
|
3be92ab22d596a360c6f8d70f678b3ada265e649
|
0cc639870ea3f773c5ae8a53c0ab16d4cda2ea6c
|
refs/heads/master
| 2020-08-19T13:28:24.778189 | 2019-10-17T12:08:39 | 2019-10-17T12:08:39 | 215,924,578 | 1 | 0 |
Apache-2.0
| 2019-10-18T02:23:58 | 2019-10-18T02:23:58 | null |
UTF-8
|
Python
| false | false | 2,380 |
py
|
import os
from kubeflow.fairing.preprocessors.base import BasePreProcessor
from kubeflow.fairing.constants import constants
from kubeflow.fairing.notebook import notebook_util
class FullNotebookPreProcessor(BasePreProcessor):
""" The Full notebook preprocess for the context which comes from BasePreProcessor.
:param BasePreProcessor: a context that gets sent to the builder for the docker build and
sets the entrypoint
"""
# TODO: Allow configuration of errors / timeout options
def __init__(self,
notebook_file=None,
output_file="fairing_output_notebook.ipynb",
input_files=None,
command=None,
path_prefix=constants.DEFAULT_DEST_PREFIX,
output_map=None):
""" Init the full notebook preprocess.
:param notebook_file: the jupyter notebook file.
        :param output_file: the output file, the default name is 'fairing_output_notebook.ipynb'.
:param input_files: the source files to be processed.
:param command: the command to pass to the builder.
        :param path_prefix: the default destination path prefix '/app/'.
:param output_map: a dict of files to be added without preprocessing.
"""
if notebook_file is None and notebook_util.is_in_notebook():
notebook_file = notebook_util.get_notebook_name()
if notebook_file is None:
raise ValueError('A notebook_file must be provided.')
relative_notebook_file = notebook_file
# Convert absolute notebook path to relative path
if os.path.isabs(notebook_file[0]):
relative_notebook_file = os.path.relpath(notebook_file)
if command is None:
command = ["papermill", relative_notebook_file, output_file, "--log-output"]
input_files = input_files or []
if relative_notebook_file not in input_files:
input_files.append(relative_notebook_file)
super().__init__(
executable=None,
input_files=input_files,
command=command,
output_map=output_map,
path_prefix=path_prefix)
def set_default_executable(self):
""" Ingore the default executable setting for the full_notebook preprocessor.
"""
pass
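# Illustrative usage (the notebook file name is hypothetical):
#   preprocessor = FullNotebookPreProcessor(notebook_file='train.ipynb')
# builds the command ['papermill', 'train.ipynb', 'fairing_output_notebook.ipynb', '--log-output']
# and adds 'train.ipynb' to the preprocessor's input files.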
|
[
"[email protected]"
] | |
50d1f8e859d7710b2a71797166f82bbf97dcfb1f
|
df1ed60ce7d95a31565c5963ccda404d16b780ba
|
/src/h02_learn/dataset/dep_label.py
|
2667a9d3378c4777ae28ec7222d485504c635aef
|
[
"MIT"
] |
permissive
|
imperialite/info-theoretic-probing
|
471a3c726e8b4e433ae8acaa070fbd964c6640a1
|
70414d5466e8c372187730c018064dd9309dd09a
|
refs/heads/master
| 2022-04-23T00:53:23.283886 | 2020-04-27T16:19:07 | 2020-04-27T16:19:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,315 |
py
|
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
import torch
from torch.utils.data import Dataset
from h01_data.process import get_data_file_base as get_file_names
from util import constants
from util import util
from .pos_tag import PosTagDataset
class DepLabelDataset(PosTagDataset):
# pylint: disable=too-many-instance-attributes
def load_data_index(self):
data_ud = util.read_data(self.input_name_base % (self.mode, 'ud'))
x_raw, y_raw = [], []
for sentence_ud, words in data_ud:
for i, token in enumerate(sentence_ud):
head = token['head']
rel = token['rel']
if rel == "_" or rel == "root":
continue
x_raw_tail = words[i]
x_raw_head = words[head - 1]
x_raw += [[x_raw_tail, x_raw_head]]
y_raw += [rel]
x_raw = np.array(x_raw)
y_raw = np.array(y_raw)
return x_raw, y_raw
def load_index(self, x_raw, words=None):
if words is None:
words = []
new_words = sorted(list(set(np.unique(x_raw)) - set(words)))
if new_words:
words = np.concatenate([words, new_words])
words_dict = {word: i for i, word in enumerate(words)}
x = np.array([[words_dict[token] for token in tokens] for tokens in x_raw])
self.x = torch.from_numpy(x)
self.words = words
self.n_words = len(words)
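    # Illustrative effect of load_index (a sketch with made-up tokens):
    #   x_raw = np.array([['dog', 'barked'], ['cat', 'sat']])
    #   builds words = ['barked', 'cat', 'dog', 'sat'] and self.x = tensor([[2, 0], [1, 3]]).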
def load_data(self):
data_ud = util.read_data(self.input_name_base % (self.mode, 'ud'))
data_embeddings = util.read_data(self.input_name_base % (self.mode, self.representation))
x_raw, y_raw = [], []
for (sentence_ud, words), (sentence_emb, _) in zip(data_ud, data_embeddings):
for i, token in enumerate(sentence_ud):
head = token['head']
rel = token['rel']
if rel == "_" or rel == "root":
continue
x_raw_tail = sentence_emb[i]
x_raw_head = sentence_emb[head - 1]
x_raw += [np.concatenate([x_raw_tail, x_raw_head])]
y_raw += [rel]
x_raw = np.array(x_raw)
y_raw = np.array(y_raw)
return x_raw, y_raw
|
[
"[email protected]"
] | |
e8f98be5b7ab2b73eb661006b68ed05132abcd26
|
1bfad01139237049eded6c42981ee9b4c09bb6de
|
/RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/bgpipv6evpnvpws.py
|
b056247e24115b64997226aec74465e157386f30
|
[
"MIT"
] |
permissive
|
kakkotetsu/IxNetwork
|
3a395c2b4de1488994a0cfe51bca36d21e4368a5
|
f9fb614b51bb8988af035967991ad36702933274
|
refs/heads/master
| 2020-04-22T09:46:37.408010 | 2019-02-07T18:12:20 | 2019-02-07T18:12:20 | 170,284,084 | 0 | 0 |
MIT
| 2019-02-12T08:51:02 | 2019-02-12T08:51:01 | null |
UTF-8
|
Python
| false | false | 37,758 |
py
|
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class BgpIPv6EvpnVpws(Base):
"""The BgpIPv6EvpnVpws class encapsulates a user managed bgpIPv6EvpnVpws node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the BgpIPv6EvpnVpws property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
The internal properties list can be managed by the user by using the add and remove methods.
"""
_SDM_NAME = 'bgpIPv6EvpnVpws'
def __init__(self, parent):
super(BgpIPv6EvpnVpws, self).__init__(parent)
@property
def BgpAsPathSegmentList(self):
"""An instance of the BgpAsPathSegmentList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpaspathsegmentlist.BgpAsPathSegmentList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpaspathsegmentlist import BgpAsPathSegmentList
return BgpAsPathSegmentList(self)
@property
def BgpClusterIdList(self):
"""An instance of the BgpClusterIdList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpclusteridlist.BgpClusterIdList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpclusteridlist import BgpClusterIdList
return BgpClusterIdList(self)
@property
def BgpCommunitiesList(self):
"""An instance of the BgpCommunitiesList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpcommunitieslist.BgpCommunitiesList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpcommunitieslist import BgpCommunitiesList
return BgpCommunitiesList(self)
@property
def BgpExtendedCommunitiesList(self):
"""An instance of the BgpExtendedCommunitiesList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpextendedcommunitieslist.BgpExtendedCommunitiesList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpextendedcommunitieslist import BgpExtendedCommunitiesList
return BgpExtendedCommunitiesList(self)
@property
def BroadcastDomainV6Vpws(self):
"""An instance of the BroadcastDomainV6Vpws class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.broadcastdomainv6vpws.BroadcastDomainV6Vpws)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.broadcastdomainv6vpws import BroadcastDomainV6Vpws
return BroadcastDomainV6Vpws(self)._select()
@property
def Connector(self):
"""An instance of the Connector class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector.Connector)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector import Connector
return Connector(self)
@property
def Tag(self):
"""An instance of the Tag class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag.Tag)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag import Tag
return Tag(self)
@property
def Active(self):
"""Activate/Deactivate Configuration
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('active')
@property
def AdRouteLabel(self):
"""AD Route Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('adRouteLabel')
@property
def AdvertiseL3vniSeparately(self):
"""Advertise L3 Route Separately
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('advertiseL3vniSeparately')
@property
def AggregatorAs(self):
"""Aggregator AS
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('aggregatorAs')
@property
def AggregatorId(self):
"""Aggregator ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('aggregatorId')
@property
def AsSetMode(self):
"""AS# Set Mode
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('asSetMode')
@property
def AutoConfigPMSITunnelId(self):
"""Auto Configure PMSI Tunnel ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('autoConfigPMSITunnelId')
@property
def AutoConfigureRdIpAddress(self):
"""Auto-Configure RD IP Addresses
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('autoConfigureRdIpAddress')
@property
def BMacFirstLabel(self):
"""B MAC First Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('bMacFirstLabel')
@property
def BMacSecondLabel(self):
"""B MAC Second Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('bMacSecondLabel')
@property
def ConnectedVia(self):
"""List of layers this layer used to connect to the wire
Returns:
list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])
"""
return self._get_attribute('connectedVia')
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute('connectedVia', value)
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
Returns:
number
"""
return self._get_attribute('count')
@property
def DescriptiveName(self):
"""Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but maybe offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def EnableAggregatorId(self):
"""Enable Aggregator ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableAggregatorId')
@property
def EnableAsPathSegments(self):
"""Enable AS Path Segments
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableAsPathSegments')
@property
def EnableAtomicAggregate(self):
"""Enable Atomic Aggregate
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableAtomicAggregate')
@property
def EnableBMacSecondLabel(self):
"""Enable B MAC Second Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableBMacSecondLabel')
@property
def EnableCluster(self):
"""Enable Cluster
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableCluster')
@property
def EnableCommunity(self):
"""Enable Community
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableCommunity')
@property
def EnableExtendedCommunity(self):
"""Enable Extended Community
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableExtendedCommunity')
@property
def EnableL3TargetOnlyForRouteType5(self):
"""Enable L3 Target only for Route Type 5
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableL3TargetOnlyForRouteType5')
@property
def EnableL3vniTargetList(self):
"""Enable L3 Target List
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableL3vniTargetList')
@property
def EnableLocalPreference(self):
"""Enable Local Preference
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableLocalPreference')
@property
def EnableMultiExitDiscriminator(self):
"""Enable Multi Exit
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableMultiExitDiscriminator')
@property
def EnableNextHop(self):
"""Enable Next Hop
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableNextHop')
@property
def EnableOrigin(self):
"""Enable Origin
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableOrigin')
@property
def EnableOriginatorId(self):
"""Enable Originator ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableOriginatorId')
@property
def Errors(self):
"""A list of errors that have occurred
Returns:
list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))
"""
return self._get_attribute('errors')
@property
def EsiType(self):
"""ESI Type
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('esiType')
@property
def EsiValue(self):
"""ESI Value
Returns:
list(str)
"""
return self._get_attribute('esiValue')
@property
def ImportRtListSameAsExportRtList(self):
"""Import RT List Same As Export RT List
Returns:
bool
"""
return self._get_attribute('importRtListSameAsExportRtList')
@ImportRtListSameAsExportRtList.setter
def ImportRtListSameAsExportRtList(self, value):
self._set_attribute('importRtListSameAsExportRtList', value)
@property
def IncludePmsiTunnelAttribute(self):
"""Include PMSI Tunnel Attribute
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includePmsiTunnelAttribute')
@property
def Ipv4NextHop(self):
"""IPv4 Next Hop
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('ipv4NextHop')
@property
def Ipv6NextHop(self):
"""IPv6 Next Hop
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('ipv6NextHop')
@property
def L3vniImportRtListSameAsL3vniExportRtList(self):
"""L3 Import RT List Same As L3 Export RT List
Returns:
bool
"""
return self._get_attribute('l3vniImportRtListSameAsL3vniExportRtList')
@L3vniImportRtListSameAsL3vniExportRtList.setter
def L3vniImportRtListSameAsL3vniExportRtList(self, value):
self._set_attribute('l3vniImportRtListSameAsL3vniExportRtList', value)
@property
def LocalPreference(self):
"""Local Preference
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('localPreference')
@property
def MultiExitDiscriminator(self):
"""Multi Exit
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('multiExitDiscriminator')
@property
def MulticastTunnelType(self):
"""Multicast Tunnel Type
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('multicastTunnelType')
@property
def Multiplier(self):
"""Number of layer instances per parent instance (multiplier)
Returns:
number
"""
return self._get_attribute('multiplier')
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute('multiplier', value)
@property
def Name(self):
"""Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def NoOfASPathSegmentsPerRouteRange(self):
"""Number Of AS Path Segments Per Route Range
Returns:
number
"""
return self._get_attribute('noOfASPathSegmentsPerRouteRange')
@NoOfASPathSegmentsPerRouteRange.setter
def NoOfASPathSegmentsPerRouteRange(self, value):
self._set_attribute('noOfASPathSegmentsPerRouteRange', value)
@property
def NoOfClusters(self):
"""Number of Clusters
Returns:
number
"""
return self._get_attribute('noOfClusters')
@NoOfClusters.setter
def NoOfClusters(self, value):
self._set_attribute('noOfClusters', value)
@property
def NoOfCommunities(self):
"""Number of Communities
Returns:
number
"""
return self._get_attribute('noOfCommunities')
@NoOfCommunities.setter
def NoOfCommunities(self, value):
self._set_attribute('noOfCommunities', value)
@property
def NoOfExtendedCommunity(self):
"""Number of Extended Communities
Returns:
number
"""
return self._get_attribute('noOfExtendedCommunity')
@NoOfExtendedCommunity.setter
def NoOfExtendedCommunity(self, value):
self._set_attribute('noOfExtendedCommunity', value)
@property
def NumBroadcastDomainV6(self):
"""The number of broadcast domain to be configured under EVI
Returns:
number
"""
return self._get_attribute('numBroadcastDomainV6')
@NumBroadcastDomainV6.setter
def NumBroadcastDomainV6(self, value):
self._set_attribute('numBroadcastDomainV6', value)
@property
def NumRtInExportRouteTargetList(self):
"""Number of RTs in Export Route Target List(multiplier)
Returns:
number
"""
return self._get_attribute('numRtInExportRouteTargetList')
@NumRtInExportRouteTargetList.setter
def NumRtInExportRouteTargetList(self, value):
self._set_attribute('numRtInExportRouteTargetList', value)
@property
def NumRtInImportRouteTargetList(self):
"""Number of RTs in Import Route Target List(multiplier)
Returns:
number
"""
return self._get_attribute('numRtInImportRouteTargetList')
@NumRtInImportRouteTargetList.setter
def NumRtInImportRouteTargetList(self, value):
self._set_attribute('numRtInImportRouteTargetList', value)
@property
def NumRtInL3vniExportRouteTargetList(self):
"""Number of RTs in L3 Export Route Target List(multiplier)
Returns:
number
"""
return self._get_attribute('numRtInL3vniExportRouteTargetList')
@NumRtInL3vniExportRouteTargetList.setter
def NumRtInL3vniExportRouteTargetList(self, value):
self._set_attribute('numRtInL3vniExportRouteTargetList', value)
@property
def NumRtInL3vniImportRouteTargetList(self):
"""Number of RTs in L3 Import Route Target List(multiplier)
Returns:
number
"""
return self._get_attribute('numRtInL3vniImportRouteTargetList')
@NumRtInL3vniImportRouteTargetList.setter
def NumRtInL3vniImportRouteTargetList(self, value):
self._set_attribute('numRtInL3vniImportRouteTargetList', value)
@property
def Origin(self):
"""Origin
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('origin')
@property
def OriginatorId(self):
"""Originator ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('originatorId')
@property
def OverridePeerAsSetMode(self):
"""Override Peer AS# Set Mode
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('overridePeerAsSetMode')
@property
def PmsiTunnelIDv4(self):
"""PMSI Tunnel ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('pmsiTunnelIDv4')
@property
def PmsiTunnelIDv6(self):
"""PMSI Tunnel ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('pmsiTunnelIDv6')
@property
def RdEvi(self):
"""RD EVI
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('rdEvi')
@property
def RdIpAddress(self):
"""RD IP Addresses
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('rdIpAddress')
@property
def SessionStatus(self):
"""Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
Returns:
list(str[down|notStarted|up])
"""
return self._get_attribute('sessionStatus')
@property
def SetNextHop(self):
"""Set Next Hop
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('setNextHop')
@property
def SetNextHopIpType(self):
"""Set Next Hop IP Type
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('setNextHopIpType')
@property
def StackedLayers(self):
"""List of secondary (many to one) child layer protocols
Returns:
list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])
"""
return self._get_attribute('stackedLayers')
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute('stackedLayers', value)
@property
def StateCounts(self):
"""A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
Returns:
dict(total:number,notStarted:number,down:number,up:number)
"""
return self._get_attribute('stateCounts')
@property
def Status(self):
"""Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns:
str(configured|error|mixed|notStarted|started|starting|stopping)
"""
return self._get_attribute('status')
@property
def UpstreamDownstreamAssignedMplsLabel(self):
"""Upstream/Downstream Assigned MPLS Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('upstreamDownstreamAssignedMplsLabel')
@property
def UseIpv4MappedIpv6Address(self):
"""Use IPv4 Mapped IPv6 Address
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('useIpv4MappedIpv6Address')
@property
def UseUpstreamDownstreamAssignedMplsLabel(self):
"""Use Upstream/Downstream Assigned MPLS Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('useUpstreamDownstreamAssignedMplsLabel')
def add(self, ConnectedVia=None, ImportRtListSameAsExportRtList=None, L3vniImportRtListSameAsL3vniExportRtList=None, Multiplier=None, Name=None, NoOfASPathSegmentsPerRouteRange=None, NoOfClusters=None, NoOfCommunities=None, NoOfExtendedCommunity=None, NumBroadcastDomainV6=None, NumRtInExportRouteTargetList=None, NumRtInImportRouteTargetList=None, NumRtInL3vniExportRouteTargetList=None, NumRtInL3vniImportRouteTargetList=None, StackedLayers=None):
"""Adds a new bgpIPv6EvpnVpws node on the server and retrieves it in this instance.
Args:
ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
ImportRtListSameAsExportRtList (bool): Import RT List Same As Export RT List
L3vniImportRtListSameAsL3vniExportRtList (bool): L3 Import RT List Same As L3 Export RT List
Multiplier (number): Number of layer instances per parent instance (multiplier)
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
NoOfASPathSegmentsPerRouteRange (number): Number Of AS Path Segments Per Route Range
NoOfClusters (number): Number of Clusters
NoOfCommunities (number): Number of Communities
NoOfExtendedCommunity (number): Number of Extended Communities
NumBroadcastDomainV6 (number): The number of broadcast domain to be configured under EVI
NumRtInExportRouteTargetList (number): Number of RTs in Export Route Target List(multiplier)
NumRtInImportRouteTargetList (number): Number of RTs in Import Route Target List(multiplier)
NumRtInL3vniExportRouteTargetList (number): Number of RTs in L3 Export Route Target List(multiplier)
NumRtInL3vniImportRouteTargetList (number): Number of RTs in L3 Import Route Target List(multiplier)
StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
Returns:
self: This instance with all currently retrieved bgpIPv6EvpnVpws data using find and the newly added bgpIPv6EvpnVpws data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the bgpIPv6EvpnVpws data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, Errors=None, EsiValue=None, ImportRtListSameAsExportRtList=None, L3vniImportRtListSameAsL3vniExportRtList=None, Multiplier=None, Name=None, NoOfASPathSegmentsPerRouteRange=None, NoOfClusters=None, NoOfCommunities=None, NoOfExtendedCommunity=None, NumBroadcastDomainV6=None, NumRtInExportRouteTargetList=None, NumRtInImportRouteTargetList=None, NumRtInL3vniExportRouteTargetList=None, NumRtInL3vniImportRouteTargetList=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves bgpIPv6EvpnVpws data from the server.
All named parameters support regex and can be used to selectively retrieve bgpIPv6EvpnVpws data from the server.
By default the find method takes no parameters and will retrieve all bgpIPv6EvpnVpws data from the server.
Args:
ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context
Errors (list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))): A list of errors that have occurred
EsiValue (list(str)): ESI Value
ImportRtListSameAsExportRtList (bool): Import RT List Same As Export RT List
L3vniImportRtListSameAsL3vniExportRtList (bool): L3 Import RT List Same As L3 Export RT List
Multiplier (number): Number of layer instances per parent instance (multiplier)
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
NoOfASPathSegmentsPerRouteRange (number): Number Of AS Path Segments Per Route Range
NoOfClusters (number): Number of Clusters
NoOfCommunities (number): Number of Communities
NoOfExtendedCommunity (number): Number of Extended Communities
NumBroadcastDomainV6 (number): The number of broadcast domain to be configured under EVI
NumRtInExportRouteTargetList (number): Number of RTs in Export Route Target List(multiplier)
NumRtInImportRouteTargetList (number): Number of RTs in Import Route Target List(multiplier)
NumRtInL3vniExportRouteTargetList (number): Number of RTs in L3 Export Route Target List(multiplier)
NumRtInL3vniImportRouteTargetList (number): Number of RTs in L3 Import Route Target List(multiplier)
SessionStatus (list(str[down|notStarted|up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
Status (str(configured|error|mixed|notStarted|started|starting|stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns:
self: This instance with matching bgpIPv6EvpnVpws data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of bgpIPv6EvpnVpws data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the bgpIPv6EvpnVpws data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, AdRouteLabel=None, AdvertiseL3vniSeparately=None, AggregatorAs=None, AggregatorId=None, AsSetMode=None, AutoConfigPMSITunnelId=None, AutoConfigureRdIpAddress=None, BMacFirstLabel=None, BMacSecondLabel=None, EnableAggregatorId=None, EnableAsPathSegments=None, EnableAtomicAggregate=None, EnableBMacSecondLabel=None, EnableCluster=None, EnableCommunity=None, EnableExtendedCommunity=None, EnableL3TargetOnlyForRouteType5=None, EnableL3vniTargetList=None, EnableLocalPreference=None, EnableMultiExitDiscriminator=None, EnableNextHop=None, EnableOrigin=None, EnableOriginatorId=None, EsiType=None, IncludePmsiTunnelAttribute=None, Ipv4NextHop=None, Ipv6NextHop=None, LocalPreference=None, MultiExitDiscriminator=None, MulticastTunnelType=None, Origin=None, OriginatorId=None, OverridePeerAsSetMode=None, PmsiTunnelIDv4=None, PmsiTunnelIDv6=None, RdEvi=None, RdIpAddress=None, SetNextHop=None, SetNextHopIpType=None, UpstreamDownstreamAssignedMplsLabel=None, UseIpv4MappedIpv6Address=None, UseUpstreamDownstreamAssignedMplsLabel=None):
"""Base class infrastructure that gets a list of bgpIPv6EvpnVpws device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args:
PortNames (str): optional regex of port names
Active (str): optional regex of active
AdRouteLabel (str): optional regex of adRouteLabel
AdvertiseL3vniSeparately (str): optional regex of advertiseL3vniSeparately
AggregatorAs (str): optional regex of aggregatorAs
AggregatorId (str): optional regex of aggregatorId
AsSetMode (str): optional regex of asSetMode
AutoConfigPMSITunnelId (str): optional regex of autoConfigPMSITunnelId
AutoConfigureRdIpAddress (str): optional regex of autoConfigureRdIpAddress
BMacFirstLabel (str): optional regex of bMacFirstLabel
BMacSecondLabel (str): optional regex of bMacSecondLabel
EnableAggregatorId (str): optional regex of enableAggregatorId
EnableAsPathSegments (str): optional regex of enableAsPathSegments
EnableAtomicAggregate (str): optional regex of enableAtomicAggregate
EnableBMacSecondLabel (str): optional regex of enableBMacSecondLabel
EnableCluster (str): optional regex of enableCluster
EnableCommunity (str): optional regex of enableCommunity
EnableExtendedCommunity (str): optional regex of enableExtendedCommunity
EnableL3TargetOnlyForRouteType5 (str): optional regex of enableL3TargetOnlyForRouteType5
EnableL3vniTargetList (str): optional regex of enableL3vniTargetList
EnableLocalPreference (str): optional regex of enableLocalPreference
EnableMultiExitDiscriminator (str): optional regex of enableMultiExitDiscriminator
EnableNextHop (str): optional regex of enableNextHop
EnableOrigin (str): optional regex of enableOrigin
EnableOriginatorId (str): optional regex of enableOriginatorId
EsiType (str): optional regex of esiType
IncludePmsiTunnelAttribute (str): optional regex of includePmsiTunnelAttribute
Ipv4NextHop (str): optional regex of ipv4NextHop
Ipv6NextHop (str): optional regex of ipv6NextHop
LocalPreference (str): optional regex of localPreference
MultiExitDiscriminator (str): optional regex of multiExitDiscriminator
MulticastTunnelType (str): optional regex of multicastTunnelType
Origin (str): optional regex of origin
OriginatorId (str): optional regex of originatorId
OverridePeerAsSetMode (str): optional regex of overridePeerAsSetMode
PmsiTunnelIDv4 (str): optional regex of pmsiTunnelIDv4
PmsiTunnelIDv6 (str): optional regex of pmsiTunnelIDv6
RdEvi (str): optional regex of rdEvi
RdIpAddress (str): optional regex of rdIpAddress
SetNextHop (str): optional regex of setNextHop
SetNextHopIpType (str): optional regex of setNextHopIpType
UpstreamDownstreamAssignedMplsLabel (str): optional regex of upstreamDownstreamAssignedMplsLabel
UseIpv4MappedIpv6Address (str): optional regex of useIpv4MappedIpv6Address
UseUpstreamDownstreamAssignedMplsLabel (str): optional regex of useUpstreamDownstreamAssignedMplsLabel
Returns:
list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def FetchAndUpdateConfigFromCloud(self, Mode):
"""Executes the fetchAndUpdateConfigFromCloud operation on the server.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/globals?deepchild=*|/api/v1/sessions/1/ixnetwork/topology?deepchild=*)): The method internally sets Arg1 to the current href for this instance
Mode (str):
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('FetchAndUpdateConfigFromCloud', payload=locals(), response_object=None)
def RestartDown(self):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('RestartDown', payload=locals(), response_object=None)
def RestartDown(self, SessionIndices):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('RestartDown', payload=locals(), response_object=None)
def RestartDown(self, SessionIndices):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('RestartDown', payload=locals(), response_object=None)
def Start(self):
"""Executes the start operation on the server.
Start selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Start(self, SessionIndices):
"""Executes the start operation on the server.
Start selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Start(self, SessionIndices):
"""Executes the start operation on the server.
Start selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Start', payload=locals(), response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stop selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
def Stop(self, SessionIndices):
"""Executes the stop operation on the server.
Stop selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
def Stop(self, SessionIndices):
"""Executes the stop operation on the server.
Stop selected protocols.
Args:
Arg1 (list(str[None|/api/v1/sessions/1/ixnetwork/topology])): The method internally sets Arg1 to the encapsulated list of hrefs for this instance
SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self
return self._execute('Stop', payload=locals(), response_object=None)
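# ---------------------------------------------------------------------------
# Hedged usage sketch (comments only, not part of the generated class).
# It assumes an already-connected ixnetwork_restpy session and a parent NGPF
# container object, here called `parent`, that exposes this BgpIPv6EvpnVpws
# child; neither is defined in this file.
#
#   vpws = parent.BgpIPv6EvpnVpws.add(Name='evpn-vpws-1', Multiplier=1)
#   vpws.Start()                              # bring the sessions up
#   up = vpws.find(SessionStatus='^up$')      # named find() args accept regex
#   print(up.StateCounts)
# ---------------------------------------------------------------------------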
|
[
"[email protected]"
] | |
806c4c878888711dca3ec79b8fe335bae9900008
|
430b9e03e36e355bba475df49505011f99fa0819
|
/web/第4课:页面交互操作/d5_鼠标操作.py
|
7d4eb25bce43899556d622d03e2e63d1e93a663c
|
[] |
no_license
|
gaoyang1224/mysite
|
b43e5d5e378b810b94dd60ffcac1c992173cc11a
|
72150c67b9590b0498241a1eacb2669a836520ff
|
refs/heads/master
| 2023-05-01T21:42:40.096287 | 2021-05-20T14:40:30 | 2021-05-20T14:40:30 | 368,254,604 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 575 |
py
|
import time
from selenium import webdriver
from selenium.webdriver import ActionChains
driver = webdriver.Firefox()
driver.implicitly_wait(4)
driver.get('file:///D:/classes/web_auto_testing/%E7%AC%AC4%E8%AF%BE%EF%BC%9A%E9%A1%B5%E9%9D%A2%E4%BA%A4%E4%BA%92%E6%93%8D%E4%BD%9C/alert_demo.html')
# Complex version:
# Initialize ActionChains: an action chain,
action = ActionChains(driver)
# Locate an element
h2 = driver.find_element('xpath', '//h2')
# click operation
action.click(h2).perform()
time.sleep(5)
# Simple version
# h2 = driver.find_element('xpath', '//h2')
# h2.click()
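# Other common ActionChains operations follow the same build-then-perform
# pattern (illustrative, left commented out like the simple variant above):
# action.double_click(h2).perform()      # double-click the element
# action.context_click(h2).perform()     # right-click / context menu
# action.move_to_element(h2).perform()   # hover over the element
# driver.quit()                          # close the browser when finished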
|
[
"[email protected]"
] | |
a4da331d66a9bc9ab226ec4306a45994e44a8df7
|
3e59c64c78aa3ffc4ca6ee358ee1a3ba61e2d4af
|
/energy/activation.py
|
596fbb09332eba316b94d644fc50b0773c482779
|
[
"MIT"
] |
permissive
|
pminervini/DeepKGC
|
de35f75fac9c64ca6e09e4ab244552792669678d
|
ed55d0a28d7607324def7c48ebde98786c11d5e1
|
refs/heads/master
| 2016-09-06T02:36:47.748324 | 2015-07-06T12:35:07 | 2015-07-06T12:35:07 | 38,617,255 | 5 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 366 |
py
|
# -*- coding: utf-8 -*-
import theano.tensor as T
# Activation functions
def htanh(x):
return -1. * (x < -1.) + x * (x < 1.) * (x >= -1.) + 1. * (x >= 1)
def hsigm(x):
return x * (x < 1) * (x > 0) + 1. * (x >= 1)
def rect(x):
return x * (x > 0)
def sigm(x):
return T.nnet.sigmoid(x)
def tanh(x):
return T.tanh(x)
def lin(x):
return x
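if __name__ == "__main__":
    # Minimal self-check sketch (assumes Theano and NumPy are installed):
    # compile two of the activations into callable functions and evaluate
    # them on a small vector.
    import numpy as np
    import theano
    x = T.vector('x')
    f_rect = theano.function([x], rect(x))
    f_sigm = theano.function([x], sigm(x))
    v = np.array([-1.0, 0.5, 2.0], dtype=theano.config.floatX)
    print(f_rect(v))   # rectified values: negatives clipped to 0
    print(f_sigm(v))   # sigmoid of each entry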
|
[
"[email protected]"
] | |
d42e178adedceb3d83a4176e7940c42721a0994f
|
a2b23a8ab40a01903438b22cf964704ad90ea414
|
/0x0A-python-inheritance/10-square.py
|
945e59b894388c2c68e3a4beff404c0670f4ff3b
|
[] |
no_license
|
Katorea132/higher_level_programming
|
b78809d5d2a052c1e9680d24cc547d12ac69c41e
|
746f094c10fed8c2497b65c7a18c782e1b7cd3a9
|
refs/heads/master
| 2022-12-17T04:39:57.794263 | 2020-09-24T19:30:57 | 2020-09-24T19:30:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 656 |
py
|
#!/usr/bin/python3
"""THis module is for squares
"""
Rekt = __import__("9-rectangle").Rectangle
class Square(Rekt):
"""The square class from the super class rectangle from
the super class geometry
Args:
Rekt (class): super class
"""
def __init__(self, size):
"""Initializer
Args:
size (integer): The size of a side of the square
"""
self.integer_validator("size", size)
self.__size = size
super().__init__(size, size)
def area(self):
"""Returns the area
Returns:
int: The area
"""
return self.__size * self.__size
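if __name__ == "__main__":
    # Quick sketch: relies on 9-rectangle.py (and its geometry parent)
    # being importable from the same directory, as the import above assumes.
    s = Square(4)
    print(s.area())                   # 16
    print(issubclass(Square, Rekt))   # True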
|
[
"[email protected]"
] | |
3fcee134c03e33b7dcf94b71921e4a066cf3c566
|
105ef2d5f8bba13c15deb8c4a2a9af307b4e547a
|
/Baekjoon/python/11053.py
|
212a1537f73cd5c4d20725f1cd212c45c8474320
|
[] |
no_license
|
caniro/algo-note
|
1ec4c0e08adcb542d3356daf7b6e943af722394f
|
d237a5b58a67ca453dc7a1a335f99428af2c5df5
|
refs/heads/master
| 2023-08-29T22:39:35.189711 | 2021-11-04T11:18:07 | 2021-11-04T11:18:07 | 260,473,565 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 971 |
py
|
# Longest increasing subsequence : https://www.acmicpc.net/problem/11053
from sys import stdin
input = stdin.readline
INF = 1e9
def lis(arr):
if not arr:
return 0
c = [-INF] + [INF] * len(arr)
c[1] = arr[0]
max_length = 1
def search(low, high, value):
if low == high:
return low
elif low + 1 == high:
return high if value > c[low] else low
mid = (low + high) // 2
if c[mid] == value:
return mid
elif c[mid] < value:
return search(mid + 1, high, value)
else:
return search(low, mid, value)
for num in arr[1:]:
if num > c[max_length]:
max_length += 1
c[max_length] = num
else:
next_idx = search(1, max_length, num)
c[next_idx] = num
return max_length
N = int(input().rstrip())
A = [int(n) for n in input().rstrip().split()]
print(lis(A))
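# Worked example: for the sample input
#   6
#   10 20 10 30 20 50
# lis(A) returns 4 (one longest increasing subsequence is 10 20 30 50).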
|
[
"[email protected]"
] | |
6af8e050da68bfdedfdc86850a2cfb29c077ba0a
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/twilio/twilio/rest/preview/trusted_comms/business/__init__.py
|
f8c935fdadfa88c9e90f8212fc00caf550491736
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4568e5a8fab302e3c70ed11607b218146e4027e860e186373a1901bf7e49b1cc
size 8394
|
[
"[email protected]"
] | |
375a99607fd2f2f1a217329571e15ee926971bc9
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/1332.py
|
14d48f941dcb48478886f954b0ba13b7112a23ce
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 610 |
py
|
def line(f):
return f.readline().strip()
f = open("A-small-attempt0.in", "r")
o = open("1.out", "w")
T = int(line(f))
for t in xrange(T):
ans1 = int(line(f))
arr1 = []
for i in xrange(4):
arr1.append(map(int, line(f).split()))
ans2 = int(line(f))
arr2 = []
for i in xrange(4):
arr2.append(map(int, line(f).split()))
overlap = set(arr1[ans1-1]).intersection(set(arr2[ans2-1]))
if len(overlap) == 0:
s = "Case #%d: Volunteer cheated!" % (t+1)
elif len(overlap) == 1:
s = "Case #%d: %d" % (t+1, overlap.pop())
else:
s = "Case #%d: Bad magician!" % (t+1)
print>>o, s
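# Worked example of the three outcomes: if arr1[ans1-1] == [1, 2, 3, 4] and
# arr2[ans2-1] == [3, 5, 6, 7], the overlap is {3} and the output line is
# "Case #k: 3"; an empty overlap prints "Volunteer cheated!" and an overlap
# with two or more cards prints "Bad magician!".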
|
[
"[email protected]"
] | |
45792ef3fd3e901732b4fa5547b889acb1f5ba55
|
baf8ccd12b27d0882c75a9c3845a0679e831f618
|
/22_numerai/rl/sarsa.py
|
b42dcf00bd73335c856a2ee0f4ee839362e9fd06
|
[
"MIT"
] |
permissive
|
Tjorriemorrie/trading
|
c55d545a0a09e3fb92673696e95dd66b02858ab6
|
aafa15a6c564bfa86948ab30e33d554172b38a3e
|
refs/heads/master
| 2022-12-13T20:57:23.591343 | 2021-07-07T20:28:34 | 2021-07-07T20:28:34 | 28,739,306 | 2 | 2 |
MIT
| 2022-07-06T20:01:28 | 2015-01-03T08:55:17 |
q
|
UTF-8
|
Python
| false | false | 2,238 |
py
|
import gzip
import logging
import operator
import os
import pickle
from world import World
logging.getLogger(__name__)
class Sarsa():
def __init__(self, filename):
self.filename = filename
self.world = World()
self.alpha = 0.
self.epsilon = self.alpha / 2.
self.delta = None
def __enter__(self):
try:
with gzip.open(self.filename) as fz:
q = pickle.load(fz)
except (IOError, EOFError) as e:
logging.warn('Could not load Q at {}'.format(self.filename))
q = {}
self.q = q
logging.debug('Q loaded')
return self
def __exit__(self, exc_type, exc_value, traceback):
# filename_tmp = '{0}/models/tmp.pklz'.format(os.path.realpath(os.path.dirname(__file__)))
# filename = '{0}/models/{1}_{2}.pklz'.format(os.path.realpath(os.path.dirname(__file__)), currency, interval)
with gzip.open(self.filename, 'wb') as fz:
pickle.dump(self.q, fz)
# os.rename(filename_tmp, filename)
logging.debug('Q saved')
def train(self):
logging.info('training...')
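# NOTE: getState, getAction, getReward, getDelta, updateQ and the data/
# hyperparameters they consume (df, periods, q, epsilon, actions, pip_mul,
# std, alpha) are not defined in this file; they are expected to come from
# the surrounding package.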
# reset delta
self.delta = None
# initial state
s = getState(df, periods)
# initial action
a = getAction(q, s, epsilon, actions)
# get reward
r, ticks = getReward(df, a, pip_mul, std)
# get delta
d = getDelta(q, s, a, r)
# update Q
q = updateQ(q, s, a, d, r, alpha)
return q, r, d, ticks
def summarizeActions(q):
summary_total = {}
summary_count = {}
for key, value in q.iteritems():
state, action = key.split('|')
# total
action_total = summary_total.get(action, 0)
action_total += value
action_total /= 2
summary_total[action] = action_total
action_count = summary_count.get(action, 0)
action_count += 1
summary_count[action] = action_count
summary_sorted = sorted(summary_total.items(), key=operator.itemgetter(1))
for action, info in summary_sorted:
logging.error('{0:10s} after {2} states with {1:.4f} avg'.format(action, info, summary_count[action]))
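# Hedged usage sketch (comments only): the context manager loads Q on entry
# and pickles it back on exit; the model path below is made up.
#
#   sarsa = Sarsa('models/q_eurusd.pklz')
#   with sarsa:
#       sarsa.train()          # see NOTE in train() about missing helpers
#   summarizeActions(sarsa.q)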
|
[
"[email protected]"
] | |
c09732f66b28e55cad678be37b13b597723a0410
|
d12b59b33df5c467abf081d48e043dac70cc5a9c
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/l2tpcapacity_1fb03b1eecddd532c02195eaf76667b2.py
|
1845fd0d4f5769a607323c17e090b428a5725628
|
[
"MIT"
] |
permissive
|
ajbalogh/ixnetwork_restpy
|
59ce20b88c1f99f95a980ff01106bda8f4ad5a0f
|
60a107e84fd8c1a32e24500259738e11740069fd
|
refs/heads/master
| 2023-04-02T22:01:51.088515 | 2021-04-09T18:39:28 | 2021-04-09T18:39:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,059 |
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class L2tpCapacity(Base):
"""This object measures the L2TP capacity of the LAC DUT.
The L2tpCapacity class encapsulates a list of l2tpCapacity resources that are managed by the user.
A list of resources can be retrieved from the server using the L2tpCapacity.find() method.
The list can be managed by using the L2tpCapacity.add() and L2tpCapacity.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'l2tpCapacity'
_SDM_ATT_MAP = {
'InputParameters': 'inputParameters',
'Mode': 'mode',
'Name': 'name',
}
def __init__(self, parent):
super(L2tpCapacity, self).__init__(parent)
@property
def Results(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_23583c0cce1dabf7b75fe7d2ae18cfc4.Results): An instance of the Results class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_23583c0cce1dabf7b75fe7d2ae18cfc4 import Results
return Results(self)._select()
@property
def TestConfig(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_69c95a290760a4febaa65cc7629e1166.TestConfig): An instance of the TestConfig class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_69c95a290760a4febaa65cc7629e1166 import TestConfig
return TestConfig(self)
@property
def InputParameters(self):
"""
Returns
-------
- str: Input Parameters
"""
return self._get_attribute(self._SDM_ATT_MAP['InputParameters'])
@InputParameters.setter
def InputParameters(self, value):
self._set_attribute(self._SDM_ATT_MAP['InputParameters'], value)
@property
def Mode(self):
"""
Returns
-------
- str(existingMode | newMode): Test mode
"""
return self._get_attribute(self._SDM_ATT_MAP['Mode'])
@Mode.setter
def Mode(self, value):
self._set_attribute(self._SDM_ATT_MAP['Mode'], value)
@property
def Name(self):
"""
Returns
-------
- str: Test name
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
def update(self, InputParameters=None, Mode=None, Name=None):
"""Updates l2tpCapacity resource on the server.
Args
----
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, InputParameters=None, Mode=None, Name=None):
"""Adds a new l2tpCapacity resource on the server and adds it to the container.
Args
----
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with all currently retrieved l2tpCapacity resources using find and the newly added l2tpCapacity resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained l2tpCapacity resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, InputParameters=None, Mode=None, Name=None):
"""Finds and retrieves l2tpCapacity resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve l2tpCapacity resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all l2tpCapacity resources from the server.
Args
----
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with matching l2tpCapacity resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of l2tpCapacity data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the l2tpCapacity resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Apply(self):
"""Executes the apply operation on the server.
Applies the specified Quick Test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self):
"""Executes the applyAsync operation on the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self):
"""Executes the applyAsyncResult operation on the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self):
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self):
"""Executes the generateReport operation on the server.
Generate a PDF report for the last successful test run.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
The IxNetwork model allows for multiple method signatures with the same name while python does not.
run(InputParameters=string)list
-------------------------------
- InputParameters (str): The input arguments of the test.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Starts the specified Quick Test.
The IxNetwork model allows for multiple method signatures with the same name while python does not.
start(InputParameters=string)
-----------------------------
- InputParameters (str): The input arguments of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self):
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('waitForTest', payload=payload, response_object=None)
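# ---------------------------------------------------------------------------
# Hedged usage sketch (comments only): assumes `ixnetwork` is a connected
# session root whose QuickTest container exposes this resource; the exact
# parent attribute is not shown in this file.
#
#   qt = ixnetwork.QuickTest.L2tpCapacity.add(Name='l2tp-capacity', Mode='newMode')
#   qt.Apply()
#   result = qt.Run()          # synchronous: waits for and returns the result
#   qt.GenerateReport()
# ---------------------------------------------------------------------------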
|
[
"[email protected]"
] | |
c0b2dc52c6067fe4d6acf3ac56599bffd2491b3e
|
e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488
|
/minfraud/komand_minfraud/actions/email_lookup/action.py
|
9925063bfcaa673b4be293d6606ed91c7b12b331
|
[
"MIT"
] |
permissive
|
OSSSP/insightconnect-plugins
|
ab7c77f91c46bd66b10db9da1cd7571dfc048ab7
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
refs/heads/master
| 2023-04-06T23:57:28.449617 | 2020-03-18T01:24:28 | 2020-03-18T01:24:28 | 248,185,529 | 1 | 0 |
MIT
| 2023-04-04T00:12:18 | 2020-03-18T09:14:53 | null |
UTF-8
|
Python
| false | false | 2,803 |
py
|
import komand
from .schema import EmailLookupInput, EmailLookupOutput
# Custom imports below
import minfraud
class EmailLookup(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='email_lookup',
description='Query email info',
input=EmailLookupInput(),
output=EmailLookupOutput())
def run(self, params={}):
address = params.get('address')
domain = params.get('domain')
email = params.get('email')
user = self.connection.user
license = self.connection.license
# Set client
client = minfraud.Client(user, license)
# Define request
request = {'device': {'ip_address': address}}
email_dic = {}
if domain:
email_dic['domain'] = domain
if email:
email_dic['address'] = email
# Add email_dic to request
if email_dic:
request['email'] = email_dic
else:
self.logger.info('No email info provided')
try:
# Generate request
insights = client.insights(request)
except minfraud.AuthenticationError:
self.logger.error('Authentication failed')
raise
except minfraud.InsufficientFundsError:
self.logger.error('Insufficient funds')
raise
except minfraud.InvalidRequestError:
self.logger.error('Invalid request')
raise
except minfraud.HttpError:
self.logger.error('Unexpected HTTP error occurred')
raise
except minfraud.MinFraudError:
self.logger.error('Unexpected content received from server')
raise
# Overall risk score
risk_score = str(insights.risk_score)
#TO-DO - rename email to email_result
# Email info
is_free = insights.email.is_free
is_high_risk = insights.email.is_high_risk
email_result = {'is_free': is_free,
'is_high_risk': is_high_risk
}
# Clean email dict
email_result = komand.helper.clean_dict(email_result)
return {'risk_score': risk_score, 'email_result': email_result}
def test(self):
user = self.connection.user
license = self.connection.license
# Set client
client = minfraud.Client(user, license)
# Define request
request = {'device': {'ip_address': '8.8.8.8'}}
try:
# Generate request
insights = client.insights(request)
except minfraud.AuthenticationError:
self.logger.error('Authentication failed')
raise
except minfraud.InsufficientFundsError:
self.logger.error('Insufficient funds')
raise
return {}
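# Hedged usage sketch (comments only): komand normally drives actions via the
# plugin runtime, but run() just needs connection credentials plus a params
# dict shaped like this (all values below are made up):
#
#   action = EmailLookup()
#   # action.connection must expose `user` and `license` (minFraud credentials)
#   out = action.run({'address': '203.0.113.10',
#                     'email': '[email protected]',
#                     'domain': 'example.com'})
#   # -> {'risk_score': '...', 'email_result': {'is_free': ..., 'is_high_risk': ...}}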
|
[
"[email protected]"
] | |
8dae3ca824e26c97a94c7051f539dda7571c8482
|
26d6c34df00a229dc85ad7326de6cb5672be7acc
|
/msgraph-cli-extensions/v1_0/calendar_v1_0/azext_calendar_v1_0/vendored_sdks/calendar/aio/operations/_users_events_calendar_operations.py
|
7667aac8777412b86e848c9b3827e2c95d9a37a4
|
[
"MIT"
] |
permissive
|
BrianTJackett/msgraph-cli
|
87f92471f68f85e44872939d876b9ff5f0ae6b2c
|
78a4b1c73a23b85c070fed2fbca93758733f620e
|
refs/heads/main
| 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 |
NOASSERTION
| 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null |
UTF-8
|
Python
| false | false | 90,659 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsersEventsCalendarOperations:
"""UsersEventsCalendarOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~calendar.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_calendar_permissions(
self,
user_id: str,
event_id: str,
orderby: Optional[List[Union[str, "models.Enum384"]]] = None,
select: Optional[List[Union[str, "models.Enum385"]]] = None,
expand: Optional[List[str]] = None,
**kwargs
) -> AsyncIterable["models.CollectionOfCalendarPermission6"]:
"""Get calendarPermissions from users.
Get calendarPermissions from users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~calendar.models.Enum384]
:param select: Select properties to be returned.
:type select: list[str or ~calendar.models.Enum385]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfCalendarPermission6 or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~calendar.models.CollectionOfCalendarPermission6]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfCalendarPermission6"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_calendar_permissions.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfCalendarPermission6', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_calendar_permissions.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/calendarPermissions'} # type: ignore
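# Hedged usage sketch (comments only): with a client generated from this SDK
# (hypothetical name `client`), the paged result of the operation above is
# consumed with `async for`:
#
#   async for perm in client.users_events_calendar.list_calendar_permissions(
#           user_id='<user-guid>', event_id='<event-id>'):
#       print(perm.id)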
async def create_calendar_permissions(
self,
user_id: str,
event_id: str,
body: "models.MicrosoftGraphCalendarPermission",
**kwargs
) -> "models.MicrosoftGraphCalendarPermission":
"""Create new navigation property to calendarPermissions for users.
Create new navigation property to calendarPermissions for users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param body: New navigation property.
:type body: ~calendar.models.MicrosoftGraphCalendarPermission
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphCalendarPermission, or the result of cls(response)
:rtype: ~calendar.models.MicrosoftGraphCalendarPermission
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphCalendarPermission"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_calendar_permissions.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphCalendarPermission')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphCalendarPermission', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_calendar_permissions.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/calendarPermissions'} # type: ignore
async def get_calendar_permissions(
self,
user_id: str,
event_id: str,
calendar_permission_id: str,
select: Optional[List[Union[str, "models.Enum386"]]] = None,
expand: Optional[List[str]] = None,
**kwargs
) -> "models.MicrosoftGraphCalendarPermission":
"""Get calendarPermissions from users.
Get calendarPermissions from users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param calendar_permission_id: key: id of calendarPermission.
:type calendar_permission_id: str
:param select: Select properties to be returned.
:type select: list[str or ~calendar.models.Enum386]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphCalendarPermission, or the result of cls(response)
:rtype: ~calendar.models.MicrosoftGraphCalendarPermission
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphCalendarPermission"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_calendar_permissions.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'calendarPermission-id': self._serialize.url("calendar_permission_id", calendar_permission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphCalendarPermission', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_calendar_permissions.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/calendarPermissions/{calendarPermission-id}'} # type: ignore
async def update_calendar_permissions(
self,
user_id: str,
event_id: str,
calendar_permission_id: str,
body: "models.MicrosoftGraphCalendarPermission",
**kwargs
) -> None:
"""Update the navigation property calendarPermissions in users.
Update the navigation property calendarPermissions in users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param calendar_permission_id: key: id of calendarPermission.
:type calendar_permission_id: str
:param body: New navigation property values.
:type body: ~calendar.models.MicrosoftGraphCalendarPermission
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_calendar_permissions.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'calendarPermission-id': self._serialize.url("calendar_permission_id", calendar_permission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphCalendarPermission')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_calendar_permissions.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/calendarPermissions/{calendarPermission-id}'} # type: ignore
async def delete_calendar_permissions(
self,
user_id: str,
event_id: str,
calendar_permission_id: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete navigation property calendarPermissions for users.
Delete navigation property calendarPermissions for users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param calendar_permission_id: key: id of calendarPermission.
:type calendar_permission_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_calendar_permissions.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'calendarPermission-id': self._serialize.url("calendar_permission_id", calendar_permission_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_calendar_permissions.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/calendarPermissions/{calendarPermission-id}'} # type: ignore
def list_calendar_view(
self,
user_id: str,
event_id: str,
orderby: Optional[List[Union[str, "models.Enum387"]]] = None,
select: Optional[List[Union[str, "models.Enum388"]]] = None,
expand: Optional[List[Union[str, "models.Enum389"]]] = None,
**kwargs
) -> AsyncIterable["models.CollectionOfEvent28"]:
"""Get calendarView from users.
Get calendarView from users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~calendar.models.Enum387]
:param select: Select properties to be returned.
:type select: list[str or ~calendar.models.Enum388]
:param expand: Expand related entities.
:type expand: list[str or ~calendar.models.Enum389]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfEvent28 or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~calendar.models.CollectionOfEvent28]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfEvent28"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_calendar_view.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfEvent28', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_calendar_view.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/calendarView'} # type: ignore
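# --- Usage sketch (editor's note, not generated code) ---
# A minimal, hedged example of consuming the AsyncItemPaged returned by
# list_calendar_view. "ops" is a hypothetical instance of this operations
# class, the user/event ids are placeholders, and the $select values are
# illustrative assumptions about the Enum388 members.
#
#     async def print_calendar_view(ops):
#         # AsyncItemPaged is driven with "async for"; each item comes from the
#         # deserialized CollectionOfEvent28 pages fetched lazily by get_next.
#         async for event in ops.list_calendar_view(
#                 user_id="USER_ID", event_id="EVENT_ID",
#                 select=["id", "subject"]):
#             print(event)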
async def create_calendar_view(
self,
user_id: str,
event_id: str,
body: "models.MicrosoftGraphEvent",
**kwargs
) -> "models.MicrosoftGraphEvent":
"""Create new navigation property to calendarView for users.
Create new navigation property to calendarView for users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param body: New navigation property.
:type body: ~calendar.models.MicrosoftGraphEvent
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEvent, or the result of cls(response)
:rtype: ~calendar.models.MicrosoftGraphEvent
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEvent"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_calendar_view.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphEvent')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEvent', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_calendar_view.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/calendarView'} # type: ignore
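# --- Usage sketch (editor's note, not generated code) ---
# A hedged example of creating a calendarView navigation property. The body
# must be a models.MicrosoftGraphEvent; the "subject" keyword is an assumption
# about that model's constructor, and "ops" is a hypothetical operations
# instance.
#
#     async def add_view_event(ops):
#         body = models.MicrosoftGraphEvent(subject="Planning sync")  # assumed kwarg
#         created = await ops.create_calendar_view(
#             user_id="USER_ID", event_id="EVENT_ID", body=body)
#         return created  # deserialized MicrosoftGraphEvent on HTTP 201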
async def get_calendar_view(
self,
user_id: str,
event_id: str,
event_id1: str,
select: Optional[List[Union[str, "models.Enum390"]]] = None,
expand: Optional[List[Union[str, "models.Enum391"]]] = None,
**kwargs
) -> "models.MicrosoftGraphEvent":
"""Get calendarView from users.
Get calendarView from users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param select: Select properties to be returned.
:type select: list[str or ~calendar.models.Enum390]
:param expand: Expand related entities.
:type expand: list[str or ~calendar.models.Enum391]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEvent, or the result of cls(response)
:rtype: ~calendar.models.MicrosoftGraphEvent
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEvent"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_calendar_view.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEvent', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_calendar_view.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/calendarView/{event-id1}'} # type: ignore
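# --- Usage sketch (editor's note, not generated code) ---
# A hedged example of fetching a single calendarView event while trimming the
# payload with $select and pulling related entities with $expand. The string
# values passed for the enum-typed lists are assumptions; "ops" and the ids
# are placeholders.
#
#     async def fetch_view_event(ops):
#         event = await ops.get_calendar_view(
#             user_id="USER_ID", event_id="EVENT_ID", event_id1="CHILD_EVENT_ID",
#             select=["id", "subject"],      # serialized into $select
#             expand=["attachments"])        # serialized into $expand (assumed value)
#         return event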
async def update_calendar_view(
self,
user_id: str,
event_id: str,
event_id1: str,
body: "models.MicrosoftGraphEvent",
**kwargs
) -> None:
"""Update the navigation property calendarView in users.
Update the navigation property calendarView in users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param body: New navigation property values.
:type body: ~calendar.models.MicrosoftGraphEvent
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_calendar_view.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphEvent')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_calendar_view.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/calendarView/{event-id1}'} # type: ignore
async def delete_calendar_view(
self,
user_id: str,
event_id: str,
event_id1: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete navigation property calendarView for users.
Delete navigation property calendarView for users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_calendar_view.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_calendar_view.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/calendarView/{event-id1}'} # type: ignore
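# --- Usage sketch (editor's note, not generated code) ---
# A hedged example of a conditional delete. Passing if_match sends an If-Match
# header, so the service only deletes when the stored ETag still matches; the
# ETag value, ids and "ops" instance are placeholders.
#
#     async def delete_view_event(ops, etag):
#         await ops.delete_calendar_view(
#             user_id="USER_ID", event_id="EVENT_ID", event_id1="CHILD_EVENT_ID",
#             if_match=etag)   # omit if_match for an unconditional delete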
def list_events(
self,
user_id: str,
event_id: str,
orderby: Optional[List[Union[str, "models.Enum392"]]] = None,
select: Optional[List[Union[str, "models.Enum393"]]] = None,
expand: Optional[List[Union[str, "models.Enum394"]]] = None,
**kwargs
) -> AsyncIterable["models.CollectionOfEvent29"]:
"""Get events from users.
Get events from users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~calendar.models.Enum392]
:param select: Select properties to be returned.
:type select: list[str or ~calendar.models.Enum393]
:param expand: Expand related entities.
:type expand: list[str or ~calendar.models.Enum394]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either CollectionOfEvent29 or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~calendar.models.CollectionOfEvent29]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfEvent29"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_events.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfEvent29', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_events.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/events'} # type: ignore
async def create_events(
self,
user_id: str,
event_id: str,
body: "models.MicrosoftGraphEvent",
**kwargs
) -> "models.MicrosoftGraphEvent":
"""Create new navigation property to events for users.
Create new navigation property to events for users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param body: New navigation property.
:type body: ~calendar.models.MicrosoftGraphEvent
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEvent, or the result of cls(response)
:rtype: ~calendar.models.MicrosoftGraphEvent
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEvent"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_events.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphEvent')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEvent', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_events.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/events'} # type: ignore
async def get_events(
self,
user_id: str,
event_id: str,
event_id1: str,
select: Optional[List[Union[str, "models.Enum395"]]] = None,
expand: Optional[List[Union[str, "models.Enum396"]]] = None,
**kwargs
) -> "models.MicrosoftGraphEvent":
"""Get events from users.
Get events from users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param select: Select properties to be returned.
:type select: list[str or ~calendar.models.Enum395]
:param expand: Expand related entities.
:type expand: list[str or ~calendar.models.Enum396]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphEvent, or the result of cls(response)
:rtype: ~calendar.models.MicrosoftGraphEvent
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphEvent"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_events.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphEvent', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_events.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/events/{event-id1}'} # type: ignore
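# --- Usage sketch (editor's note, not generated code) ---
# A hedged example of the "cls" callback every operation accepts. As the code
# above shows, cls is called as cls(pipeline_response, deserialized, {}), so a
# caller can capture the raw response alongside the model. All other names are
# placeholders.
#
#     def keep_status(pipeline_response, deserialized, headers):
#         return pipeline_response.http_response.status_code, deserialized
#
#     async def fetch_event_with_status(ops):
#         status, event = await ops.get_events(
#             user_id="USER_ID", event_id="EVENT_ID", event_id1="CHILD_EVENT_ID",
#             cls=keep_status)
#         return status, event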
async def update_events(
self,
user_id: str,
event_id: str,
event_id1: str,
body: "models.MicrosoftGraphEvent",
**kwargs
) -> None:
"""Update the navigation property events in users.
Update the navigation property events in users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param body: New navigation property values.
:type body: ~calendar.models.MicrosoftGraphEvent
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_events.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphEvent')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_events.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/events/{event-id1}'} # type: ignore
async def delete_events(
self,
user_id: str,
event_id: str,
event_id1: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete navigation property events for users.
Delete navigation property events for users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param event_id1: key: id of event.
:type event_id1: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_events.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'event-id1': self._serialize.url("event_id1", event_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_events.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/events/{event-id1}'} # type: ignore
def list_multi_value_extended_properties(
self,
user_id: str,
event_id: str,
orderby: Optional[List[Union[str, "models.Enum397"]]] = None,
select: Optional[List[Union[str, "models.Enum398"]]] = None,
expand: Optional[List[str]] = None,
**kwargs
) -> AsyncIterable["models.CollectionOfMultiValueLegacyExtendedProperty17"]:
"""Get multiValueExtendedProperties from users.
Get multiValueExtendedProperties from users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~calendar.models.Enum397]
:param select: Select properties to be returned.
:type select: list[str or ~calendar.models.Enum398]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either CollectionOfMultiValueLegacyExtendedProperty17 or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~calendar.models.CollectionOfMultiValueLegacyExtendedProperty17]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfMultiValueLegacyExtendedProperty17"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_multi_value_extended_properties.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfMultiValueLegacyExtendedProperty17', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_multi_value_extended_properties.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/multiValueExtendedProperties'} # type: ignore
async def create_multi_value_extended_properties(
self,
user_id: str,
event_id: str,
body: "models.MicrosoftGraphMultiValueLegacyExtendedProperty",
**kwargs
) -> "models.MicrosoftGraphMultiValueLegacyExtendedProperty":
"""Create new navigation property to multiValueExtendedProperties for users.
Create new navigation property to multiValueExtendedProperties for users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param body: New navigation property.
:type body: ~calendar.models.MicrosoftGraphMultiValueLegacyExtendedProperty
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphMultiValueLegacyExtendedProperty, or the result of cls(response)
:rtype: ~calendar.models.MicrosoftGraphMultiValueLegacyExtendedProperty
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphMultiValueLegacyExtendedProperty"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_multi_value_extended_properties.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphMultiValueLegacyExtendedProperty')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphMultiValueLegacyExtendedProperty', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_multi_value_extended_properties.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/multiValueExtendedProperties'} # type: ignore
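# --- Usage sketch (editor's note, not generated code) ---
# A hedged example of adding a multi-value legacy extended property. The "id"
# and "value" keywords are assumptions about the
# MicrosoftGraphMultiValueLegacyExtendedProperty constructor, and the property
# id string is only an illustrative placeholder; "ops" is hypothetical.
#
#     async def tag_event(ops):
#         prop = models.MicrosoftGraphMultiValueLegacyExtendedProperty(
#             id="StringArray {PLACEHOLDER-GUID} Name Tags",   # assumed id format
#             value=["red", "priority"])                       # assumed values field
#         return await ops.create_multi_value_extended_properties(
#             user_id="USER_ID", event_id="EVENT_ID", body=prop)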
async def get_multi_value_extended_properties(
self,
user_id: str,
event_id: str,
multi_value_legacy_extended_property_id: str,
select: Optional[List[Union[str, "models.Enum399"]]] = None,
expand: Optional[List[str]] = None,
**kwargs
) -> "models.MicrosoftGraphMultiValueLegacyExtendedProperty":
"""Get multiValueExtendedProperties from users.
Get multiValueExtendedProperties from users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param multi_value_legacy_extended_property_id: key: id of multiValueLegacyExtendedProperty.
:type multi_value_legacy_extended_property_id: str
:param select: Select properties to be returned.
:type select: list[str or ~calendar.models.Enum399]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphMultiValueLegacyExtendedProperty, or the result of cls(response)
:rtype: ~calendar.models.MicrosoftGraphMultiValueLegacyExtendedProperty
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphMultiValueLegacyExtendedProperty"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_multi_value_extended_properties.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'multiValueLegacyExtendedProperty-id': self._serialize.url("multi_value_legacy_extended_property_id", multi_value_legacy_extended_property_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphMultiValueLegacyExtendedProperty', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_multi_value_extended_properties.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/multiValueExtendedProperties/{multiValueLegacyExtendedProperty-id}'} # type: ignore
async def update_multi_value_extended_properties(
self,
user_id: str,
event_id: str,
multi_value_legacy_extended_property_id: str,
body: "models.MicrosoftGraphMultiValueLegacyExtendedProperty",
**kwargs
) -> None:
"""Update the navigation property multiValueExtendedProperties in users.
Update the navigation property multiValueExtendedProperties in users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param multi_value_legacy_extended_property_id: key: id of multiValueLegacyExtendedProperty.
:type multi_value_legacy_extended_property_id: str
:param body: New navigation property values.
:type body: ~calendar.models.MicrosoftGraphMultiValueLegacyExtendedProperty
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_multi_value_extended_properties.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'multiValueLegacyExtendedProperty-id': self._serialize.url("multi_value_legacy_extended_property_id", multi_value_legacy_extended_property_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphMultiValueLegacyExtendedProperty')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_multi_value_extended_properties.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/multiValueExtendedProperties/{multiValueLegacyExtendedProperty-id}'} # type: ignore
async def delete_multi_value_extended_properties(
self,
user_id: str,
event_id: str,
multi_value_legacy_extended_property_id: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete navigation property multiValueExtendedProperties for users.
Delete navigation property multiValueExtendedProperties for users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param multi_value_legacy_extended_property_id: key: id of multiValueLegacyExtendedProperty.
:type multi_value_legacy_extended_property_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_multi_value_extended_properties.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'multiValueLegacyExtendedProperty-id': self._serialize.url("multi_value_legacy_extended_property_id", multi_value_legacy_extended_property_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_multi_value_extended_properties.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/multiValueExtendedProperties/{multiValueLegacyExtendedProperty-id}'} # type: ignore
def list_single_value_extended_properties(
self,
user_id: str,
event_id: str,
orderby: Optional[List[Union[str, "models.Enum400"]]] = None,
select: Optional[List[Union[str, "models.Enum401"]]] = None,
expand: Optional[List[str]] = None,
**kwargs
) -> AsyncIterable["models.CollectionOfSingleValueLegacyExtendedProperty17"]:
"""Get singleValueExtendedProperties from users.
Get singleValueExtendedProperties from users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~calendar.models.Enum400]
:param select: Select properties to be returned.
:type select: list[str or ~calendar.models.Enum401]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either CollectionOfSingleValueLegacyExtendedProperty17 or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~calendar.models.CollectionOfSingleValueLegacyExtendedProperty17]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfSingleValueLegacyExtendedProperty17"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_single_value_extended_properties.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfSingleValueLegacyExtendedProperty17', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_single_value_extended_properties.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/singleValueExtendedProperties'} # type: ignore
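# --- Usage sketch (editor's note, not generated code) ---
# A hedged reminder that the list_* operations build the request lazily: the
# call itself only returns an AsyncItemPaged, and the HTTP request (and any
# mapped error such as ResourceNotFoundError) surfaces during iteration.
# Names below are placeholders.
#
#     from azure.core.exceptions import ResourceNotFoundError
#
#     async def collect_props(ops):
#         pager = ops.list_single_value_extended_properties(
#             user_id="USER_ID", event_id="EVENT_ID")   # no I/O yet
#         try:
#             return [prop async for prop in pager]     # requests happen here
#         except ResourceNotFoundError:
#             return []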
async def create_single_value_extended_properties(
self,
user_id: str,
event_id: str,
body: "models.MicrosoftGraphSingleValueLegacyExtendedProperty",
**kwargs
) -> "models.MicrosoftGraphSingleValueLegacyExtendedProperty":
"""Create new navigation property to singleValueExtendedProperties for users.
Create new navigation property to singleValueExtendedProperties for users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param body: New navigation property.
:type body: ~calendar.models.MicrosoftGraphSingleValueLegacyExtendedProperty
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphSingleValueLegacyExtendedProperty, or the result of cls(response)
:rtype: ~calendar.models.MicrosoftGraphSingleValueLegacyExtendedProperty
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphSingleValueLegacyExtendedProperty"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_single_value_extended_properties.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphSingleValueLegacyExtendedProperty')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphSingleValueLegacyExtendedProperty', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_single_value_extended_properties.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/singleValueExtendedProperties'} # type: ignore
async def get_single_value_extended_properties(
self,
user_id: str,
event_id: str,
single_value_legacy_extended_property_id: str,
select: Optional[List[Union[str, "models.Enum402"]]] = None,
expand: Optional[List[str]] = None,
**kwargs
) -> "models.MicrosoftGraphSingleValueLegacyExtendedProperty":
"""Get singleValueExtendedProperties from users.
Get singleValueExtendedProperties from users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param single_value_legacy_extended_property_id: key: id of singleValueLegacyExtendedProperty.
:type single_value_legacy_extended_property_id: str
:param select: Select properties to be returned.
:type select: list[str or ~calendar.models.Enum402]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphSingleValueLegacyExtendedProperty, or the result of cls(response)
:rtype: ~calendar.models.MicrosoftGraphSingleValueLegacyExtendedProperty
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphSingleValueLegacyExtendedProperty"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_single_value_extended_properties.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'singleValueLegacyExtendedProperty-id': self._serialize.url("single_value_legacy_extended_property_id", single_value_legacy_extended_property_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphSingleValueLegacyExtendedProperty', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_single_value_extended_properties.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/singleValueExtendedProperties/{singleValueLegacyExtendedProperty-id}'} # type: ignore
async def update_single_value_extended_properties(
self,
user_id: str,
event_id: str,
single_value_legacy_extended_property_id: str,
body: "models.MicrosoftGraphSingleValueLegacyExtendedProperty",
**kwargs
) -> None:
"""Update the navigation property singleValueExtendedProperties in users.
Update the navigation property singleValueExtendedProperties in users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param single_value_legacy_extended_property_id: key: id of singleValueLegacyExtendedProperty.
:type single_value_legacy_extended_property_id: str
:param body: New navigation property values.
:type body: ~calendar.models.MicrosoftGraphSingleValueLegacyExtendedProperty
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_single_value_extended_properties.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'singleValueLegacyExtendedProperty-id': self._serialize.url("single_value_legacy_extended_property_id", single_value_legacy_extended_property_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphSingleValueLegacyExtendedProperty')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_single_value_extended_properties.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/singleValueExtendedProperties/{singleValueLegacyExtendedProperty-id}'} # type: ignore
async def delete_single_value_extended_properties(
self,
user_id: str,
event_id: str,
single_value_legacy_extended_property_id: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete navigation property singleValueExtendedProperties for users.
Delete navigation property singleValueExtendedProperties for users.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:param single_value_legacy_extended_property_id: key: id of singleValueLegacyExtendedProperty.
:type single_value_legacy_extended_property_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_single_value_extended_properties.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
'singleValueLegacyExtendedProperty-id': self._serialize.url("single_value_legacy_extended_property_id", single_value_legacy_extended_property_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_single_value_extended_properties.metadata = {'url': '/users/{user-id}/events/{event-id}/calendar/singleValueExtendedProperties/{singleValueLegacyExtendedProperty-id}'} # type: ignore
|
[
"[email protected]"
] | |
2e0cec4b5e1bec814164ba1d46fcb45d8a657b93
|
42d3d37a3dd22402154da4f4bd020afd7b7bad58
|
/examples/adspygoogle/adwords/v201109/basic_operations/add_ad_groups.py
|
af1d61fdd7c560ea8a6a05c35e2fac85f4c8c218
|
[
"Apache-2.0"
] |
permissive
|
nearlyfreeapps/python-googleadwords
|
1388316ec4f8d9d6074688ec4742872b34b67636
|
b30d90f74248cfd5ca52967e9ee77fc4cd1b9abc
|
refs/heads/master
| 2020-06-03T23:05:08.865535 | 2012-08-02T21:46:16 | 2012-08-02T21:46:16 | 5,278,295 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,856 |
py
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds ad groups to a given campaign. To get ad groups, run
get_ad_groups.py.
Tags: AdGroupService.mutate
"""
__author__ = '[email protected] (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
from adspygoogle.common import Utils
campaign_id = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
# Initialize appropriate service.
ad_group_service = client.GetAdGroupService(
'https://adwords-sandbox.google.com', 'v201109')
# Construct operations and add ad groups.
operations = [{
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'name': 'Earth to Mars Cruises #%s' % Utils.GetUniqueName(),
'status': 'ENABLED',
'bids': {
'xsi_type': 'ManualCPCAdGroupBids',
'keywordMaxCpc': {
'amount': {
'microAmount': '1000000'
}
},
# Optional field.
'keywordContentMaxCpc': {
'amount': {
'microAmount': '2000000'
}
}
}
}
}, {
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'name': 'Earth to Venus Cruises #%s' % Utils.GetUniqueName(),
'status': 'ENABLED',
'bids': {
'xsi_type': 'ManualCPCAdGroupBids',
'keywordMaxCpc': {
'amount': {
'microAmount': '2000000'
}
},
}
}
}]
ad_groups = ad_group_service.Mutate(operations)[0]
# Display results.
for ad_group in ad_groups['value']:
print ('Ad group with name \'%s\' and id \'%s\' was added.'
% (ad_group['name'], ad_group['id']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, campaign_id)
|
[
"[email protected]"
] | |
57999ae9ce2381856766849022c89cd3e153c7e4
|
9b4fe9c2693abc6ecc614088665cbf855971deaf
|
/744.find-smallest-letter-greater-than-target.py
|
49625d5f3841c1e6060e6f275b7326d894db8a48
|
[
"MIT"
] |
permissive
|
windard/leeeeee
|
e795be2b9dcabfc9f32fe25794878e591a6fb2c8
|
0dd67edca4e0b0323cb5a7239f02ea46383cd15a
|
refs/heads/master
| 2022-08-12T19:51:26.748317 | 2022-08-07T16:01:30 | 2022-08-07T16:01:30 | 222,122,359 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,823 |
py
|
# coding=utf-8
#
# @lc app=leetcode id=744 lang=python
#
# [744] Find Smallest Letter Greater Than Target
#
# https://leetcode.com/problems/find-smallest-letter-greater-than-target/description/
#
# algorithms
# Easy (43.56%)
# Likes: 245
# Dislikes: 376
# Total Accepted: 46.6K
# Total Submissions: 104.9K
# Testcase Example: '["c","f","j"]\n"a"'
#
#
# Given a list of sorted characters letters containing only lowercase letters,
# and given a target letter target, find the smallest element in the list that
# is larger than the given target.
#
# Letters also wrap around. For example, if the target is target = 'z' and
# letters = ['a', 'b'], the answer is 'a'.
#
#
# Examples:
#
# Input:
# letters = ["c", "f", "j"]
# target = "a"
# Output: "c"
#
# Input:
# letters = ["c", "f", "j"]
# target = "c"
# Output: "f"
#
# Input:
# letters = ["c", "f", "j"]
# target = "d"
# Output: "f"
#
# Input:
# letters = ["c", "f", "j"]
# target = "g"
# Output: "j"
#
# Input:
# letters = ["c", "f", "j"]
# target = "j"
# Output: "c"
#
# Input:
# letters = ["c", "f", "j"]
# target = "k"
# Output: "c"
#
#
#
# Note:
#
# letters has a length in range [2, 10000].
# letters consists of lowercase letters, and contains at least 2 unique
# letters.
# target is a lowercase letter.
#
#
#
class Solution(object):
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
min_length = float("inf")
min_char = None
for letter in letters:
if (ord(letter) - ord(target)) % 26 < min_length:
if not ord(letter) - ord(target):
continue
min_length = (ord(letter) - ord(target)) % 26
min_char = letter
return min_char
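# Hedged usage sketch (added): exercising the solution with cases from the problem text.
#   Solution().nextGreatestLetter(["c", "f", "j"], "a")  -> "c"
#   Solution().nextGreatestLetter(["c", "f", "j"], "j")  -> "c"  (wraps around past 'z')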
|
[
"[email protected]"
] | |
52c105db51a9729ca761c5db76853562fb4dd51a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03044/s052008101.py
|
8dc7ecea338f828c998221cffb71731fd4019ce9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 768 |
py
|
import sys
from collections import deque
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
def main():
N = int(readline())
G = [[] for _ in range(N)]
for _ in range(N - 1):
u, v, w = map(int, readline().split())
G[u - 1].append((v - 1, w))
G[v - 1].append((u - 1, w))
dist = [-1] * N
color = [0] * N
dist[0] = 0
queue = deque([0])
while queue:
v = queue.popleft()
for nv, cost in G[v]:
if dist[nv] == -1:
dist[nv] = dist[v] + cost
color[nv] = dist[nv] % 2
queue.append(nv)
print(*color, sep='\n')
return
if __name__ == '__main__':
main()
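# Hedged example (added): for the tree
#   3
#   1 2 1
#   2 3 1
# the BFS distances from vertex 1 are [0, 1, 2], so the parity colouring printed one
# value per line is 0, 1, 0 -- vertices an even weighted distance apart share a colour.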
|
[
"[email protected]"
] | |
069e8afc3bae88fc490dc7db80adf1c3c2ff5992
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2375/60595/257705.py
|
dfc1e79a8269cf592621f58eb91b8d16b18a863c
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,701 |
py
|
class Graph(object):
def __init__(self, maps):
self.maps = maps
self.nodenum = self.get_nodenum()
self.edgenum = self.get_edgenum()
def get_nodenum(self):
return len(self.maps)
def get_edgenum(self):
count = 0
for i in range(self.nodenum):
for j in range(i):
if self.maps[i][j] > 0:
count += 1
return count
def prim(self):
list = []
if self.nodenum <= 0 or self.edgenum < self.nodenum - 1:
return list
selected_node = [0]
candidate_node = [i for i in range(1, self.nodenum)]
while len(candidate_node) > 0:
begin, end, minweight = 0, 0, 9999
for i in selected_node:
for j in candidate_node:
if self.maps[i][j] < minweight:
minweight = self.maps[i][j]
begin = i
end = j
list.append([begin, end, minweight])
selected_node.append(end)
candidate_node.remove(end)
return list
def Test():
n,m=map(int,input().split())
mat=[]
for i in range(0,n):
line=[]
for j in range(0,n):
line.append(99999)
mat.append(line)
for i in range(0,m):
s=input().split()
try:
mat[int(s[0])-1][int(s[1])-1]=int(s[2])
mat[int(s[1]) - 1][int(s[0]) - 1] = int(s[2])
except:
print(n,m)
graph=Graph(mat)
message=graph.prim()
res=0
for i in range(0,len(message)):
res=max(res,message[i][2])
print(res,end="")
if __name__ == "__main__":
Test()
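# Hedged example (added): for the input
#   3 3
#   1 2 1
#   2 3 2
#   1 3 3
# Prim's algorithm keeps edges (1-2, w=1) and (2-3, w=2), so the printed value -- the
# largest edge weight in the minimum spanning tree -- is 2.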
|
[
"[email protected]"
] | |
615e70e685775ea91236d4f9d8bf8ffa6acd6d50
|
9e28200b71d43de1e122a964e88f1b547bfde465
|
/question_leetcode/159_3.py
|
ac9e41c85595c93128f7e311a207156c3c39e650
|
[] |
no_license
|
paul0920/leetcode
|
6f8a7086eefd3e9bccae83752ef41cbfee1acaea
|
474886c5c43a6192db2708e664663542c2e39548
|
refs/heads/master
| 2023-08-19T14:10:10.494355 | 2021-09-16T20:26:50 | 2021-09-16T20:26:50 | 290,560,326 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 406 |
py
|
import collections
s = "ecebaa"
s = "bacc"
start = 0
count = collections.defaultdict(int)
res = 0
res_string = []
for idx, c in enumerate(s):
count[c] += 1
if len(count) > 2:
count[s[start]] -= 1
if not count[s[start]]:
count.pop(s[start])
start += 1
res = max(res, idx - start + 1)
res_string.append(s[start:idx+1])
print(res)
print(res_string)
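# Hedged trace (added): with s = "bacc" and at most two distinct characters allowed,
# the sliding window grows to "acc", so res == 3 and the last entry of res_string is "acc".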
|
[
"[email protected]"
] | |
853bd821d4c8c5ac1a86b930a9840d78d132224a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02873/s837577213.py
|
b7d5cafe31946f81d03165a317e1a59b8ade8854
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 342 |
py
|
s=input()
l=[]
i=0
while i<len(s):
k=0
    while i < len(s) and s[i] == '<':
k+=1
i+=1
if k>0:
l.append(k)
k=0
    while i < len(s) and s[i] == '>':
k+=1
i+=1
if k>0:
l.append(k)
sm=0
for i in l:
sm+=(i*(i+1))//2
for i in range(0 if s[0]=='<' else 1,len(l)-1,2):
sm-=min(l[i],l[i+1])
print(sm)
|
[
"[email protected]"
] | |
d6d1beb28158c44d313682bf5c100994d4d897db
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4005/codes/1791_1621.py
|
88314ecf676c1eade669782dcfbe3233714d8418
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 79 |
py
|
from numpy import*
a=array(input(":").upper())
q=array(int(input(":")))
soma=0
|
[
"[email protected]"
] | |
0986e209f3f4491736d25bff9acd114e0c92e812
|
4c9eb8584b16bb103a1401a8f297f62589941c01
|
/flo/cli/Rdr2Geo.py
|
37d70f06ef8cebd48f5ae3fad9407d2f1276678c
|
[] |
no_license
|
pyre/flo
|
d62e0bba61926fd395df1c2767198c5743ade531
|
7b61a7a4cf12d4448b99f1b841866fe31a27bb61
|
refs/heads/master
| 2023-03-08T11:21:55.874526 | 2021-09-28T06:47:10 | 2021-09-28T06:47:10 | 156,036,991 | 5 | 0 | null | 2023-02-28T17:42:13 | 2018-11-04T00:51:52 |
JavaScript
|
UTF-8
|
Python
| false | false | 509 |
py
|
#-*- coding: utf-8 -*-
# support
import flo
# superclass
from .Workflow import Workflow
# declaration
class Rdr2Geo(Workflow, family="flo.cli.rdr2geo"):
"""
Invoke the {rdr2geo} workflow to compute the transformation from radar coordinates to
geodetic coordinates for a given SLC
"""
# public state
flow = flo.model.flows.flow()
flow.default = flo.isce3.workflows.rdr2geo # by default, make the one named after me...
flow.doc = "the workflow to execute"
# end of file
|
[
"[email protected]"
] | |
cd1eb7e40c810db20c3ae7b49d3798be2f3e58b5
|
34597ad1d89ee507473c5d91f03a5819143ec48f
|
/EBOs/UserV1/model.py
|
ab856ee1f233ace7266afeb7b415be1894a6ca4b
|
[] |
no_license
|
rmetcalf9/dockPondSampleEBOs
|
082c3a18961665e02402f0f14e3180019fc75bde
|
abd8d973feee03bcbf52938d6364c93d38aa2d5c
|
refs/heads/master
| 2020-03-12T16:26:11.636502 | 2018-06-29T10:58:17 | 2018-06-29T10:58:17 | 130,716,032 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,328 |
py
|
from flask_restplus import fields
def getModel(flaskRestPlusAPI):
#Function must be declared inside getModel function as this is the only part that is imported by dockPond
def getTypeModel(flaskRestPlusAPI, typeName):
if typeName=='http://ic.ac.uk/AIAMetaData/AIAComponents/EnterpriseObjectLibrary/Core/IC_EBO/User/V1/UserEBO:UserEBOTypeV1':
return flaskRestPlusAPI.model('UserEBOTypeV1', {
'Identification': fields.Nested(getTypeModel(flaskRestPlusAPI, 'http://ic.ac.uk/AIAMetaData/AIAComponents/EnterpriseObjectLibrary/Core/IC_EBO/User/V1/UserEBO:IdentificationTypeV1')),
'CID': fields.String(default='',description='College CID'),
'Status': fields.String(default='',description='Status of the User'),
})
if typeName=='http://ic.ac.uk/AIAMetaData/AIAComponents/EnterpriseObjectLibrary/Core/IC_EBO/User/V1/UserEBO:IdentificationTypeV1':
return flaskRestPlusAPI.model('IdentificationTypeV1', {
'UserName': fields.String(default='',description='Cannonical User identifier'),
})
raise Exception('Searching for unknown type')
return flaskRestPlusAPI.model('UserEBOV1', {
'UserEBO': fields.Nested(getTypeModel(flaskRestPlusAPI, 'http://ic.ac.uk/AIAMetaData/AIAComponents/EnterpriseObjectLibrary/Core/IC_EBO/User/V1/UserEBO:UserEBOTypeV1')),
})
|
[
"[email protected]"
] | |
02cca8d92f564c91c6c3d266eaef9202830aaabd
|
2fdc719bea50f10e2a4fc507d25b83ff4e612071
|
/projects/buck/bucklets/tools/download_all.py
|
a70cbda5baa067b219192369df1ce9371cbd8098
|
[
"Apache-2.0"
] |
permissive
|
aslamz/appium
|
5610b61598b5d74a41c43b2d6729f21f6978b7c8
|
778fe9c92041c99f06d9d074caed2f9c61c8bbb0
|
refs/heads/master
| 2022-06-01T18:46:07.210870 | 2021-01-04T12:56:25 | 2021-01-04T12:56:25 | 40,705,347 | 0 | 0 |
Apache-2.0
| 2022-05-20T20:52:31 | 2015-08-14T08:53:27 |
Ruby
|
UTF-8
|
Python
| false | false | 1,306 |
py
|
#!/usr/bin/python
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import OptionParser
import re
from subprocess import check_call, CalledProcessError, Popen, PIPE
MAIN = ['//:classpath']
PAT = re.compile(r'"(//.*?)" -> "//bucklets/tools:download_file"')
opts = OptionParser()
opts.add_option('--src', action='store_true')
args, _ = opts.parse_args()
targets = set()
p = Popen(['buck', 'audit', 'classpath', '--dot'] + MAIN, stdout = PIPE)
for line in p.stdout:
m = PAT.search(line)
if m:
n = m.group(1)
if args.src and n.endswith('__download_bin'):
n = n[:-4] + '_src'
targets.add(n)
r = p.wait()
if r != 0:
exit(r)
try:
check_call(['buck', 'build'] + sorted(targets))
except CalledProcessError as err:
exit(1)
|
[
"[email protected]"
] | |
5d60063af802f6cb1f0a9b6e580171f272016318
|
9ce3080999a69f1d330356645fe3e655052cf954
|
/aiida_registry/make_pages.py
|
0c4911cf752d6d8c8ef644290e6d20c49269cc15
|
[] |
no_license
|
chrisjsewell/aiida-registry
|
b0969b8298e8e5108653ec56ac54a8807e3cc1e6
|
a2cc2cf6c61e835e535d6af6125efcdf7dcae33b
|
refs/heads/master
| 2021-06-16T10:17:19.887994 | 2019-10-30T18:00:55 | 2019-10-30T18:00:55 | 148,847,505 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,634 |
py
|
# -*- coding: utf-8 -*-
"""Generate HTML pages for plugin registry.
Reads plugin-metadata.json produced by fetch_metadata.
"""
from __future__ import absolute_import
from __future__ import print_function
import codecs
import json
import os
import shutil
from collections import defaultdict
from jinja2 import Environment, PackageLoader, select_autoescape
from . import othercolorclass, entrypoint_metainfo, main_entrypoints, PLUGINS_METADATA, entrypointtypes, state_dict
# Subfolders
OUT_FOLDER = 'out'
STATIC_FOLDER = 'static'
HTML_FOLDER = 'plugins' # Name for subfolder where HTMLs for plugins are going to be sitting
TEMPLATES_FOLDER = 'templates'
# Absolute paths
pwd = os.path.split(os.path.abspath(__file__))[0]
STATIC_FOLDER_ABS = os.path.join(pwd, STATIC_FOLDER)
entrypoints_count = defaultdict(list)
other_entrypoint_names = set()
def get_html_plugin_fname(plugin_name):
import string
valid_characters = set(string.ascii_letters + string.digits + '_-')
simple_string = "".join(c for c in plugin_name if c in valid_characters)
return "{}.html".format(simple_string)
def get_summary_info(entry_points):
"""Get info for plugin detail page.
"""
global entrypoints_count, other_entrypoint_names
summary_info = []
ep = entry_points.copy()
for entrypoint_name in main_entrypoints:
try:
num = len(ep.pop(entrypoint_name))
if num > 0:
summary_info.append({
"colorclass":
entrypoint_metainfo[entrypoint_name]['colorclass'],
"text":
entrypoint_metainfo[entrypoint_name]['shortname'],
"count":
num
})
entrypoints_count[entrypoint_name].append(num)
except KeyError:
#No specific entrypoints, pass
pass
# Check remaining non-empty entrypoints
remaining = [ep_name for ep_name in ep if ep[ep_name]]
remaining_count = [len(ep[ep_name]) for ep_name in ep if ep[ep_name]]
total_count = sum(remaining_count)
if total_count:
other_elements = []
for ep_name in remaining:
try:
other_elements.append(
entrypoint_metainfo[ep_name]['shortname'])
except KeyError:
for strip_prefix in ['aiida.']:
if ep_name.startswith(strip_prefix):
ep_name = ep_name[len(strip_prefix):]
break
other_elements.append(
ep_name.replace('_', ' ').replace('.', ' ').capitalize())
summary_info.append({
"colorclass":
othercolorclass,
"text":
'Other ({})'.format(format_entry_points_list(other_elements)),
"count":
total_count
})
entrypoints_count['other'].append(total_count)
other_entrypoint_names.update(other_elements)
return summary_info
def format_entry_points_list(ep_list):
"""Return string of entry points, respecting some limit."""
import copy
max_len = 5
tmp = sorted(copy.copy(ep_list))
if len(tmp) > max_len:
tmp = tmp[:max_len] + ['...']
return ", ".join(tmp)
def validate_plugin_entry_points(plugin_data):
"""Validate that all registered entry points start with the registered entry point root."""
try:
entry_point_root = plugin_data['entry_point']
except KeyError:
# plugin should not specify entry points
entry_point_root = 'MISSING'
for ep_list in plugin_data['entry_points'].values():
for ep in ep_list:
ep_string, _path = ep.split('=')
ep_string = ep_string.strip()
if not ep_string.startswith(entry_point_root):
print(
" >> WARNING: Entry point '{}' does not start with '{}'".
format(ep_string, entry_point_root))
def global_summary():
"""Compute summary of plugin registry."""
global entrypoints_count, other_entrypoint_names
global_summary = []
for entrypoint_name in main_entrypoints:
global_summary.append({
'name':
entrypoint_metainfo[entrypoint_name]['shortname'],
'colorclass':
entrypoint_metainfo[entrypoint_name]['colorclass'],
'num_entries':
len(entrypoints_count[entrypoint_name]),
'total_num':
sum(entrypoints_count[entrypoint_name]),
})
global_summary.append({
'name':
"Other",
'tooltip':
format_entry_points_list(other_entrypoint_names),
'colorclass':
othercolorclass,
'num_entries':
len(entrypoints_count['other']),
'total_num':
sum(entrypoints_count['other'])
})
return global_summary
def make_pages():
# Create output folder, copy static files
if os.path.exists(OUT_FOLDER):
shutil.rmtree(OUT_FOLDER)
os.mkdir(OUT_FOLDER)
os.mkdir(os.path.join(OUT_FOLDER, HTML_FOLDER))
shutil.copytree(STATIC_FOLDER_ABS, os.path.join(OUT_FOLDER, STATIC_FOLDER))
env = Environment(
loader=PackageLoader('aiida_registry.mod'),
autoescape=select_autoescape(['html', 'xml']),
)
with open(PLUGINS_METADATA) as f:
plugins_metadata = json.load(f)
# Create HTML view for each plugin
for plugin_name, plugin_data in plugins_metadata.items():
print(" - {}".format(plugin_name))
subpage = os.path.join(HTML_FOLDER, get_html_plugin_fname(plugin_name))
subpage_abspath = os.path.join(OUT_FOLDER, subpage)
plugin_data['subpage'] = subpage
plugin_data[
'entrypointtypes'] = entrypointtypes # add a static entrypointtypes dictionary
plugin_data["summaryinfo"] = get_summary_info(
plugin_data["entry_points"])
plugin_data['state_dict'] = state_dict
# Write plugin html
plugin_html = env.get_template("singlepage.html").render(**plugin_data)
with codecs.open(subpage_abspath, 'w', 'utf-8') as f:
f.write(plugin_html)
print(" - Page {} generated.".format(subpage))
all_data = {}
all_data['plugins'] = plugins_metadata
all_data['globalsummary'] = global_summary()
print("[main index]")
rendered = env.get_template("main_index.html").render(**all_data)
outfile = os.path.join(OUT_FOLDER, 'index.html')
with codecs.open(outfile, 'w', 'utf-8') as f:
f.write(rendered)
print(" - index.html generated")
|
[
"[email protected]"
] | |
54af66ff4d6027355a3710a71ff0203770426322
|
c81d7dfef424b088bf2509a1baf406a80384ea5a
|
/venv/Lib/site-packages/whitenoise/httpstatus_backport.py
|
fcb1c22f1d45ec7f7fc3b25ffc361c1df72b45bc
|
[] |
no_license
|
Goutham2591/OMK_PART2
|
111210d78fc4845481ed55c852b8f2f938918f4a
|
cb54fb21ebf472bffc6ee4f634bf1e68303e113d
|
refs/heads/master
| 2022-12-10T01:43:08.213010 | 2018-04-05T02:09:41 | 2018-04-05T02:09:41 | 124,828,094 | 0 | 1 | null | 2022-12-07T23:43:03 | 2018-03-12T03:20:14 |
Python
|
UTF-8
|
Python
| false | false | 558 |
py
|
"""
Very partial backport of the `http.HTTPStatus` enum from Python 3.5
This implements just enough of the interface for our purposes, it does not
attempt to be a full implementation.
"""
class HTTPStatus(int):
phrase = None
def __new__(cls, code, phrase):
instance = int.__new__(cls, code)
instance.phrase = phrase
return instance
HTTPStatus.OK = HTTPStatus(200, 'OK')
HTTPStatus.NOT_MODIFIED = HTTPStatus(304, 'Not Modified')
HTTPStatus.METHOD_NOT_ALLOWED = HTTPStatus(405, 'Method Not Allowed')
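# Hedged usage sketch (added, not part of whitenoise): the backported statuses behave
# like plain ints while still carrying a reason phrase, e.g.
#   HTTPStatus.OK == 200            -> True
#   HTTPStatus.NOT_MODIFIED.phrase  -> 'Not Modified'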
|
[
"[email protected]"
] | |
39cd46f95479b5459cef6c53ce8edc1945642153
|
79bb7105223895235263fd391906144f9f9645fd
|
/python/kernel_tests/identity_op_py_test.py
|
7cde987900cb2e034c0d925eba85540adc313147
|
[] |
no_license
|
ml-lab/imcl-tensorflow
|
f863a81bfebe91af7919fb45036aa05304fd7cda
|
54ab3ec2e32087ce70ecae2f36b56a8a92f2ba89
|
refs/heads/master
| 2021-01-22T06:37:18.129405 | 2016-06-08T15:53:28 | 2016-06-08T15:53:28 | 63,518,098 | 1 | 2 | null | 2016-07-17T06:29:14 | 2016-07-17T06:29:13 | null |
UTF-8
|
Python
| false | false | 2,365 |
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for IdentityOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_array_ops
class IdentityOpTest(tf.test.TestCase):
def testInt32_6(self):
with self.test_session():
value = tf.identity([1, 2, 3, 4, 5, 6]).eval()
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value)
def testInt32_2_3(self):
with self.test_session():
inp = tf.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
value = tf.identity(inp).eval()
self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value)
def testString(self):
source = [b"A", b"b", b"C", b"d", b"E", b"f"]
with self.test_session():
value = tf.identity(source).eval()
self.assertAllEqual(source, value)
def testIdentityShape(self):
with self.test_session():
shape = [2, 3]
array_2x3 = [[1, 2, 3], [6, 5, 4]]
tensor = tf.constant(array_2x3)
self.assertEquals(shape, tensor.get_shape())
self.assertEquals(shape, tf.identity(tensor).get_shape())
self.assertEquals(shape, tf.identity(array_2x3).get_shape())
self.assertEquals(shape, tf.identity(np.array(array_2x3)).get_shape())
def testRefIdentityShape(self):
with self.test_session():
shape = [2, 3]
tensor = tf.Variable(tf.constant([[1, 2, 3], [6, 5, 4]], dtype=tf.int32))
self.assertEquals(shape, tensor.get_shape())
self.assertEquals(shape, gen_array_ops._ref_identity(tensor).get_shape())
if __name__ == "__main__":
tf.test.main()
|
[
"[email protected]"
] | |
00c674bec719f04e064532c7307ee71bc50f8bbc
|
8b6cd902deb20812fba07f1bd51a4460d22adc03
|
/back-end/.history/djreact/users/serializers_20191221131418.py
|
4a84b43397e2a944a5fd21996d7d0d6712fd600d
|
[] |
no_license
|
vishaldenzil/Django-react-
|
f3a49d141e0b6882685b7eaa4dc43c84857f335a
|
35b6d41f6dacb3bddcf7858aa4dc0d2fe039ff98
|
refs/heads/master
| 2022-11-08T09:27:02.938053 | 2020-05-29T04:53:52 | 2020-05-29T04:53:52 | 267,768,028 | 0 | 1 | null | 2022-10-15T14:08:30 | 2020-05-29T04:52:20 |
Python
|
UTF-8
|
Python
| false | false | 190 |
py
|
from rest_framework import serializers
from .models import User
class UserRegistrationSerializer(serializers.ModelSerializer):
class Meta:
model = User
        fields = '__all__'
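        # Hedged note (added): DRF's ModelSerializer expects the literal string
        # '__all__' here; a bare __all__ name raises NameError when the class is built.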
|
[
"[email protected]"
] | |
f89422d908d4ded0742b533ea5c45917262a21e9
|
e47b6d86c2309c857c9af4e84ff2e30455030681
|
/Bridge.py
|
0c456d90f8e0be7e8fb10b816da313a991482ee8
|
[] |
no_license
|
bigeyesung/DesignPattern
|
39aec1d9c549ec7fce5bfe5a67a65267692786d8
|
4d2e48f6f053b5a9b6a87e73cdb79c5978592ab6
|
refs/heads/master
| 2020-08-17T11:05:42.104343 | 2020-07-07T20:02:42 | 2020-07-07T20:02:42 | 215,656,773 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,109 |
py
|
from abc import ABC, abstractmethod
class Abstraction:
    def __init__(self, implementation: "Implementation"):
self.implementation = implementation
def operation(self):
return self.implementation.operation_implementation()
class ExtendedAbstraction(Abstraction):
def operation(self):
return self.implementation.operation_implementation()
class Implementation(ABC):
@abstractmethod
def operation_implementation(self):
pass
class ConcreteImplementationA(Implementation):
def operation_implementation(self):
return "platform A."
class ConcreteImplementationB(Implementation):
def operation_implementation(self):
return "platform B."
def client_code(abstraction: Abstraction):
print(abstraction.operation(), end="")
if __name__ == "__main__":
implementation = ConcreteImplementationA()
abstraction = Abstraction(implementation)
client_code(abstraction)
print("\n")
implementation = ConcreteImplementationB()
abstraction = ExtendedAbstraction(implementation)
client_code(abstraction)
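# Hedged note (added): the Bridge pattern keeps the Abstraction hierarchy
# (Abstraction / ExtendedAbstraction) independent of the Implementation hierarchy
# (ConcreteImplementationA / ConcreteImplementationB), so either side can be extended
# without touching the other. Running this module prints:
#   platform A.
#
#   platform B.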
|
[
"[email protected]"
] | |
ee92648ad5b8a4be878dc87469075f80bd3a442d
|
cdd79cef15bdf6a0b9098e27028bbe38607bc288
|
/蟻本/2-3_最長共通部分文字列問題_配るDP.py
|
d9e557cf8591cc2a57a19eb9d8c300f6120fd617
|
[] |
no_license
|
nord2sudjp/atcoder
|
ee35a3eb35717485dc62627172de24c9dac102fb
|
6b1cc5102a615492cc7ff8a33813bbb954641782
|
refs/heads/master
| 2023-08-25T11:27:14.205593 | 2021-09-27T05:43:04 | 2021-09-27T05:43:04 | 302,855,505 | 0 | 0 | null | null | null | null |
SHIFT_JIS
|
Python
| false | false | 620 |
py
|
N,M=map(int,input().split())
S=input()
T=input()
MAX_N=N+2
MAX_M=M+2
DP=[[0]*(MAX_M) for _ in range(MAX_N)]
# DP[i+1][j+1]: length of the LCS of the prefixes S[:i+1] and T[:j+1]
for i in range(N):
for j in range(M):
        # i, j index the current characters of the strings,
        # while the DP cells being read hold already-computed (earlier) states
        # DP[i][j] is the LCS length of the prefixes up to S[i] and T[j]
DP[i][j+1]=max(DP[i][j+1],DP[i][j])
DP[i+1][j]=max(DP[i+1][j],DP[i][j])
if S[i]==T[j]:
            DP[i+1][j+1]=max(DP[i+1][j+1],DP[i][j]+1) # extend the LCS ending at dp[i][j] by one
print(DP[N][M])
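# Hedged example (added): for the input
#   3 2
#   abc
#   ac
# the longest common subsequence is "ac", so the program prints 2.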
|
[
"[email protected]"
] | |
8055239902f815052d3b4a078afeb5a0d13730b7
|
459929ce79538ec69a6f8c32e608f4e484594d68
|
/venv/Lib/site-packages/kubernetes/client/models/extensions_v1beta1_ingress_backend.py
|
efa600d193b4a86f19a2dcc154c8bf3990938050
|
[] |
no_license
|
yychai97/Kubernetes
|
ec2ef2a98a4588b7588a56b9d661d63222278d29
|
2955227ce81bc21f329729737b5c528b02492780
|
refs/heads/master
| 2023-07-02T18:36:41.382362 | 2021-08-13T04:20:27 | 2021-08-13T04:20:27 | 307,412,544 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,474 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ExtensionsV1beta1IngressBackend(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'service_name': 'str',
'service_port': 'object'
}
attribute_map = {
'service_name': 'serviceName',
'service_port': 'servicePort'
}
def __init__(self, service_name=None, service_port=None): # noqa: E501
"""ExtensionsV1beta1IngressBackend - a model defined in OpenAPI""" # noqa: E501
self._service_name = None
self._service_port = None
self.discriminator = None
self.service_name = service_name
self.service_port = service_port
@property
def service_name(self):
"""Gets the service_name of this ExtensionsV1beta1IngressBackend. # noqa: E501
Specifies the name of the referenced service. # noqa: E501
:return: The service_name of this ExtensionsV1beta1IngressBackend. # noqa: E501
:rtype: str
"""
return self._service_name
@service_name.setter
def service_name(self, service_name):
"""Sets the service_name of this ExtensionsV1beta1IngressBackend.
Specifies the name of the referenced service. # noqa: E501
:param service_name: The service_name of this ExtensionsV1beta1IngressBackend. # noqa: E501
:type: str
"""
if service_name is None:
raise ValueError("Invalid value for `service_name`, must not be `None`") # noqa: E501
self._service_name = service_name
@property
def service_port(self):
"""Gets the service_port of this ExtensionsV1beta1IngressBackend. # noqa: E501
Specifies the port of the referenced service. # noqa: E501
:return: The service_port of this ExtensionsV1beta1IngressBackend. # noqa: E501
:rtype: object
"""
return self._service_port
@service_port.setter
def service_port(self, service_port):
"""Sets the service_port of this ExtensionsV1beta1IngressBackend.
Specifies the port of the referenced service. # noqa: E501
:param service_port: The service_port of this ExtensionsV1beta1IngressBackend. # noqa: E501
:type: object
"""
if service_port is None:
raise ValueError("Invalid value for `service_port`, must not be `None`") # noqa: E501
self._service_port = service_port
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtensionsV1beta1IngressBackend):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
bbb90548f8bac0d1b4062e9b26e835791376a92c
|
a94c446a0d9ce77df965674f63be54d54b2be577
|
/raspy/components/potentiometers/microchip/register_memory_address.py
|
2c00349ad860e9bab97439a83e7dadd9917a5182
|
[
"MIT"
] |
permissive
|
cyrusbuilt/RasPy
|
3434e02c2bff09ef9f3ff4995bda14edc781c14b
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
refs/heads/master
| 2020-03-18T20:19:27.426002 | 2018-08-03T17:07:25 | 2018-08-03T17:07:25 | 135,207,376 | 0 | 0 |
MIT
| 2018-08-03T17:07:26 | 2018-05-28T20:42:17 |
Python
|
UTF-8
|
Python
| false | false | 480 |
py
|
"""Register memory addresses."""
WIPER0 = 0x00
"""Wiper 0."""
WIPER1 = 0x01
"""Wiper 1."""
WIPER0_NV = 0x02
"""Wiper 0 non-volatile."""
WIPER1_NV = 0x03
"""Wiper 1 non-volatile."""
TCON01 = 0x04
"""Terminal control for wipers 0 and 1."""
WIPER2 = 0x06
"""Wiper 2."""
WIPER3 = 0x07
"""Wiper 3."""
WIPER2_NV = 0x08
"""Wiper 2 non-volatile."""
WIPER3_NV = 0x09
"""Wiper 3 non-volatile."""
TCON23 = 0x04
"""Terminal control for wipers 2 and 3."""
NONE = 0
"""Null bit."""
|
[
"[email protected]"
] | |
e7b7bc0fa2a5b32fb56f559e5bdd1a625c0572ed
|
8f439e50c741483ffefd5bad16f11d4b60da8fe9
|
/examples/infomax_transductive.py
|
785c7864d2eb6dd43726820bbc8b4e4abf238b6c
|
[
"MIT"
] |
permissive
|
sumanthratna/pytorch_geometric
|
19d66b6cc874fbce9207efc204a0ed1f9bb04d88
|
9c6a069c995cac38e4f3a2f1e9cfc7cebac889c6
|
refs/heads/master
| 2023-08-29T09:58:33.807755 | 2021-09-08T16:00:09 | 2021-09-08T16:00:09 | 404,423,682 | 2 | 0 |
MIT
| 2021-09-08T20:58:23 | 2021-09-08T16:44:15 | null |
UTF-8
|
Python
| false | false | 1,720 |
py
|
import os.path as osp
import torch
import torch.nn as nn
from torch_geometric.datasets import Planetoid
from torch_geometric.nn import GCNConv, DeepGraphInfomax
dataset = 'Cora'
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset)
dataset = Planetoid(path, dataset)
class Encoder(nn.Module):
def __init__(self, in_channels, hidden_channels):
super(Encoder, self).__init__()
self.conv = GCNConv(in_channels, hidden_channels, cached=True)
self.prelu = nn.PReLU(hidden_channels)
def forward(self, x, edge_index):
x = self.conv(x, edge_index)
x = self.prelu(x)
return x
def corruption(x, edge_index):
return x[torch.randperm(x.size(0))], edge_index
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DeepGraphInfomax(
hidden_channels=512, encoder=Encoder(dataset.num_features, 512),
summary=lambda z, *args, **kwargs: torch.sigmoid(z.mean(dim=0)),
corruption=corruption).to(device)
data = dataset[0].to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
def train():
model.train()
optimizer.zero_grad()
pos_z, neg_z, summary = model(data.x, data.edge_index)
loss = model.loss(pos_z, neg_z, summary)
loss.backward()
optimizer.step()
return loss.item()
def test():
model.eval()
z, _, _ = model(data.x, data.edge_index)
acc = model.test(z[data.train_mask], data.y[data.train_mask],
z[data.test_mask], data.y[data.test_mask], max_iter=150)
return acc
for epoch in range(1, 301):
loss = train()
print('Epoch: {:03d}, Loss: {:.4f}'.format(epoch, loss))
acc = test()
print('Accuracy: {:.4f}'.format(acc))
|
[
"[email protected]"
] | |
24696d3d7d1ec6758135a501519de7bf80fc9c3f
|
1208ac3718420c4a118ab6b777d99980b85f952a
|
/123.py
|
5f73d229ebcbe9954cec2122d838cef49c4cf56b
|
[] |
no_license
|
deimelperez/150_Py_challenges
|
6ab9aea77c9c117b682790bfe36fb5e280cb8afc
|
b58f55312e7abf30cb7cb6d68b249bb5dcd3c862
|
refs/heads/master
| 2023-03-13T02:30:15.095467 | 2021-03-04T19:02:11 | 2021-03-04T19:02:11 | 344,579,979 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,490 |
py
|
import os
import csv
clear = lambda: os.system('cls')
def prompt():
ch = 0
while ch != 1 and ch != 2 and ch != 3 and ch != 4:
clear()
print('1- Add to file')
print('2- View all records')
print('3- Delete record')
print('4- Exit')
ch = int(input('Select an option: '))
clear()
return ch
def add_to_file():
file = open('122 Salaries.csv', 'a')
name = input('Enter name: ')
salary = input('Enter salary: ')
record = name + ',' + salary + '\n'
file.write(str(record))
file.close()
return
def view_records():
file = open('122 Salaries.csv', 'r')
for row in file:
print(row)
file.close()
input("\nPress enter to continue")
return
def delete_record():
file = list(csv.reader(open('122 Salaries.csv')))
tem = []
x = 0
for row in file:
tem.append(row)
print(x, row)
x = x + 1
row = int(input('Select which row you want to delete: '))
del tem[row]
file = open('122 Salaries.csv', 'w')
x = 0
for row in tem:
newRec = tem[x][0] + ',' + tem[x][1] + '\n'
file.write(str(newRec))
x = x + 1
file.close()
return
def main():
ch = 0
while ch != 4:
ch = prompt()
if ch == 1:
add_to_file()
elif ch == 2:
view_records()
elif ch == 3:
delete_record()
input("\nPress enter to continue")
return
main()
|
[
"[email protected]"
] | |
cff764949b2ed11e5a93eb1010ee840f4c990c13
|
f7d3c8483521ec45bf0bb0927c0c57a275e03996
|
/ch04-linear/linear_ml.py
|
fe394af134bee53da6a57b9be7d233d6d95f245d
|
[] |
no_license
|
buzzzzx/DataScienceLearning
|
2fe7fef6fb8538e2acd46d19643ff4fc50dc249a
|
af38157f01ba3682141b11788276daf6d6002b37
|
refs/heads/master
| 2020-03-23T16:40:21.517239 | 2018-07-24T15:10:17 | 2018-07-24T15:10:17 | 141,699,329 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,808 |
py
|
# -*- coding: utf-8 -*-
__author__ = 'buzz'
__date__ = '2018/7/16 下午2:42'
"""
1. spit the data: trainData, testData
2. train the model
3. evaluate the model, get the MSE and COD
4. visualization
"""
import os
import sys
from sklearn import linear_model
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def linearModel(data):
features = ["x"]
labels = ["y"]
trainData = data[:15]
testData = data[15:]
model = trainModel(trainData, features, labels)
error, score = evaluateModel(model, testData, features, labels)
visualizeModel(model, data, features, labels, error, score)
def trainModel(trainData, features, labels):
model = linear_model.LinearRegression()
model.fit(trainData[features], trainData[labels])
return model
def evaluateModel(model, testData, features, labels):
error = np.mean((model.predict(testData[features]) - testData[labels]) ** 2)
score = model.score(testData[features], testData[labels])
return error, score
def visualizeModel(model, data, features, labels, error, score):
plt.rcParams['font.sans-serif'] = ['SimHei']
fig = plt.figure(figsize=(6, 6), dpi=80)
ax = fig.add_subplot(111)
ax.set_title("线性回归示例")
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.scatter(data[features], data[labels], color='b', label=u'%s: $y = x + \epsilon$' % "真实值")
if model.intercept_ > 0:
        # Draw the fitted line in red to show the model's prediction
        # In Python 3, str does not need to be decoded
if sys.version_info[0] == 3:
ax.plot(data[features], model.predict(data[features]), color='r',
label=u'%s: $y = %.3fx$ + %.3f' \
% ("预测值", model.coef_, model.intercept_))
else:
ax.plot(data[features], model.predict(data[features]), color='r',
label=u'%s: $y = %.3fx$ + %.3f' \
% ("预测值".decode("utf-8"), model.coef_, model.intercept_))
        ## coef: slope coefficient, intercept: intercept term
else:
        # In Python 3, str does not need to be decoded
if sys.version_info[0] == 3:
ax.plot(data[features], model.predict(data[features]), color='r',
label=u'%s: $y = %.3fx$ - %.3f' \
% ("预测值", model.coef_, abs(model.intercept_)))
else:
ax.plot(data[features], model.predict(data[features]), color='r',
label=u'%s: $y = %.3fx$ - %.3f' \
% ("预测值".decode("utf-8"), model.coef_, abs(model.intercept_)))
legend = plt.legend(shadow=True)
legend.get_frame().set_facecolor('#6F93AE')
    # Show the mean squared error and the coefficient of determination
    # In Python 3, str does not need to be decoded
if sys.version_info[0] == 3:
ax.text(0.99, 0.01,
u'%s%.3f\n%s%.3f' \
% ("均方差:", error, "决定系数:", score),
style='italic', verticalalignment='bottom', horizontalalignment='right',
transform=ax.transAxes, color='m', fontsize=13)
else:
ax.text(0.99, 0.01,
u'%s%.3f\n%s%.3f' \
% ("均方差:".decode("utf-8"), error, "决定系数:".decode("utf-8"), score),
style='italic', verticalalignment='bottom', horizontalalignment='right',
transform=ax.transAxes, color='m', fontsize=13)
    # Show the figure drawn above; it blocks execution until all figure windows are closed.
    # In a Python shell, "block=False" can be passed to disable the blocking.
plt.show()
if __name__ == '__main__':
filepath = 'data/simple_example.csv'
data = pd.read_csv(filepath)
linearModel(data)
    # Select columns
# data["x"] data[["x", "y"]]
    # Select rows
# data[:10]
|
[
"[email protected]"
] | |
ccb6cff749499176fa4d9de1366c42f43483fafb
|
0add7953d3e3ce2df9e8265102be39b758579753
|
/built-in/TensorFlow/Official/cv/image_segmentation/UNet_Industrial_for_TensorFlow/model/layers/__init__.py
|
a7816ce0045ac92926203a79ec08c91e0727c967
|
[
"Apache-2.0"
] |
permissive
|
Huawei-Ascend/modelzoo
|
ae161c0b4e581f8b62c77251e9204d958c4cf6c4
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
refs/heads/master
| 2023-04-08T08:17:40.058206 | 2020-12-07T08:04:57 | 2020-12-07T08:04:57 | 319,219,518 | 1 | 1 |
Apache-2.0
| 2023-03-24T22:22:00 | 2020-12-07T06:01:32 |
Python
|
UTF-8
|
Python
| false | false | 2,506 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from model.layers.utils import _log_hparams
from model.layers.activation import crelu
from model.layers.activation import elu
from model.layers.activation import leaky_relu
from model.layers.activation import prelu
from model.layers.activation import relu
from model.layers.activation import relu6
from model.layers.activation import selu
from model.layers.activation import sigmoid
from model.layers.activation import softmax
from model.layers.activation import tanh
from model.layers.conv2d import conv2d
from model.layers.deconv2d import deconv2d
from model.layers.dense import dense
from model.layers.drop_layers import dropout
from model.layers.math_ops import reduce_mean
from model.layers.normalization import batch_norm
from model.layers.padding import pad
from model.layers.pooling import average_pooling2d
from model.layers.pooling import max_pooling2d
from model.layers.array_ops import concat
from model.layers.array_ops import flatten
from model.layers.array_ops import reshape
from model.layers.array_ops import squeeze
from model.layers.array_ops import upscale_2d
__all__ = [
# activation layers
'crelu',
'elu',
'leaky_relu',
'prelu',
'relu',
'relu6',
'selu',
'sigmoid',
'softmax',
'tanh',
# array ops
'concat',
'flatten',
'reshape',
'squeeze',
'upscale_2d',
# conv layers
'conv2d',
# deconv layers
'deconv2d',
# dense layers
'dense',
# drop layers
'dropout',
# math_ops layers
'reduce_mean',
# normalization layers
'batch_norm',
# padding layers
'pad',
# pooling layers
'average_pooling2d',
'max_pooling2d',
]
|
[
"[email protected]"
] | |
96f85c38df153deb1653e341c876ccc4fc255a21
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02879/s387457862.py
|
04f057243e2adabe89adf048f7b2c93b05844897
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 93 |
py
|
a, b = map(int, input().split())
if a <= 9 and b <= 9:
print(a * b)
else:
print("-1")
|
[
"[email protected]"
] | |
6d66d2cdb6781a847e3e9c871a7d560d72c7b3c5
|
b87f66b13293782321e20c39aebc05defd8d4b48
|
/maps/build/Traits/integrationtests/ui/instance_drag_test.py
|
74dcc024c6f16f6dde1583cd5dc3b07f36b3a95c
|
[] |
no_license
|
m-elhussieny/code
|
5eae020932d935e4d724c2f3d16126a0d42ebf04
|
5466f5858dbd2f1f082fa0d7417b57c8fb068fad
|
refs/heads/master
| 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,611 |
py
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 12/04/2004
# Description: Test case for the traits tree editor.
#------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from enthought.traits.api \
import HasTraits, Str, Regex, List, Instance
from enthought.traits.ui.api \
import TreeEditor, TreeNode, View, Group, Item, Handler, InstanceEditor
from enthought.traits.ui.instance_choice \
import InstanceDropChoice
from enthought.traits.ui.menu \
import Menu, Action, Separator
from enthought.traits.ui.wx.tree_editor \
import NewAction, CopyAction, CutAction, PasteAction, DeleteAction, \
RenameAction
#-------------------------------------------------------------------------------
# 'Employee' class:
#-------------------------------------------------------------------------------
class Employee ( HasTraits ):
name = Str( '<unknown>' )
title = Str
phone = Regex( regex = r'\d\d\d-\d\d\d\d' )
view = View( 'title', 'phone' )
def default_title ( self ):
self.title = 'Senior Engineer'
#-------------------------------------------------------------------------------
# 'Department' class:
#-------------------------------------------------------------------------------
class Department ( HasTraits ):
name = Str( '<unknown>' )
employees = List( Employee )
view = View( [ 'employees', '|<>' ] )
#-------------------------------------------------------------------------------
# 'Company' class:
#-------------------------------------------------------------------------------
class Company ( HasTraits ):
name = Str( '<unknown>' )
departments = List( Department )
employees = List( Employee )
#-------------------------------------------------------------------------------
# 'Partner' class:
#-------------------------------------------------------------------------------
class Partner ( HasTraits ):
name = Str( '<unknown>' )
company = Instance( Company )
eom = Instance( Employee )
dom = Instance( Department )
#-------------------------------------------------------------------------------
# Create a hierarchy:
#-------------------------------------------------------------------------------
jason = Employee(
name = 'Jason',
title = 'Sr. Engineer',
phone = '536-1057' )
mike = Employee(
name = 'Mike',
title = 'Sr. Engineer',
phone = '536-1057' )
dave = Employee(
name = 'Dave',
title = 'Sr. Engineer',
phone = '536-1057' )
martin = Employee(
name = 'Martin',
title = 'Sr. Engineer',
phone = '536-1057' )
duncan = Employee(
name = 'Duncan',
title = 'Sr. Engineer' )
partner = Partner(
name = 'eric',
company = Company(
name = 'Enthought, Inc.',
departments = [
Department(
name = 'Business',
employees = [ jason, mike ]
),
Department(
name = 'Scientific',
employees = [ dave, martin, duncan ]
)
],
employees = [ dave, martin, mike, duncan, jason ]
)
)
#-------------------------------------------------------------------------------
# Define the tree trait editor:
#-------------------------------------------------------------------------------
no_view = View()
tree_editor = TreeEditor(
editable = False,
nodes = [
TreeNode( node_for = [ Company ],
auto_open = True,
children = '',
label = 'name',
view = View( [ 'name', '|<' ] ) ),
TreeNode( node_for = [ Company ],
auto_open = True,
children = 'departments',
label = '=Departments',
view = no_view,
add = [ Department ] ),
TreeNode( node_for = [ Company ],
auto_open = True,
children = 'employees',
label = '=Employees',
view = no_view,
add = [ Employee ] ),
TreeNode( node_for = [ Department ],
auto_open = True,
children = 'employees',
label = 'name',
menu = Menu( NewAction,
Separator(),
DeleteAction,
Separator(),
RenameAction,
Separator(),
CopyAction,
CutAction,
PasteAction ),
view = View( [ 'name', '|<' ] ),
add = [ Employee ] ),
TreeNode( node_for = [ Employee ],
auto_open = True,
label = 'name',
menu = Menu( NewAction,
Separator(),
Action( name = 'Default title',
action = 'object.default_title' ),
Action( name = 'Department',
action = 'handler.employee_department(editor,object)' ),
Separator(),
CopyAction,
CutAction,
PasteAction,
Separator(),
DeleteAction,
Separator(),
RenameAction ),
view = View( [ 'name', 'title', 'phone', '|<' ] ) )
]
)
#-------------------------------------------------------------------------------
# 'TreeHandler' class:
#-------------------------------------------------------------------------------
class TreeHandler ( Handler ):
def employee_department ( self, editor, object ):
dept = editor.get_parent( object )
        print('%s works in the %s department.' % (object.name, dept.name))
#-------------------------------------------------------------------------------
# Define the View to use:
#-------------------------------------------------------------------------------
view = View(
Group(
[ Item( 'company',
editor = tree_editor,
resizable = True ),
'|<>' ],
Group(
[ '{Employee of the Month}@',
Item( 'eom@',
editor = InstanceEditor( values = [
InstanceDropChoice( klass = Employee,
selectable = True ) ] ),
resizable = True ),
'|<>' ],
[ '{Department of the Month}@',
Item( 'dom@',
editor = InstanceEditor( values = [
InstanceDropChoice( klass = Department ) ] ),
resizable = True ),
'|<>' ],
show_labels = False,
layout = 'split' ),
orientation = 'horizontal',
show_labels = False,
layout = 'split' ),
title = 'Company Structure',
handler = TreeHandler(),
buttons = [ 'OK', 'Cancel' ],
resizable = True,
width = .5,
height = .5
)
#-------------------------------------------------------------------------------
# Edit it:
#-------------------------------------------------------------------------------
if __name__ == '__main__':
partner.configure_traits( view = view )
|
[
"[email protected]"
] | |
db27d9c00b53a47982cfeea67dd63ecb1da8129b
|
b9cda298b1e8da3a657aea29080a467055bae421
|
/scandium/tpl/project_template/setup.pyt
|
cc777de4496e074ae1f3fefcdbd641970330004f
|
[] |
no_license
|
vasfili/scandium
|
9fa98c18100b18f8dac60955e5602ca038e681db
|
843757d13a70a407626a0a7d5f6407a21d74e5f9
|
refs/heads/master
| 2020-12-13T22:34:50.661608 | 2015-10-14T13:14:27 | 2015-10-14T13:14:27 | 44,236,746 | 0 | 0 | null | 2015-10-14T09:11:37 | 2015-10-14T09:11:36 |
Python
|
UTF-8
|
Python
| false | false | 5,219 |
pyt
|
from setuptools import setup, find_packages
from py2exe.build_exe import py2exe as build_exe
from distutils.sysconfig import get_python_lib
import fnmatch
import py2exe
import sys
import os
# If run without args, build executables, in quiet mode.
if len(sys.argv) == 1:
sys.argv.append("py2exe")
sys.argv.append("-q")
################################################################
# Customize these variables
NAME = "{{project_name}}"
VERSION = "{{version}}"
DESCRIPTION = "{{description}}"
COMPANY_NAME = "{{company_name}}"
LICENSE = "{{license}}"
# Fiddle with these variables if you use Python modules that
# py2exe can't find, or you change the location of static
# and template data.
INCLUDES = ['jinja2.ext', 'PySide.QtNetwork']
EXCLUDES = ["Tkconstants", "Tkinter", "tcl"]
PACKAGES = find_packages(exclude=("tests",))
PACKAGE_DATA_DIRS = ('static', 'templates')
################################################################
# A program using PySide
# The manifest will be inserted as resource into {{project_name}}.exe. This
# gives the controls the Windows XP appearance (if run on XP ;-) and
# ensures the Visual C++ Redistributable Package DLLs get found.
#
# Another option would be to store it in a file named
# {{project_name}}.exe.manifest, and copy it with the data_files option into
# the dist-dir.
#
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="{{project_name}}"
type="win32"
/>
<description>{{project_name}} Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="X86"
publicKeyToken="1fc8b3b9a1e18e3b"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
# Extention to embed package_data in py2exe's distributable
# See: http://crazedmonkey.com/blog/python/pkg_resources-with-py2exe.html
class MediaCollector(build_exe):
def copy_extensions(self, extensions):
build_exe.copy_extensions(self, extensions)
def collect_media(path):
for root, _, filenames in os.walk(path):
for fname in fnmatch.filter(filenames, '*'):
parent = os.path.join(self.collect_dir, root)
if not os.path.exists(parent):
self.mkpath(parent)
self.copy_file(os.path.join(root, fname), \
os.path.join(parent, fname))
self.compiled_files.append(os.path.join(root, fname))
for dname in PACKAGE_DATA_DIRS:
collect_media(os.path.join(NAME, dname))
collect_media(os.path.join(NAME, dname))
# Create Windows Application target
#
class Target:
def __init__(self, **kw):
self.__dict__.update(kw)
# for the versioninfo resources
self.version = VERSION
self.company_name = COMPANY_NAME
self.description = DESCRIPTION
self.copyright = LICENSE
self.name = NAME
app = Target(
# what to build
script = "runapp.py",
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog=NAME))],
icon_resources = [(1, "%s/static/icons/icon.ico" % NAME)],
dest_base = NAME
)
# Qt4 uses plugins for image processing. These cannot be bundled into the
# executable, so we copy them into the application directory, along with
# the Qt DLL files, which we then exclude from the bundle.
path = os.path.join(get_python_lib(), 'PySide', 'plugins', 'imageformats')
imageformats = []
for dll in os.listdir(path):
imageformats.append(os.path.join(path, dll))
path = os.path.join(get_python_lib(), 'PySide')
qt = []
for dll in ("QtCore4.dll", "QtGui4.dll", "QtNetwork4.dll"):
qt.append(os.path.join(path, dll))
DATA_FILES = [('imageformats', imageformats), ('', qt)]
################################################################
setup(
cmdclass = {'py2exe': MediaCollector},
data_files = DATA_FILES,
include_package_data=True,
options = {"py2exe": {"compressed": 1,
"optimize": 1,
"ascii": 0,
"bundle_files": 1,
"packages": PACKAGES,
"includes": INCLUDES,
"excludes": EXCLUDES,
# exclude the Qt4 DLLs to ensure the data_files version gets used, otherwise image processing will fail
"dll_excludes": ['msvcp90.dll', 'w9xpopen.exe', "QtCore4.dll", "QtGui4.dll", "QtNetwork4.dll"]}},
zipfile = None,
windows = [app],
)
|
[
"[email protected]"
] | |
8449cd14afa4652b75eadf140e87adf6909ad3d1
|
1539f86f91ce0ee6150fba7363976d32cd37ece2
|
/codes_auto/99.recover-binary-search-tree.py
|
71f632672d901d70aa7038b3688b89a5cf53aea0
|
[] |
no_license
|
zhpbo/LeetCode_By_Python
|
fdee0a8b7ea7ed1f61a99f0041e1c748e50f138c
|
0017b9db891d36789116f7299d32510a373e68da
|
refs/heads/master
| 2023-07-09T15:38:45.003002 | 2020-08-18T07:04:51 | 2020-08-18T07:04:51 | 281,598,190 | 0 | 0 | null | 2021-08-18T04:58:39 | 2020-07-22T06:47:05 | null |
UTF-8
|
Python
| false | false | 995 |
py
|
#
# @lc app=leetcode.cn id=99 lang=python3
#
# [99] recover-binary-search-tree
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def recoverTree(self, root: TreeNode) -> None:
"""
Do not return anything, modify root in-place instead.
"""
tree = []
def helper(root,flag):
if not root:
return
helper(root.left,flag)
if flag=="traverse":
tree.append(root.val)
elif flag == "modify":
                # print("before the change:", root.val)
                root.val = tree[0]
                del tree[0]
                # print("after the change:", root.val)
helper(root.right, flag)
helper(root, flag="traverse")
# print(tree)
tree.sort()
# print(tree)
helper(root, flag="modify")
# @lc code=end
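# Hedged note (added): this recovery collects the in-order values, sorts them, and writes
# them back in-order, so it repairs any number of misplaced values at the cost of O(n)
# extra space (the classic two-swapped-nodes variant can be done with O(h) space).
# Example: if the in-order traversal reads [1, 3, 2, 4], sorting gives [1, 2, 3, 4],
# which is then written back node by node during the "modify" pass.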
|
[
"[email protected]"
] | |
6ece9e8b26aba619307519cdbbc359223e72c41a
|
57d5ebeece91f5759d54e898154f11e97c6e5609
|
/tests/add_trailing_comma_test.py
|
ee7bed3b6df646ee1055c45a784166a530c78b5b
|
[
"MIT"
] |
permissive
|
chriskuehl/add-trailing-comma
|
0c50e16fd6d25057d025f75a23ddde0aafec4dbd
|
d26f8ca449eb12cfaec3d3cd1f8ced789bd73e9a
|
refs/heads/master
| 2020-12-02T07:46:50.317774 | 2017-07-10T01:29:14 | 2017-07-10T01:29:14 | 96,725,169 | 0 | 0 | null | 2017-07-10T01:53:11 | 2017-07-10T01:53:11 | null |
UTF-8
|
Python
| false | false | 3,624 |
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import ast
import sys
import pytest
from add_trailing_comma import _fix_calls
from add_trailing_comma import main
@pytest.mark.parametrize(
'src',
(
# No relevant multiline calls
'x = 5',
'x(1)',
# Don't rewrite functions that have a single generator argument as
# this breaks lib2to3 based tools.
'tuple(\n'
' a for a in b\n'
')',
# Don't rewrite *args or **kwargs unless --py35-plus
'x(\n'
' *args\n'
')',
'x(\n'
' **kwargs\n'
')',
# The ast tells us that the inner call starts on line 2, but the first
# paren (and last paren) are actually both on line 3.
'x(\n'
' "foo"\n'
' "bar".format(1),\n'
')',
# Don't add a comma when it's not at the end of a line
'x((\n'
' 1,\n'
'))',
),
)
def test_fix_calls_noops(src):
ret = _fix_calls(src, py35_plus=False)
assert ret == src
def _has_16806_bug():
# See https://bugs.python.org/issue16806
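    # On affected interpreters the multiline string's col_offset is reported
    # as -1, which is exactly what this probe checks for.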
return ast.parse('"""\n"""').body[0].value.col_offset == -1
@pytest.mark.xfail(not _has_16806_bug(), reason='multiline string parse bug')
def test_ignores_invalid_ast_node():
src = (
'x(\n'
' """\n'
' """\n'
')'
)
assert _fix_calls(src, py35_plus=False) == src
def test_py35_plus_rewrite():
src = (
'x(\n'
' *args\n'
')'
)
ret = _fix_calls(src, py35_plus=True)
assert ret == (
'x(\n'
' *args,\n'
')'
)
@pytest.mark.xfail(sys.version_info < (3, 5), reason='py35+ only feature')
@pytest.mark.parametrize(
'syntax',
(
'y(*args1, *args2)\n',
'y(**kwargs1, **kwargs2)\n',
),
)
def test_auto_detected_py35_plus_rewrite(syntax):
src = syntax + 'x(\n *args\n)'
expected = syntax + 'x(\n *args,\n)'
assert _fix_calls(src, py35_plus=False) == expected
def test_main_trivial():
assert main(()) == 0
def test_main_noop(tmpdir):
f = tmpdir.join('f.py')
f.write('x = 5\n')
assert main((f.strpath,)) == 0
assert f.read() == 'x = 5\n'
def test_main_changes_a_file(tmpdir, capsys):
f = tmpdir.join('f.py')
f.write('x(\n 1\n)\n')
assert main((f.strpath,)) == 1
out, _ = capsys.readouterr()
assert out == 'Rewriting {}\n'.format(f.strpath)
assert f.read() == 'x(\n 1,\n)\n'
def test_main_syntax_error(tmpdir):
f = tmpdir.join('f.py')
f.write('from __future__ import print_function\nprint 1\n')
assert main((f.strpath,)) == 0
def test_main_non_utf8_bytes(tmpdir, capsys):
f = tmpdir.join('f.py')
f.write_binary('# -*- coding: cp1252 -*-\nx = €\n'.encode('cp1252'))
assert main((f.strpath,)) == 1
out, _ = capsys.readouterr()
assert out == '{} is non-utf-8 (not supported)\n'.format(f.strpath)
def test_main_py35_plus_argument_star_args(tmpdir):
f = tmpdir.join('f.py')
f.write('x(\n *args\n)\n')
assert main((f.strpath,)) == 0
assert f.read() == 'x(\n *args\n)\n'
assert main((f.strpath, '--py35-plus')) == 1
assert f.read() == 'x(\n *args,\n)\n'
def test_main_py35_plus_argument_star_star_kwargs(tmpdir):
f = tmpdir.join('f.py')
f.write('x(\n **args\n)\n')
assert main((f.strpath,)) == 0
assert f.read() == 'x(\n **args\n)\n'
assert main((f.strpath, '--py35-plus')) == 1
assert f.read() == 'x(\n **args,\n)\n'
|
[
"[email protected]"
] | |
607b14e2c65395162c1e43a9e0046c08f05de656
|
7465148de5d656ebfe68b588a2f271a11384ed6a
|
/examples/multiple_actions_docker/second.py
|
e1f78138fe14078b2864bf7a2b3a58b404a44222
|
[] |
no_license
|
fiefdx/LitePipeline
|
1462dacdd1a0f2c67972b6014b428c2c45d46949
|
09608f8c5f248d2ba10e5840bf00d69e76ed6291
|
refs/heads/master
| 2023-04-14T11:45:18.929249 | 2023-04-02T06:48:30 | 2023-04-02T06:48:30 | 226,355,739 | 2 | 0 | null | 2023-04-01T17:49:14 | 2019-12-06T15:17:33 |
Python
|
UTF-8
|
Python
| false | false | 1,206 |
py
|
# -*- coding: utf-8 -*-
import os
import sys
import time
import json
import logging
import datetime
from pathlib import Path
import tornado
from litepipeline_helper.models.action import Action
import logger
LOG = logging.getLogger(__name__)
home = str(Path.home())
if __name__ == "__main__":
workspace, input_data = Action.get_input()
logs_directory = os.path.join(workspace, "logs")
logger.config_logging(file_name = "second.log",
log_level = "DEBUG",
dir_name = logs_directory,
day_rotate = False,
when = "D",
interval = 1,
max_size = 20,
backup_count = 5,
console = False)
LOG.debug("test start")
LOG.debug("input_data: %s", input_data)
data = {"messages": []}
for i in range(10, 20):
now = datetime.datetime.now()
message = "%s: hello world, tornado(%03d): %s" % (now, i, tornado.version)
data["messages"].append(message)
LOG.debug(message)
time.sleep(1)
Action.set_output(data = data)
LOG.debug("test end")
|
[
"[email protected]"
] | |
51a5adfbaade61004be3dca483ae4850f82444ba
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/integration_test/fixture_stubs/django/db/__init__.pyi
|
5319b5a66ff2a2a4b828e875de269690e72683c4
|
[
"MIT"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028 | 2023-09-02T07:40:35 | 2023-09-02T07:40:35 | 110,274,488 | 6,703 | 575 |
MIT
| 2023-09-13T17:02:32 | 2017-11-10T17:31:36 |
OCaml
|
UTF-8
|
Python
| false | false | 843 |
pyi
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from typing import Any
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.utils import (
ConnectionHandler,
DatabaseError as DatabaseError,
DataError as DataError,
Error as Error,
IntegrityError as IntegrityError,
InterfaceError as InterfaceError,
InternalError as InternalError,
NotSupportedError as NotSupportedError,
OperationalError as OperationalError,
ProgrammingError as ProgrammingError,
)
def close_old_connections(**kwargs: Any) -> None: ...
def reset_queries(**kwargs: Any) -> None: ...
transaction: Any
connections: ConnectionHandler
connection: BaseDatabaseWrapper
|
[
"[email protected]"
] | |
3e0591086d651f921210267a7a24e4842272772c
|
62fc811f203f041c07d4bc782ce5f7f5cb8dd7c6
|
/test.py
|
01b7128de9357da4fab8a70928f00beee19546bf
|
[] |
no_license
|
riaz/Recee
|
71dba563383059bac474bf361f216adfdebab8ae
|
a68c356a5c77ef0365f45c557d945d50fadcb430
|
refs/heads/master
| 2021-01-10T05:07:40.018566 | 2015-11-16T04:46:31 | 2015-11-16T04:46:31 | 46,204,411 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 589 |
py
|
from openalpr import Alpr
import sys
alpr = Alpr("eu", "nplate_train/openalpr.conf.in", "nplate_train/runtime_data")
if not alpr.is_loaded():
print("Error loading OpenALPR")
sys.exit(1)
#alpr.set_top_n(20)
alpr.set_default_region("eu")
results = alpr.recognize_file("/home/riaz/Desktop/hack/2009_09_08_drive_0010/I1_000388.png")
for plate in results['results']:
if len(plate['candidates']) > 0:
print "Found: %12s %12f" % ( plate['candidates'][0]['plate'],plate['candidates'][0]['confidence'])
# Call when completely done to release memory
alpr.unload()
|
[
"[email protected]"
] | |
9dd7a920b8c9aaa780d588326e861519a2da8ca1
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_383/ch18_2019_03_21_00_26_29_879325.py
|
f8de6535586f5e1582515556af18efa461925a5e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 107 |
py
|
def encontra_cateto(hipotenusa,cateto):
cateto2=((hipotenusa**2)-(cateto**2))**(1/2)
return cateto2
|
[
"[email protected]"
] | |
a2c55b36e5abd15a11aed5da04519c8e52823407
|
17be0e9275082c3239fedc11bc617ecd5856136c
|
/letor/offline/train_one_state.py
|
ee6031ab06d7a984a088428fc91a8abe491fd882
|
[] |
no_license
|
mdkmongo/semantichealth.github.io
|
8bb814bfd3b0b3a71828625a2acebfd8013e2eef
|
6462ba2cc406967b0371b09822e4c26860e96c91
|
refs/heads/master
| 2021-01-21T08:24:07.128484 | 2016-08-19T05:35:04 | 2016-08-19T05:35:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,589 |
py
|
from s3_helpers import *
from get_rank_for_state_plan import *
from query_characterizer import *
import pickle
def train_one_state(click_data, state, log, s3_fea):
    '''
    Train the learning-to-rank model for a single state: characterize the
    state's queries, rank plans from the click data, and upload the resulting
    training and online pickles to S3.
    '''
# set folder name of S3
s3clnt = s3_helper()
log.trace('characterize queries for state %s' %state)
s_rows = click_data[click_data['state']==state]
q_cluster, vocab, centroids = query_characterizer(s_rows['query'], log)
log.trace('run letor training for state %s' %state)
letor_rank, plans = get_rank_for_state_plan(q_cluster, np.array([[r['ranks'],r['clicks']] for r in s_rows]), log, s3_fea)
if not plans: # or (not letor_rank):
log.warning('no feature file found for state %s, skip training.' %state)
return
# exclude missing plan IDs in ES
with open('missing.pickle') as f:
missing = pickle.load(f)
picker = np.array([p not in missing for p in plans])
# upload the stuff to S3
save_training = 'training/%s_%d.pickle' %(state, len(letor_rank))
with open(save_training, 'w') as f:
pickle.dump([list(np.array(plans)[picker]), letor_rank[:, picker]], f)
s3clnt.delete_by_state('training/%s' %(state))
s3clnt.upload(save_training)
save_online = 'online/%s_runtime.pickle' %(state)
cen = [list(c) for c in centroids]
voc = [None]*len(vocab)
for k,v in vocab.items():
voc[v] = k
with open(save_online, 'w') as f:
pickle.dump([voc, cen], f)
s3clnt.delete_by_state('online/%s' %(state))
s3clnt.upload(save_online)
log.trace('ranking & online file are saved on s3')
|
[
"[email protected]"
] | |
8df1690d1f23f89363ab4c98e63ee1b3d812a469
|
505dc9404c89e56aea70f2db9fc1b3fb311fc5d9
|
/usr/lib/enigma2/python/Components/Renderer/speedyflipclockfortuna_metall1.py
|
ec87abf72fe3c8bbce8261dc3e619d1ce0ca2573
|
[] |
no_license
|
e2plugins/4ATV_speedy_blue
|
ae8181ed4017beb4b48e58fe7cbbcbe2a1696057
|
c84da50a0d872a2e74812214eed5532ed0893534
|
refs/heads/master
| 2022-11-14T17:09:41.134795 | 2020-07-12T06:24:24 | 2020-07-12T06:24:24 | 277,350,143 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,136 |
py
|
# FlipClock
# Copyright (c) .:TBX:. 2016
# Mod by Maggy
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from Components.Renderer.Renderer import Renderer
from enigma import ePixmap, eTimer, eDVBVolumecontrol
from Components.config import config
class speedyflipclock_metall1(Renderer):
def __init__(self):
Renderer.__init__(self)
self.timer = eTimer()
self.timer.callback.append(self.pollme)
GUI_WIDGET = ePixmap
def changed(self, what):
if not self.suspended:
value = self.source.text
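            # The source text names which clock digit this widget shows
            # (H1/H2, M1/M2, S1/S2 -- presumably hour, minute and second digits);
            # slice out that single character and load the matching flip-card image.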
if 'H1' in value:
value = value[3:4]
elif 'H2' in value:
value = value[4:5]
elif 'M1' in value:
value = value[3:4]
elif 'M2' in value:
value = value[4:5]
elif 'S1' in value:
value = value[3:4]
elif 'S2' in value:
value = value[4:5]
else:
value = 0
self.instance.setPixmapFromFile('/usr/share/enigma2/4ATV_2_2_speedy_black_fortuna/flipclock/flipclock_metall1/' + str(value) + '.png')
def pollme(self):
self.changed(None)
return
def onShow(self):
self.suspended = False
self.timer.start(200)
def onHide(self):
self.suspended = True
self.timer.stop()
|
[
"[email protected]"
] | |
77028e65d46a2e748e17451d5f7ea8d70505ece8
|
afdda41e01518db1a2685e9eb7fad524d7b5c69b
|
/ABC161/D/test.py
|
a064933ef448b042dd8bc488c08b80b5cdfacac1
|
[] |
no_license
|
kame3niku9/atcoder
|
4bea5598b6529b7dd5d84a4b342b7ef650b81141
|
b5042f31d43425e4ca1e02cc4bbfecbd5a738b49
|
refs/heads/master
| 2022-07-10T11:37:47.560392 | 2020-11-22T13:47:08 | 2020-11-22T13:47:08 | 233,927,925 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 961 |
py
|
from main import resolve
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
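        # Swap stdin/stdout for StringIO objects so resolve() reads the test
        # input, and its printed answer can be captured and compared.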
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """15"""
output = """23"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """1"""
output = """1"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """13"""
output = """21"""
self.assertIO(input, output)
def test_入力例_4(self):
input = """100000"""
output = """3234566667"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
fb3292faa83df637e9541d37e4a20e7c4c8eaabc
|
3562a01673bc62df91fdff621e48b82b15cb330c
|
/Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/data_preprocess.py
|
4a7456da3e023a18c6b161a2a1cd77ee4e089c56
|
[] |
no_license
|
laksh10-stan/Machine-Learning-A-Z
|
16bf070a6ddbde812b053b84d9f09186cf9a0257
|
ba2ac016879dc5ea4be4d670e7a8de5e24abbae2
|
refs/heads/master
| 2021-02-08T20:46:36.892343 | 2020-03-01T17:54:16 | 2020-03-01T17:54:16 | 244,195,310 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,303 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 00:25:37 2019
@author: laksh
"""
#importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,3].values
#taking care of the missing data
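# (Imputer fills the NaN entries in columns 1-2 with each column's mean;
#  recent scikit-learn releases provide this as sklearn.impute.SimpleImputer.)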
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN',strategy = 'mean', axis = 0)
imputer = imputer.fit(X[:,1:3])
X[:,1:3] = imputer.transform(X[:,1:3])
print(X)
print(y)
#Encoding categorical Data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
# Dummy Encoding
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
#Splitting dataset into training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
#Data Preprocessing Template
|
[
"[email protected]"
] | |
6fcb98bf130d6fe7794dcbb0f39cba96ea071f2b
|
316eada5e13da6207801831b115cb8bc0a8ed970
|
/politician/urls.py
|
60abfdbfc4ac76261894f1a93d0a5ba1e3722102
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
edward-ly/WeVoteServer
|
d942ecba975e2b5a2082a078c9bd2b35ad58d3d3
|
24b9f0d0cd065f933707dd08391f3883bab9fb37
|
refs/heads/develop
| 2021-01-23T21:21:39.227475 | 2019-05-09T16:04:36 | 2019-05-09T16:04:36 | 102,893,733 | 0 | 0 | null | 2017-09-08T18:51:44 | 2017-09-08T18:44:40 |
Python
|
UTF-8
|
Python
| false | false | 1,260 |
py
|
# politician/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from . import views_admin
from django.conf.urls import url
urlpatterns = [
url(r'^$', views_admin.politician_list_view, name='politician_list',),
url(r'^edit_process/$', views_admin.politician_edit_process_view, name='politician_edit_process'),
url(r'^delete/', views_admin.politician_delete_process_view, name='politician_delete_process'),
url(r'^import/$',
views_admin.politicians_import_from_master_server_view, name='politicians_import_from_master_server'),
url(r'^new/$', views_admin.politician_new_view, name='politician_new'),
url(r'^(?P<politician_id>[0-9]+)/edit/$', views_admin.politician_edit_view, name='politician_edit'),
url(r'^(?P<politician_id>[0-9]+)/retrieve_photos/$',
views_admin.politician_retrieve_photos_view, name='politician_retrieve_photos'),
# url(r'^(?P<politician_id>[0-9]+)/tag_new/$', views.politician_tag_new_view, name='politician_tag_new'),
# url(r'^(?P<politician_id>[0-9]+)/tag_new_process/$',
# views.politician_tag_new_process_view, name='politician_tag_new_process'),
# url(r'^(?P<pk>[0-9]+)/add_tag/$', views.PoliticianAddTagView.as_view(), name='politician_add_tag'),
]
|
[
"[email protected]"
] | |
a1407bef754ce906c79d678043c844ba8180c32a
|
47243c719bc929eef1475f0f70752667b9455675
|
/bungeni.main/branches/pre-mr-merge/bungeni/models/orm.py
|
97a2bcb2469cadc27c2c77f64d2192a5b1e961cd
|
[] |
no_license
|
malangalanga/bungeni-portal
|
bbf72ce6d69415b11287a8796b81d4eb6520f03a
|
5cf0ba31dfbff8d2c1b4aa8ab6f69c7a0ae9870d
|
refs/heads/master
| 2021-01-19T15:31:42.943315 | 2014-11-18T09:03:00 | 2014-11-18T09:03:00 | 32,453,405 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 37,055 |
py
|
import sqlalchemy as rdb
from sqlalchemy.orm import mapper, relation, column_property, backref
import schema
import domain
# Users
# general representation of a person
mapper(domain.User, schema.users,
properties={"user_addresses": relation(domain.UserAddress)}
)
# Groups
mapper(domain.Group, schema.groups,
primary_key=[schema.groups.c.group_id],
properties={
"members": relation(domain.GroupMembership),
"group_principal_id": column_property(
#
# !+ ATTENTION: the following sqlalchemy str concat (on c.type)
# gives some VERY strange behaviour :
#
# print "group." + schema.groups.c.type + "."
# >>> :type_1 || groups.type || :param_1
#
# print group.groups.type.
# >>> "group.%s." % (schema.groups.c.type)
#
("group." + schema.groups.c.type + "." +
rdb.cast(schema.groups.c.group_id, rdb.String)
).label("group_principal_id")
),
"contained_groups": relation(domain.Group,
backref=backref("parent_group",
remote_side=schema.groups.c.group_id)
),
# "keywords": relation(domain.Keyword, secondary=schema.groups_keywords)
},
polymorphic_on=schema.groups.c.type,
polymorphic_identity="group"
)
# Keywords for groups
#mapper(domain.Keyword, schema.keywords,
# properties = {
# "groups": relation(domain.Group,
# secondary=schema.groups_keywords, backref="keywords"
# ),
# }
#)
# delegate rights to act on behalf of a user
mapper(domain.UserDelegation, schema.user_delegations,
properties={
"user": relation(domain.User,
primaryjoin=rdb.and_(
schema.user_delegations.c.user_id == schema.users.c.user_id
),
uselist=False,
lazy=True
),
"delegation": relation(domain.User,
primaryjoin=rdb.and_(
(schema.user_delegations.c.delegation_id ==
schema.users.c.user_id),
schema.users.c.active_p == "A"
),
uselist=False,
lazy=True
),
}
)
# group subclasses
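# Pattern used throughout this module: each concrete type gets both a normal
# polymorphic mapper on the shared table and a flattened SELECT (aliased
# "list_<type>") mapped to a List* domain class, apparently for listing views.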
s_government = rdb.select([
schema.groups.c.group_id,
schema.groups.c.start_date,
schema.groups.c.end_date,
schema.groups.c.parent_group_id,
schema.groups.c.status,
schema.groups.c.short_name,
schema.groups.c.full_name
],
whereclause=schema.groups.c.type=="government",
from_obj=[schema.groups]
).alias("list_government")
mapper(domain.ListGovernment, s_government)
mapper(domain.Government,
inherits=domain.Group,
polymorphic_on=schema.groups.c.type,
polymorphic_identity="government"
)
s_parliament = rdb.select([
schema.groups.c.group_id,
schema.groups.c.start_date,
schema.groups.c.end_date,
schema.groups.c.parent_group_id,
schema.groups.c.short_name,
schema.groups.c.status,
schema.parliaments.c.election_date,
schema.groups.c.full_name
],
whereclause=schema.groups.c.type=="parliament",
from_obj=[schema.groups.join(schema.parliaments)]
).alias("list_parliament")
mapper(domain.ListParliament, s_parliament)
mapper(domain.Parliament, schema.parliaments,
inherits=domain.Group,
polymorphic_on=schema.groups.c.type,
polymorphic_identity="parliament"
)
mapper(domain.PoliticalEntity, schema.political_parties,
inherits=domain.Group,
polymorphic_on=schema.groups.c.type,
polymorphic_identity="political-entity"
)
mapper(domain.PoliticalParty,
inherits=domain.PoliticalEntity,
polymorphic_on=schema.groups.c.type,
polymorphic_identity="political-party"
)
mapper(domain.PoliticalGroup,
inherits=domain.PoliticalEntity,
polymorphic_on=schema.groups.c.type,
polymorphic_identity="political-group"
)
mapper(domain.Ministry,
inherits=domain.Group,
polymorphic_on=schema.groups.c.type,
polymorphic_identity="ministry"
)
s_committee = rdb.select([
schema.groups.c.group_id,
schema.groups.c.start_date,
schema.groups.c.end_date,
schema.groups.c.parent_group_id,
schema.groups.c.short_name,
schema.groups.c.status,
schema.committee_type.c.committee_type_id.label("_fk_committee_type_id"),
schema.committee_type.c.committee_type.label("committee_type_id"),
schema.committee_type.c.committee_type,
schema.groups.c.full_name],
whereclause=schema.groups.c.type=="committee",
from_obj=[schema.groups.join(
schema.committees.join(schema.committee_type),
schema.groups.c.group_id==schema.committees.c.committee_id
)]
).alias("list_committee")
mapper(domain.ListCommittee, s_committee)
mapper(domain.Committee, schema.committees,
inherits=domain.Group,
polymorphic_on=schema.groups.c.type,
polymorphic_identity="committee",
properties={
"committee_type": relation(domain.CommitteeType,
uselist=False,
lazy=False
),
},
)
mapper(domain.Office, schema.offices,
inherits=domain.Group,
polymorphic_on=schema.groups.c.type,
polymorphic_identity="office"
)
# Ministers and Committee members are defined by their group membership in a
# ministry or committee (group)
# we need to specify join clause for user explicitly because we have multiple fk
# to the user table.
mapper(domain.GroupMembership, schema.user_group_memberships,
properties={
"user":relation(domain.User,
primaryjoin=rdb.and_(schema.user_group_memberships.c.user_id ==
schema.users.c.user_id),
uselist=False,
lazy=False),
"group":relation(domain.Group,
primaryjoin=(schema.user_group_memberships.c.group_id ==
schema.groups.c.group_id),
uselist=False,
lazy=True),
"replaced":relation(domain.GroupMembership,
primaryjoin=(schema.user_group_memberships.c.replaced_id ==
schema.user_group_memberships.c.membership_id),
uselist=False,
lazy=True),
"member_titles":relation(domain.MemberRoleTitle)
},
polymorphic_on=schema.user_group_memberships.c.membership_type,
polymorphic_identity="member",
)
mapper(domain.MemberOfParliament, schema.parliament_memberships,
inherits=domain.GroupMembership,
primary_key=[schema.user_group_memberships.c.membership_id],
properties={
"constituency": relation(domain.Constituency,
primaryjoin=(schema.parliament_memberships.c.constituency_id ==
schema.constituencies.c.constituency_id),
uselist=False,
lazy=False),
"constituency_id": [schema.parliament_memberships.c.constituency_id],
"province": relation(domain.Province,
primaryjoin=(schema.parliament_memberships.c.province_id ==
schema.provinces.c.province_id),
uselist=False,
lazy=False),
"province_id": [schema.parliament_memberships.c.province_id],
"region": relation(domain.Region,
primaryjoin=(schema.parliament_memberships.c.region_id ==
schema.regions.c.region_id),
uselist=False,
lazy=False),
"region_id": [schema.parliament_memberships.c.region_id],
"party": relation(domain.PoliticalParty,
primaryjoin=(schema.parliament_memberships.c.party_id ==
schema.political_parties.c.party_id),
uselist=False,
lazy=False),
"party_id": [schema.parliament_memberships.c.party_id],
"start_date": column_property(
schema.user_group_memberships.c.start_date.label("start_date")),
"end_date": column_property(
schema.user_group_memberships.c.end_date.label("end_date")),
},
polymorphic_on=schema.user_group_memberships.c.membership_type,
polymorphic_identity="parliamentmember",
)
s_member_of_parliament = rdb.select([
schema.user_group_memberships.c.membership_id,
schema.user_group_memberships.c.start_date,
schema.user_group_memberships.c.end_date,
schema.user_group_memberships.c.group_id,
schema.parliament_memberships.c.elected_nominated,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name+" "+schema.users.c.last_name
).label("user_id"),
schema.users.c.user_id.label("_fk_user_id"),
schema.constituencies.c.name.label("constituency_id"),
schema.parliament_memberships.c.constituency_id.label(
"_fk_constituency_id"),
schema.constituencies.c.name.label("name"),
# !+PROVINCE_REGION(mr, aug-2010) is this needed?
schema.provinces.c.province_id.label("province_id"),
schema.parliament_memberships.c.province_id.label("_fk_province_id"),
schema.provinces.c.province_id.label("province"),
schema.regions.c.region_id.label("region_id"),
schema.parliament_memberships.c.region_id.label("_fk_region_id"),
schema.regions.c.region_id.label("region"),
],
from_obj=[
schema.parliament_memberships.join(schema.constituencies
).join(schema.provinces # !+PROVINCE(mr, aug-2010) needed?
).join(schema.regions # !+REGION(mr, aug-2010) needed?
).join(schema.user_group_memberships
).join(schema.users,
schema.user_group_memberships.c.user_id==schema.users.c.user_id)
]
).alias("list_member_of_parliament")
mapper(domain.ListMemberOfParliament, s_member_of_parliament)
s_minister = rdb.select([
schema.user_group_memberships.c.membership_id,
schema.user_group_memberships.c.start_date,
schema.user_group_memberships.c.end_date,
schema.user_group_memberships.c.group_id,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("user_id"),
schema.users.c.user_id.label("_fk_user_id"),
],
whereclause=schema.user_group_memberships.c.membership_type=="minister",
from_obj=[schema.user_group_memberships.join(
schema.users,
schema.user_group_memberships.c.user_id==schema.users.c.user_id
)],
).alias("list_minister")
mapper(domain.ListMinister, s_minister)
mapper(domain.Minister,
inherits=domain.GroupMembership,
polymorphic_on=schema.user_group_memberships.c.membership_type,
polymorphic_identity="minister",
)
s_committeemember = rdb.select([
schema.user_group_memberships.c.membership_id,
schema.user_group_memberships.c.start_date,
schema.user_group_memberships.c.end_date,
schema.user_group_memberships.c.group_id,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("user_id"),
schema.users.c.user_id.label("_fk_user_id"),
],
whereclause=schema.user_group_memberships.c.membership_type=="committeemember",
from_obj=[schema.user_group_memberships.join(
schema.users,
schema.user_group_memberships.c.user_id==schema.users.c.user_id
)],
).alias("list_committeemember")
mapper(domain.ListCommitteeMember, s_committeemember)
mapper(domain.CommitteeMember,
inherits=domain.GroupMembership,
polymorphic_on=schema.user_group_memberships.c.membership_type,
polymorphic_identity="committeemember",
)
s_partymember = rdb.select([
schema.user_group_memberships.c.membership_id,
schema.user_group_memberships.c.start_date,
schema.user_group_memberships.c.end_date,
schema.user_group_memberships.c.group_id,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("user_id"),
schema.users.c.user_id.label("_fk_user_id"),
],
whereclause=schema.user_group_memberships.c.membership_type=="partymember",
from_obj=[schema.user_group_memberships.join(
schema.users,
schema.user_group_memberships.c.user_id==schema.users.c.user_id
)],
).alias("list_partymember")
mapper(domain.ListPartyMember, s_partymember)
mapper(domain.PartyMember,
inherits=domain.GroupMembership,
polymorphic_on=schema.user_group_memberships.c.membership_type,
polymorphic_identity="partymember",
)
s_officemember = rdb.select([
schema.user_group_memberships.c.membership_id,
schema.user_group_memberships.c.start_date,
schema.user_group_memberships.c.end_date,
schema.user_group_memberships.c.group_id,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("user_id"),
schema.users.c.user_id.label("_fk_user_id"),
],
whereclause=schema.user_group_memberships.c.membership_type=="officemember",
from_obj=[schema.user_group_memberships.join(
schema.users,
schema.user_group_memberships.c.user_id==schema.users.c.user_id
)],
).alias("list_officemember")
mapper(domain.ListOfficeMember, s_officemember)
mapper(domain.OfficeMember,
inherits=domain.GroupMembership,
polymorphic_on=schema.user_group_memberships.c.membership_type,
polymorphic_identity="officemember",
)
# staff assigned to a group (committee, ...)
s_committeestaff = rdb.select([
schema.user_group_memberships.c.membership_id,
schema.user_group_memberships.c.start_date,
schema.user_group_memberships.c.end_date,
schema.user_group_memberships.c.group_id,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("user_id"),
schema.users.c.user_id.label("_fk_user_id"),
],
whereclause=schema.user_group_memberships.c.membership_type=="committeestaff",
from_obj=[schema.user_group_memberships.join(
schema.users,
schema.user_group_memberships.c.user_id==schema.users.c.user_id
)],
).alias("list_committeestaff")
mapper(domain.ListCommitteeStaff, s_committeestaff)
mapper(domain.CommitteeStaff,
inherits=domain.GroupMembership,
polymorphic_on=schema.user_group_memberships.c.membership_type,
polymorphic_identity="committeestaff",
)
mapper(domain.ParliamentSession, schema.parliament_sessions)
mapper(domain.GroupSitting, schema.sittings,
properties={
"sitting_type": relation(domain.SittingType, uselist=False),
"group": relation(domain.Group,
primaryjoin=schema.sittings.c.group_id == schema.groups.c.group_id,
uselist=False,
lazy=True
),
"start_date": column_property(
schema.sittings.c.start_date.label("start_date")
),
"end_date": column_property(
schema.sittings.c.end_date.label("end_date")
),
"item_schedule": relation(domain.ItemSchedule,
order_by=schema.items_schedule.c.planned_order
),
"venue": relation(domain.Venue)
}
)
mapper(domain.ResourceType, schema.resource_types)
mapper(domain.Resource, schema.resources)
mapper(domain.ResourceBooking, schema.resourcebookings)
mapper(domain.Venue, schema.venues)
##############################
# Parliamentary Items
mapper(domain.ParliamentaryItem, schema.parliamentary_items,
polymorphic_on=schema.parliamentary_items.c.type,
polymorphic_identity="item",
properties={
"owner": relation(domain.User,
primaryjoin=rdb.and_(schema.parliamentary_items.c.owner_id ==
schema.users.c.user_id),
uselist=False,
lazy=False),
"consignatories": relation(domain.User,
secondary=schema.consignatories),
"attached_files": relation(domain.AttachedFile)
}
)
s_heading = rdb.select([
schema.parliamentary_items.c.parliamentary_item_id,
schema.parliamentary_items.c.short_name,
schema.parliamentary_items.c.submission_date.label("submission_date"),
schema.parliamentary_items.c.status,
schema.parliamentary_items.c.status_date,
schema.parliamentary_items.c.parliament_id,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("owner_id"),
schema.parliamentary_items.c.owner_id.label("_fk_owner_id")
],
whereclause=schema.parliamentary_items.c.type == "heading",
from_obj=[schema.parliamentary_items.join(
schema.users, schema.parliamentary_items.c.owner_id ==
schema.users.c.user_id
)],
).alias("list_heading")
mapper(domain.ListHeading, s_heading)
mapper(domain.Heading,
inherits=domain.ParliamentaryItem,
polymorphic_on=schema.parliamentary_items.c.type,
polymorphic_identity="heading"
)
s_question = rdb.select([
schema.parliamentary_items.c.parliamentary_item_id,
schema.parliamentary_items.c.short_name,
schema.parliamentary_items.c.submission_date.label("submission_date"),
schema.parliamentary_items.c.status,
schema.parliamentary_items.c.status_date,
schema.parliamentary_items.c.parliament_id,
schema.questions.c.approval_date,
schema.questions.c.ministry_submit_date,
schema.questions.c.question_number,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("owner_id"),
schema.groups.c.full_name.label("ministry_id"),
schema.parliamentary_items.c.owner_id.label("_fk_owner_id"),
schema.questions.c.ministry_id.label("_fk_ministry_id"),
],
whereclause=schema.parliamentary_items.c.type == "question",
from_obj=[schema.parliamentary_items.join(
schema.questions.join(
schema.groups,
schema.questions.c.ministry_id == schema.groups.c.group_id),
schema.parliamentary_items.c.parliamentary_item_id ==
schema.questions.c.question_id
).join(
schema.users,
schema.parliamentary_items.c.owner_id == schema.users.c.user_id)
],
).alias("list_questions")
mapper(domain.ListQuestion, s_question)
mapper(domain.Question, schema.questions,
inherits=domain.ParliamentaryItem,
polymorphic_on=schema.parliamentary_items.c.type,
polymorphic_identity="question",
properties={
"changes":relation(domain.QuestionChange,
backref="origin",
cascade="all, delete-orphan",
passive_deletes=False
),
"ministry": relation(domain.Ministry),
}
)
mapper(domain.QuestionChange, schema.question_changes)
mapper(domain.QuestionVersion, schema.question_versions,
properties={
"change": relation(domain.QuestionChange, uselist=False),
"head": relation(domain.Question, uselist=False),
"attached_files": relation(domain.AttachedFileVersion,
primaryjoin=rdb.and_(
schema.question_versions.c.content_id ==
schema.attached_file_versions.c.item_id,
schema.question_versions.c.version_id ==
schema.attached_file_versions.c.file_version_id
),
foreign_keys=[schema.attached_file_versions.c.item_id,
schema.attached_file_versions.c.file_version_id
]
),
}
)
s_motion = rdb.select([
schema.parliamentary_items.c.parliamentary_item_id,
schema.parliamentary_items.c.short_name,
schema.parliamentary_items.c.submission_date,
schema.parliamentary_items.c.status,
schema.parliamentary_items.c.status_date,
schema.parliamentary_items.c.parliament_id,
schema.motions.c.approval_date,
schema.motions.c.motion_number,
schema.motions.c.notice_date,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
schema.parliamentary_items.c.owner_id.label("_fk_owner_id"),
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("owner_id"),
],
whereclause=schema.parliamentary_items.c.type == "motion",
from_obj=[schema.parliamentary_items.join(
schema.motions).join(
schema.users,
schema.parliamentary_items.c.owner_id == schema.users.c.user_id
)
],
).alias("list_motion")
mapper(domain.ListMotion, s_motion)
mapper(domain.Motion, schema.motions,
inherits=domain.ParliamentaryItem,
polymorphic_on=schema.parliamentary_items.c.type,
polymorphic_identity="motion",
properties={
"changes": relation(domain.MotionChange,
backref="origin",
cascade="all, delete-orphan",
passive_deletes=False
),
}
)
mapper(domain.MotionChange, schema.motion_changes)
mapper(domain.MotionVersion, schema.motion_versions,
properties={
"change":relation(domain.MotionChange, uselist=False),
"head": relation(domain.Motion, uselist=False),
"attached_files": relation(domain.AttachedFileVersion,
primaryjoin=rdb.and_(
schema.motion_versions.c.content_id ==
schema.attached_file_versions.c.item_id,
schema.motion_versions.c.version_id ==
schema.attached_file_versions.c.file_version_id
),
foreign_keys=[
schema.attached_file_versions.c.item_id,
schema.attached_file_versions.c.file_version_id
]
),
}
)
s_bill = rdb.select([
schema.parliamentary_items.c.parliamentary_item_id,
schema.parliamentary_items.c.short_name,
schema.parliamentary_items.c.submission_date,
schema.parliamentary_items.c.status,
schema.parliamentary_items.c.status_date,
schema.parliamentary_items.c.parliament_id,
schema.bills.c.publication_date,
schema.bills.c.ministry_id,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("owner_id"),
schema.parliamentary_items.c.owner_id.label("_fk_owner_id"),
],
whereclause=schema.parliamentary_items.c.type == "bill",
from_obj=[schema.parliamentary_items.join(
schema.bills).join(
schema.users,
schema.parliamentary_items.c.owner_id == schema.users.c.user_id
)
],
).alias("list_bill")
mapper(domain.ListBill, s_bill)
mapper(domain.Bill, schema.bills,
inherits=domain.ParliamentaryItem,
polymorphic_on=schema.parliamentary_items.c.type,
polymorphic_identity="bill",
properties={
"changes": relation(domain.BillChange,
backref="origin",
cascade="all, delete-orphan",
passive_deletes=False
)
}
)
mapper(domain.BillChange, schema.bill_changes)
mapper(domain.BillVersion, schema.bill_versions,
properties={
"change": relation(domain.BillChange, uselist=False),
"head": relation(domain.Bill, uselist=False),
"attached_files": relation(domain.AttachedFileVersion,
primaryjoin=rdb.and_(
schema.bill_versions.c.content_id ==
schema.attached_file_versions.c.item_id,
schema.bill_versions.c.version_id ==
schema.attached_file_versions.c.file_version_id
),
foreign_keys=[
schema.attached_file_versions.c.item_id,
schema.attached_file_versions.c.file_version_id
]
),
}
)
s_event = rdb.select([
schema.parliamentary_items.c.parliamentary_item_id,
schema.parliamentary_items.c.short_name,
schema.parliamentary_items.c.submission_date,
schema.parliamentary_items.c.status,
schema.parliamentary_items.c.status_date,
schema.parliamentary_items.c.parliament_id,
schema.event_items.c.event_date,
schema.event_items.c.item_id,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("owner_id"),
schema.parliamentary_items.c.owner_id.label("_fk_owner_id"),
],
whereclause=schema.parliamentary_items.c.type == "event",
from_obj=[schema.parliamentary_items.join(
schema.agenda_items).join(
schema.users,
schema.parliamentary_items.c.owner_id == schema.users.c.user_id
)],
).alias("list_event")
mapper(domain.ListEventItem, s_event)
mapper(domain.EventItem, schema.event_items,
inherits=domain.ParliamentaryItem,
inherit_condition=(
schema.event_items.c.event_item_id ==
schema.parliamentary_items.c.parliamentary_item_id
),
polymorphic_on=schema.parliamentary_items.c.type,
polymorphic_identity="event"
)
s_agendaitem = rdb.select([
schema.parliamentary_items.c.parliamentary_item_id,
schema.parliamentary_items.c.short_name,
schema.parliamentary_items.c.submission_date,
schema.parliamentary_items.c.status,
schema.parliamentary_items.c.status_date,
schema.parliamentary_items.c.parliament_id,
schema.agenda_items.c.approval_date,
schema.agenda_items.c.group_id,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("owner_id"),
schema.parliamentary_items.c.owner_id.label("_fk_owner_id"),
],
whereclause=schema.parliamentary_items.c.type == "agendaitem",
from_obj=[schema.parliamentary_items.join(
schema.agenda_items).join(
schema.users,
schema.parliamentary_items.c.owner_id == schema.users.c.user_id
)],
).alias("list_agendaitem")
mapper(domain.ListAgendaItem, s_agendaitem)
mapper(domain.AgendaItem, schema.agenda_items,
inherits=domain.ParliamentaryItem,
polymorphic_on=schema.parliamentary_items.c.type,
polymorphic_identity="agendaitem",
properties={
"changes": relation(domain.AgendaItemChange,
backref="origin",
cascade="all, delete-orphan",
passive_deletes=False
),
"group": relation(domain.Group,
primaryjoin=(
schema.agenda_items.c.group_id == schema.groups.c.group_id),
backref="agenda_items",
lazy=False,
uselist=False
)
}
)
mapper(domain.AgendaItemChange, schema.agenda_item_changes)
mapper(domain.AgendaItemVersion, schema.agenda_item_versions,
properties={
"change": relation(domain.AgendaItemChange, uselist=False),
"head": relation(domain.AgendaItem, uselist=False),
"attached_files": relation(domain.AttachedFileVersion,
primaryjoin=rdb.and_(
schema.agenda_item_versions.c.content_id ==
schema.attached_file_versions.c.item_id,
schema.agenda_item_versions.c.version_id ==
schema.attached_file_versions.c.file_version_id
),
foreign_keys=[
schema.attached_file_versions.c.item_id,
schema.attached_file_versions.c.file_version_id
]
),
}
)
s_tableddocument = rdb.select([
schema.parliamentary_items.c.parliamentary_item_id,
schema.parliamentary_items.c.short_name,
schema.parliamentary_items.c.submission_date,
schema.parliamentary_items.c.status,
schema.parliamentary_items.c.status_date,
schema.parliamentary_items.c.parliament_id,
schema.tabled_documents.c.approval_date,
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("owner_id"),
schema.parliamentary_items.c.owner_id.label("_fk_owner_id"), ],
whereclause=schema.parliamentary_items.c.type == "tableddocument",
from_obj=[schema.parliamentary_items.join(
schema.tabled_documents).join(
schema.users,
schema.parliamentary_items.c.owner_id ==
schema.users.c.user_id
)],
).alias("list_tableddocument")
mapper(domain.ListTabledDocument, s_tableddocument)
mapper(domain.TabledDocument, schema.tabled_documents,
inherits=domain.ParliamentaryItem,
polymorphic_on=schema.parliamentary_items.c.type,
polymorphic_identity="tableddocument",
properties={
"changes": relation(domain.TabledDocumentChange,
backref="origin",
cascade="all, delete-orphan",
passive_deletes=False
),
}
)
mapper(domain.TabledDocumentChange, schema.tabled_document_changes)
mapper(domain.TabledDocumentVersion, schema.tabled_document_versions,
properties={
"change": relation(domain.TabledDocumentChange, uselist=False),
"head": relation(domain.TabledDocument, uselist=False),
"attached_files": relation(domain.AttachedFileVersion,
primaryjoin=rdb.and_(
schema.tabled_document_versions.c.content_id ==
schema.attached_file_versions.c.item_id,
schema.tabled_document_versions.c.version_id ==
schema.attached_file_versions.c.file_version_id
),
foreign_keys=[
schema.attached_file_versions.c.item_id,
schema.attached_file_versions.c.file_version_id
]
),
}
)
mapper(domain.AttachedFile, schema.attached_files,
properties={
"changes": relation(domain.AttachedFileChange,
backref="origin",
cascade="all, delete-orphan",
passive_deletes=False
),
}
)
mapper(domain.AttachedFileChange, schema.attached_file_changes)
mapper(domain.AttachedFileVersion, schema.attached_file_versions,
properties={
"change": relation(domain.AttachedFileChange, uselist=False),
"head": relation(domain.AttachedFile, uselist=False)
}
)
#Items scheduled for a sitting expressed as a relation
# to their item schedule
mapper(domain.ItemSchedule, schema.items_schedule,
properties={
"item": relation(
domain.ParliamentaryItem,
uselist=False
),
"discussion": relation(
domain.ScheduledItemDiscussion,
uselist=False,
cascade="all, delete-orphan"
),
"sitting": relation(domain.GroupSitting, uselist=False),
}
)
mapper(domain.ScheduledItemDiscussion, schema.item_discussion)
# items scheduled for a sitting
# expressed as a join between item and schedule
s_consignatories = rdb.select([
schema.consignatories.c.item_id,
schema.consignatories.c.user_id.label("consignatory"),
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("user_id"),
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
],
from_obj=[schema.consignatories.join(schema.users)],
).alias("list_consignatories")
mapper(domain.ListConsignatory, s_consignatories)
mapper(domain.Consignatory, schema.consignatories,
properties={
"item": relation(domain.ParliamentaryItem, uselist=False),
"user": relation(domain.User, uselist=False)
}
)
mapper(domain.BillType, schema.bill_types)
#mapper(domain.DocumentSource, schema.document_sources)
mapper(domain.HoliDay, schema.holidays)
######################
#
s_constituency = rdb.select([
schema.constituencies.c.constituency_id,
schema.constituencies.c.name,
schema.constituencies.c.start_date,
schema.constituencies.c.end_date
],
from_obj=[schema.constituencies]
).alias("list_constituency")
mapper(domain.ListConstituency, s_constituency)
mapper(domain.Constituency, schema.constituencies)
mapper(domain.Province, schema.provinces)
mapper(domain.Region, schema.regions)
mapper(domain.Country, schema.countries)
mapper(domain.ConstituencyDetail, schema.constituency_details,
properties={
"constituency": relation(domain.Constituency,
uselist=False,
lazy=True,
backref="details"
),
}
)
mapper(domain.CommitteeType, schema.committee_type)
mapper(domain.SittingType, schema.sitting_type)
s_sittingattendance = rdb.select([
schema.sitting_attendance.c.sitting_id,
schema.sitting_attendance.c.attendance_id.label("_fk_attendance_id"),
schema.sitting_attendance.c.member_id.label("_fk_member_id"),
schema.attendance_type.c.attendance_type.label("attendance_id"),
(schema.users.c.first_name + " " + schema.users.c.last_name
).label("member_id"),
schema.users.c.first_name,
schema.users.c.middle_name,
schema.users.c.last_name,
],
from_obj=[schema.sitting_attendance.join(
schema.attendance_type).join(schema.users)
],
).alias("list_sittingattendance")
mapper(domain.ListGroupSittingAttendance, s_sittingattendance)
mapper(domain.GroupSittingAttendance, schema.sitting_attendance,
properties={
"user": relation(domain.User, uselist=False, lazy=False),
"attendance_type": relation(domain.AttendanceType,
uselist=False,
lazy=False
),
"sitting": relation(domain.GroupSitting, uselist=False, lazy=False),
}
)
mapper(domain.AttendanceType, schema.attendance_type)
mapper(domain.MemberTitle, schema.user_role_types)
mapper(domain.MemberRoleTitle, schema.role_titles.join(schema.addresses),
properties={
"title_name": relation(domain.MemberTitle, uselist=False, lazy=False),
}
)
mapper(domain.AddressType, schema.address_types)
mapper(domain.UserAddress, schema.addresses)
s_group_item_assignments = rdb.select([
schema.group_item_assignments.c.assignment_id,
schema.group_item_assignments.c.group_id.label("_fk_group_id"),
schema.group_item_assignments.c.item_id.label("_fk_item_id"),
schema.groups.c.short_name.label("item_id"),
(schema.groups.c.short_name + " - " + schema.groups.c.full_name
).label("group_id"),
schema.group_item_assignments.c.start_date,
schema.group_item_assignments.c.end_date,
schema.group_item_assignments.c.due_date,
],
from_obj=[schema.groups.join(
schema.group_item_assignments).join(schema.parliamentary_items)
],
).alias("list_group_item_assignments")
mapper(domain.ListGroupItemAssignment, s_group_item_assignments)
mapper(domain.GroupItemAssignment, schema.group_item_assignments,
properties={
"group": relation(domain.Group,
primaryjoin=(schema.group_item_assignments.c.group_id ==
schema.groups.c.group_id),
backref="group_assignments",
lazy=True,
uselist=False
),
"item": relation(domain.ParliamentaryItem,
backref="item_assignments",
uselist=False
),
}
)
mapper(domain.ItemGroupItemAssignment, schema.group_item_assignments,
inherits=domain.GroupItemAssignment
)
mapper(domain.GroupGroupItemAssignment, schema.group_item_assignments,
inherits=domain.GroupItemAssignment
)
mapper(domain.Report, schema.reports,
inherits=domain.ParliamentaryItem,
polymorphic_on=schema.parliamentary_items.c.type,
polymorphic_identity="report"
)
mapper(domain.SittingReport, schema.sitting_reports,
properties={
"sitting": relation(domain.GroupSitting,
backref="reports",
lazy=True,
uselist=False
),
"report": relation(domain.Report,
backref="sittings",
lazy=True,
uselist=False
),
}
)
mapper(domain.Report4Sitting, schema.sitting_reports,
inherits=domain.Report
)
mapper(domain.ObjectTranslation, schema.translations)
|
[
"ashok.hariharan@fc5d704a-7d24-0410-8c4a-57ddeba10ffc"
] |
ashok.hariharan@fc5d704a-7d24-0410-8c4a-57ddeba10ffc
|
2971a3b1ec52cbc72aa4073ad4c8172d91dccafd
|
4b265adfae6d91d614a628705571805a2c3d241e
|
/migrations/versions/3e4b752d4b66_.py
|
4be97e84029f717fc303084c944902289e0ab040
|
[] |
no_license
|
1010784344/mybbs
|
02d85a661f42b648cd0939c0550959d758f0717d
|
0787c77c32f78de6e6cf16db55c3502bf43307d2
|
refs/heads/master
| 2022-12-22T17:45:55.908981 | 2020-03-02T06:44:10 | 2020-03-02T06:44:10 | 244,299,839 | 0 | 0 | null | 2022-09-16T18:18:56 | 2020-03-02T06:45:01 |
Python
|
UTF-8
|
Python
| false | false | 1,042 |
py
|
"""empty message
Revision ID: 3e4b752d4b66
Revises: 907d0dec1971
Create Date: 2018-06-10 22:55:59.028570
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3e4b752d4b66'
down_revision = '907d0dec1971'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('create_time', sa.DateTime(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.Column('author_id', sa.String(length=50), nullable=False),
sa.ForeignKeyConstraint(['author_id'], ['front_user.id'], ),
sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('comment')
# ### end Alembic commands ###
|
[
"[email protected]"
] | |
b441d0ca2ecfaf7defd1eaf369f3be18a2441a4e
|
2a1f4c4900693c093b2fcf4f84efa60650ef1424
|
/py/cli/factory_env_unittest.py
|
d04bc714f33ffe86931dfcac330040122dbf74b8
|
[
"BSD-3-Clause"
] |
permissive
|
bridder/factory
|
b925f494303728fa95017d1ba3ff40ac5cf6a2fd
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
refs/heads/master
| 2023-08-10T18:51:08.988858 | 2021-09-21T03:25:28 | 2021-09-21T03:25:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,655 |
py
|
#!/usr/bin/env python3
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import unittest
from cros.factory.cli import factory_env
from cros.factory.utils import process_utils
FACTORY_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
FACTORY_ENV_TOOL = os.path.join(FACTORY_ROOT, "bin/factory_env")
FACTORY_ENV_SCRIPT = os.path.join(FACTORY_ROOT, "py/cli/factory_env.py")
DUMMY_SCRIPT = os.path.join(
FACTORY_ROOT, "py/cli/testdata/scripts/dummy_script.py")
DUMMY_EXCUTABLE = os.path.join(
FACTORY_ROOT, "py/cli/testdata/bin/dummy_script")
class FactoryEnvUnittest(unittest.TestCase):
def testSymbolicLinkToFactoryEnv(self):
self.assertEqual(0,
process_utils.LogAndCheckCall(DUMMY_EXCUTABLE).returncode)
def testFactoryEnvWithSymbolicLinkToFactoryEnv(self):
self.assertEqual(0, process_utils.LogAndCheckCall(
[FACTORY_ENV_TOOL, DUMMY_EXCUTABLE]).returncode)
def testMultipleFactoryEnv(self):
self.assertEqual(0, process_utils.LogAndCheckCall(
[FACTORY_ENV_TOOL, FACTORY_ENV_TOOL, DUMMY_EXCUTABLE]).returncode)
def testFactoryEnvWithScript(self):
self.assertEqual(0, process_utils.LogAndCheckCall(
[FACTORY_ENV_TOOL, DUMMY_SCRIPT]).returncode)
def testHelpMessage(self):
process = process_utils.Spawn(
[FACTORY_ENV_TOOL, '--help'], read_stdout=True)
self.assertEqual(factory_env.HELP_MSG, process.stdout_data)
self.assertEqual(1, process.returncode)
def testScriptNotFound(self):
process = process_utils.Spawn(
[FACTORY_ENV_TOOL, 'script/not/found'], read_stdout=True)
self.assertEqual(factory_env.HELP_MSG, process.stdout_data)
self.assertEqual(1, process.returncode)
def testPythonInterpreter(self):
output = process_utils.CheckOutput(
[FACTORY_ENV_TOOL, 'python', '-c', 'import sys; print(sys.path)'])
self.assertIn('factory/py_pkg', output)
class SymlinkUnittest(unittest.TestCase):
def testLegalityForSymlinkInBin(self):
for path in glob.glob(os.path.join(FACTORY_ROOT, "bin/**")):
if not os.path.islink(path):
continue
real_path = os.path.realpath(path)
if not real_path.endswith('.py'):
continue
# Make sure bin/tool_name links to FACTORY_ENV_SCRIPT
self.assertEqual(real_path, FACTORY_ENV_SCRIPT)
# Make sure py/cli/tool_name.py exist
self.assertTrue(os.path.exists(factory_env.GetRealScriptPath(path)))
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
6f3d59a5ac32817a6c36952aa29e3cdf020c6b25
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02687/s942211700.py
|
9fe247fc4e86e7d6a02581a8de06497d47b904f9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 60 |
py
|
s = {input()}
test = {'ABC', 'ARC'}
print(list(test - s)[0])
|
[
"[email protected]"
] | |
bfe2741feb16a2c462c0fd4040ed8d43e1017389
|
c5c56d7c14b4518e53bcde2527b9cc6e53a7e1b9
|
/doctests/yatzy.py
|
6dc991e1845f9366663dfba8bb5396adf434c97b
|
[] |
no_license
|
lancelote/pluralsight-unit-testing-python
|
0402a39e3800eec49f2be529e684d028689d3b47
|
fd5ce8264bc95ed66109c4fa575a177248c3d49a
|
refs/heads/master
| 2021-01-10T08:06:39.605195 | 2016-03-23T08:15:25 | 2016-03-23T08:15:25 | 51,952,064 | 4 | 6 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,470 |
py
|
# coding=utf-8
""""
Yatzy Game
"""
from operator import itemgetter
def dice_counts(dice):
"""Make a dictionary of how many of each value are in the dice
Args:
dice (lst): A sorted list of 5 integers indicating the dice rolled
Returns:
dict: How many of each value are in the dice
Examples:
>>> sorted(dice_counts([1, 2, 2, 3, 3]).items())
[(1, 1), (2, 2), (3, 2), (4, 0), (5, 0), (6, 0)]
>>> dice_counts('12345')
Traceback (most recent call last):
...
TypeError: Can't convert 'int' object to str implicitly
"""
return {x: dice.count(x) for x in range(1, 7)}
def small_straight(dice):
"""Score the given roll in the 'Small Straight' Yatzy category
Args:
dice (lst): A sorted list of 5 integers indicating the dice rolled
Returns:
int: Score
Examples:
>>> small_straight([1, 2, 3, 4, 5])
15
>>> small_straight([1, 2, 3, 4, 4])
0
This function works with lists or sets or other collection types:
>>> small_straight({1, 2, 3, 4, 5})
15
>>> small_straight([5, 4, 3, 2, 1])
15
"""
return sum(dice) if sorted(dice) == [1, 2, 3, 4, 5] else 0
def yatzy(dice):
"""Score the given roll in the 'Yatzy' category
Args:
dice (list): A sorted list of 5 integers indicating the dice rolled
Returns:
int: Score
Examples:
>>> yatzy([1, 1, 1, 1, 1])
50
>>> yatzy([4, 4, 4, 4, 4])
50
>>> yatzy([4, 4, 4, 4, 1])
0
"""
counts = dice_counts(dice)
if 5 in counts.values():
return 50
return 0
def full_house(dice):
"""Score the given roll in the 'Full House' category
Args:
dice (list): A sorted list of 5 integers indicating the dice rolled
Returns:
int: Score
Examples:
>>> full_house([1, 1, 2, 2, 2])
8
>>> full_house([6, 6, 6, 2, 2])
22
>>> full_house([1, 2, 3, 4, 5])
0
>>> full_house([1, 2, 2, 1, 3])
0
"""
counts = dice_counts(dice)
if 2 in counts.values() and 3 in counts.values():
return sum(dice)
return 0
def ones(dice):
"""Scores the given roll in the 'Ones' category
Args:
dice (list): A sorted list of 5 integers indicating the dice rolled
Returns:
int: Score
"""
return dice_counts(dice)[1]
def twos(dice):
"""Scores the given roll in the 'Twos' category
Args:
dice (list): A sorted list of 5 integers indicating the dice rolled
Returns:
int: Score
"""
return dice_counts(dice)[2]*2
ALL_CATEGORIES = [full_house, yatzy, small_straight, ones, twos]
def scores_in_categories(dice, categories=ALL_CATEGORIES):
"""Score the dice in each category and return those with a non-zero score
Args:
dice (list): A sorted list of 5 integers indicating the dice rolled
categories (list): A list of category functions
Returns:
list: Category scores
Examples:
>>> scores = scores_in_categories([1, 1, 2, 2, 2])
>>> [(score, category.__name__) for (score, category) in scores]
[(8, 'full_house'), (6, 'twos'), (2, 'ones')]
"""
scores = [(category(dice), category) for category in categories
if category(dice) > 0]
return sorted(scores, reverse=True, key=itemgetter(0))
|
[
"[email protected]"
] | |
c37d59611b5baee508727e5a3157ac82893f1bf2
|
54053da876c54cebf241ff74360a71bef44e030c
|
/django/agecalculator/manage.py
|
ed85fc9969a825aba7eb861b88569fd28ebe7ac5
|
[] |
no_license
|
neeteliz/luminarpython1
|
15cd4e169d9209a1ceea425000c7621e0dcfb9f5
|
9187a3c6a1cde5a57aa62a7b56f53e37361e72aa
|
refs/heads/master
| 2020-12-08T11:40:32.573020 | 2020-02-12T06:02:50 | 2020-02-12T06:02:50 | 232,972,757 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 545 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "agecalculator.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
22f25d5fd6e16c9f624041b28bb63a5c58cbb490
|
8015f1c62a2cb4efd21aa8938336913bf8117868
|
/bamap/ba0892.pngMap.py
|
d050ebdbdb8e5564a858cf28c735506b3aed9f60
|
[] |
no_license
|
GamerNoTitle/Beepers-and-OLED
|
675b5e3c179df0f0e27b42bf594c43860d03b9af
|
afe1340e5394ae96bda5f9022a8a66824368091e
|
refs/heads/master
| 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,468 |
py
|
ba0892.pngMap = [
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111001011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111100000011111111111111111111111111000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111110000000001111111111111111111100110000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111110000000000001011111111111100000000000000111111111111111110011111111111111111111111111111111111',
'11111111111111111111111111111111110000000000000000011111000000000000000000111111111111110000000111111111111111111111111111111111',
'11111111111111111111111111111111110000000000000000000000000000000000000010001101000000000000001111111111111111111111111111111111',
'11111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111',
'11111111111111111111111111111111111100010100100000000000000000000000000000000000000000000000001111111111111111111111111111111111',
'11111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000000000000000000001011111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000001111111111111101111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000000000000000000000000001011111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100000000000000000000000000011111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100000000000000000000000000111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000100111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000101111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000001111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
]
|
[
"[email protected]"
] | |
91b0db98ecd89c19b85d3d89b875b8fb59e63615
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/unit/modules/test_devmap.py
|
f7fc9f09ea1f41f68a666b5c0b0b0a4431769644
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556 | 2023-06-29T23:12:28 | 2023-07-19T11:47:47 | 1,390,248 | 11,026 | 6,296 |
Apache-2.0
| 2023-09-14T20:45:37 | 2011-02-20T20:16:56 |
Python
|
UTF-8
|
Python
| false | false | 991 |
py
|
"""
:codeauthor: Rupesh Tare <[email protected]>
"""
import os.path
import pytest
import salt.modules.devmap as devmap
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {devmap: {}}
def test_multipath_list():
"""
Test for Device-Mapper Multipath list
"""
mock = MagicMock(return_value="A")
with patch.dict(devmap.__salt__, {"cmd.run": mock}):
assert devmap.multipath_list() == ["A"]
def test_multipath_flush():
"""
Test for Device-Mapper Multipath flush
"""
mock = MagicMock(return_value=False)
with patch.object(os.path, "exists", mock):
assert devmap.multipath_flush("device") == "device does not exist"
mock = MagicMock(return_value=True)
with patch.object(os.path, "exists", mock):
mock = MagicMock(return_value="A")
with patch.dict(devmap.__salt__, {"cmd.run": mock}):
assert devmap.multipath_flush("device") == ["A"]
|
[
"[email protected]"
] | |
db3caed3bea7b8e75f04ec4721bc0ebd0e3624b1
|
ede96590eee4880ff83d1f1d8db5229e92c6e919
|
/leasing/metadata.py
|
0639d907861c764c94129916cc3f6a2315f07bc7
|
[
"MIT"
] |
permissive
|
igordavydsson/mvj
|
a4c5b39e7be9f95e15a2e906ad61b98611998063
|
b467c6229f9d458d56b66f628b0841adb67a2970
|
refs/heads/master
| 2020-04-22T20:42:06.650182 | 2019-02-12T13:50:57 | 2019-02-12T13:50:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,833 |
py
|
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from enumfields.drf import EnumField
from rest_framework.fields import ChoiceField, DecimalField
from rest_framework.metadata import SimpleMetadata
from rest_framework.relations import PrimaryKeyRelatedField
from field_permissions.metadata import FieldPermissionsMetadataMixin
from leasing.models import Contact, Decision, Invoice, Lease
from leasing.models.invoice import InvoiceSet
from leasing.permissions import PerMethodPermission
from users.models import User
ALL_METHODS = {
'GET': False,
'OPTIONS': False,
'HEAD': False,
'POST': False,
'PUT': False,
'PATCH': False,
'DELETE': False,
}
class FieldsMetadata(FieldPermissionsMetadataMixin, SimpleMetadata):
"""Returns metadata for all the fields and the possible choices in the
serializer even when the fields are read only.
Additionally adds decimal_places and max_digits info for DecimalFields."""
def determine_metadata(self, request, view, serializer=None):
metadata = super().determine_metadata(request, view)
if not serializer and hasattr(view, 'get_serializer'):
serializer = view.get_serializer()
if serializer:
metadata["fields"] = self.get_serializer_info(serializer)
# Determine allowed methods for model views
if hasattr(serializer, 'Meta') and serializer.Meta.model:
method_permissions = ALL_METHODS.copy()
for permission in view.get_permissions():
if not hasattr(permission, 'get_required_permissions'):
continue
for method in method_permissions.keys():
perms = permission.get_required_permissions(method, serializer.Meta.model)
method_permissions[method] = request.user.has_perms(perms)
metadata['methods'] = method_permissions
# Determine methods the user has permission to for custom views
# and viewsets that are using PerMethodPermission.
if PerMethodPermission in view.permission_classes:
permission = PerMethodPermission()
method_permissions = {}
for method in view.allowed_methods:
required_perms = permission.get_required_permissions(method, view)
method_permissions[method.upper()] = request.user.has_perms(required_perms)
metadata['methods'] = method_permissions
return metadata
def get_field_info(self, field):
field_info = super().get_field_info(field)
if isinstance(field, DecimalField):
field_info['decimal_places'] = field.decimal_places
field_info['max_digits'] = field.max_digits
# Kludge for translating language names
if isinstance(field, ChoiceField) and field.field_name == 'language':
field_info['choices'] = [{
'value': choice_value,
'display_name': _(choice_name).capitalize(),
} for choice_value, choice_name in field.choices.items()]
field_info['choices'].sort(key=lambda x: x['display_name'])
if isinstance(field, PrimaryKeyRelatedField) or isinstance(field, EnumField):
# TODO: Make configurable
if hasattr(field, 'queryset') and field.queryset.model in (User, Lease, Contact, Decision, Invoice,
InvoiceSet):
return field_info
field_info['choices'] = [{
'value': choice_value,
'display_name': force_text(choice_name, strings_only=True)
} for choice_value, choice_name in field.choices.items()]
return field_info
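# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# FieldsMetadata plugs into DRF's standard metadata hook, so it can be enabled
# per view with `metadata_class` or project-wide via DEFAULT_METADATA_CLASS.
# The viewset below is purely illustrative (the serializer name is an assumption),
# so it is left commented out:
#
# from rest_framework import viewsets
# from leasing.metadata import FieldsMetadata
#
# class LeaseViewSet(viewsets.ModelViewSet):
#     queryset = Lease.objects.all()
#     serializer_class = LeaseSerializer   # hypothetical serializer
#     metadata_class = FieldsMetadata      # OPTIONS responses now include field info
#
# Or globally in settings.py:
# REST_FRAMEWORK = {"DEFAULT_METADATA_CLASS": "leasing.metadata.FieldsMetadata"}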
|
[
"[email protected]"
] | |
471b5a634eac53812dbe5e6260c757c693f8a688
|
4f00c6a08db5755b294bd519b9377866f5ff6c19
|
/src/tests/google/net/proto2/python/internal/python_message.py
|
557dfc1d9d0b1bac8d18ec77d1eb4182d51becdc
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cooljeanius/cauliflowervest
|
02035a8455b1dde469ebfd0b202c02456820a679
|
a9bc209b610a927083bf16274d8451c6c45227bf
|
refs/heads/main
| 2022-12-24T15:28:30.616604 | 2020-09-25T23:55:15 | 2020-09-25T23:55:15 | 303,812,548 | 1 | 0 |
Apache-2.0
| 2023-09-04T16:48:46 | 2020-10-13T19:46:58 |
Python
|
UTF-8
|
Python
| false | false | 32,904 |
py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import struct
import weakref
from google.net.proto2.python.internal import containers
from google.net.proto2.python.internal import decoder
from google.net.proto2.python.internal import encoder
from google.net.proto2.python.internal import enum_type_wrapper
from google.net.proto2.python.internal import message_listener as message_listener_mod
from google.net.proto2.python.internal import type_checkers
from google.net.proto2.python.internal import wire_format
from google.net.proto2.python.public import descriptor as descriptor_mod
from google.net.proto2.python.public import message as message_mod
from google.net.proto2.python.public import text_format
_FieldDescriptor = descriptor_mod.FieldDescriptor
def NewMessage(descriptor, dictionary):
_AddClassAttributesForNestedExtensions(descriptor, dictionary)
_AddSlots(descriptor, dictionary)
def InitMessage(descriptor, cls):
cls._decoders_by_tag = {}
cls._extensions_by_name = {}
cls._extensions_by_number = {}
if (descriptor.has_options and
descriptor.GetOptions().message_set_wire_format):
cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = (
decoder.MessageSetItemDecoder(cls._extensions_by_number))
for field in descriptor.fields:
_AttachFieldHelpers(cls, field)
_AddEnumValues(descriptor, cls)
_AddInitMethod(descriptor, cls)
_AddPropertiesForFields(descriptor, cls)
_AddPropertiesForExtensions(descriptor, cls)
_AddStaticMethods(cls)
_AddMessageMethods(descriptor, cls)
_AddPrivateHelperMethods(cls)
def _PropertyName(proto_field_name):
"""Returns the name of the public property attribute which
clients can use to get and (in some cases) set the value
of a protocol message field.
Args:
proto_field_name: The protocol message field name, exactly
as it appears (or would appear) in a .proto file.
"""
return proto_field_name
def _VerifyExtensionHandle(message, extension_handle):
"""Verify that the given extension handle is valid."""
if not isinstance(extension_handle, _FieldDescriptor):
raise KeyError('HasExtension() expects an extension handle, got: %s' %
extension_handle)
if not extension_handle.is_extension:
raise KeyError('"%s" is not an extension.' % extension_handle.full_name)
if extension_handle.containing_type is not message.DESCRIPTOR:
raise KeyError('Extension "%s" extends message type "%s", but this '
'message is of type "%s".' %
(extension_handle.full_name,
extension_handle.containing_type.full_name,
message.DESCRIPTOR.full_name))
def _AddSlots(message_descriptor, dictionary):
"""Adds a __slots__ entry to dictionary, containing the names of all valid
attributes for this message type.
Args:
message_descriptor: A Descriptor instance describing this message type.
dictionary: Class dictionary to which we'll add a '__slots__' entry.
"""
dictionary['__slots__'] = ['_cached_byte_size',
'_cached_byte_size_dirty',
'_fields',
'_is_present_in_parent',
'_listener',
'_listener_for_children',
'__weakref__']
def _IsMessageSetExtension(field):
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == _FieldDescriptor.TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == _FieldDescriptor.LABEL_OPTIONAL)
def _AttachFieldHelpers(cls, field_descriptor):
is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED)
is_packed = (field_descriptor.has_options and
field_descriptor.GetOptions().packed)
if _IsMessageSetExtension(field_descriptor):
field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number)
sizer = encoder.MessageSetItemSizer(field_descriptor.number)
else:
field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed)
sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed)
field_descriptor._encoder = field_encoder
field_descriptor._sizer = sizer
field_descriptor._default_constructor = _DefaultValueConstructorForField(
field_descriptor)
def AddDecoder(wiretype, is_packed):
tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype)
cls._decoders_by_tag[tag_bytes] = (
type_checkers.TYPE_TO_DECODER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed,
field_descriptor, field_descriptor._default_constructor))
AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type],
False)
if is_repeated and wire_format.IsTypePackable(field_descriptor.type):
AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True)
def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.iteritems():
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddEnumValues(descriptor, cls):
"""Sets class-level attributes for all enum fields defined in this message.
Also exporting a class-level object that can name enum values.
Args:
descriptor: Descriptor object for this message type.
cls: Class we're constructing for this message type.
"""
for enum_type in descriptor.enum_types:
setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
for enum_value in enum_type.values:
setattr(cls, enum_value.name, enum_value.number)
def _DefaultValueConstructorForField(field):
"""Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference.
"""
if field.label == _FieldDescriptor.LABEL_REPEATED:
if field.default_value != []:
raise ValueError('Repeated field default value not empty list: %s' % (
field.default_value))
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
message_type = field.message_type
def MakeRepeatedMessageDefault(message):
return containers.RepeatedCompositeFieldContainer(
message._listener_for_children, field.message_type)
return MakeRepeatedMessageDefault
else:
type_checker = type_checkers.GetTypeChecker(field.cpp_type, field.type)
def MakeRepeatedScalarDefault(message):
return containers.RepeatedScalarFieldContainer(
message._listener_for_children, type_checker)
return MakeRepeatedScalarDefault
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
message_type = field.message_type
def MakeSubMessageDefault(message):
result = message_type._concrete_class()
result._SetListener(message._listener_for_children)
return result
return MakeSubMessageDefault
def MakeScalarDefault(message):
return field.default_value
return MakeScalarDefault
def _AddInitMethod(message_descriptor, cls):
"""Adds an __init__ method to cls."""
fields = message_descriptor.fields
def init(self, **kwargs):
self._cached_byte_size = 0
self._cached_byte_size_dirty = len(kwargs) > 0
self._fields = {}
self._is_present_in_parent = False
self._listener = message_listener_mod.NullMessageListener()
self._listener_for_children = _Listener(self)
for field_name, field_value in kwargs.iteritems():
field = _GetFieldByName(message_descriptor, field_name)
if field is None:
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(message_descriptor.name, field_name))
if field.label == _FieldDescriptor.LABEL_REPEATED:
copy = field._default_constructor(self)
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
for val in field_value:
copy.add().MergeFrom(val)
else:
copy.extend(field_value)
self._fields[field] = copy
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
copy = field._default_constructor(self)
copy.MergeFrom(field_value)
self._fields[field] = copy
else:
setattr(self, field_name, field_value)
init.__module__ = None
init.__doc__ = None
cls.__init__ = init
def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
"""
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message has no "%s" field.' % field_name)
def _AddPropertiesForFields(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
for field in descriptor.fields:
_AddPropertiesForField(field, cls)
if descriptor.is_extendable:
cls.Extensions = property(lambda self: _ExtensionDict(self))
def _AddPropertiesForField(field, cls):
"""Adds a public property for a protocol message field.
Clients can use this property to get and (in the case
of non-repeated scalar fields) directly set the value
of a protocol message field.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
assert _FieldDescriptor.MAX_CPPTYPE == 10
constant_name = field.name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, field.number)
if field.label == _FieldDescriptor.LABEL_REPEATED:
_AddPropertiesForRepeatedField(field, cls)
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
_AddPropertiesForNonRepeatedCompositeField(field, cls)
else:
_AddPropertiesForNonRepeatedScalarField(field, cls)
def _AddPropertiesForRepeatedField(field, cls):
"""Adds a public property for a "repeated" protocol message field. Clients
can use this property to get the value of the field, which will be either a
_RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
below).
Note that when clients add values to these containers, we perform
type-checking in the case of repeated scalar fields, and we also set any
necessary "has" bits as a side-effect.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
field_value = field._default_constructor(self)
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
def setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % proto_field_name)
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedScalarField(field, cls):
"""Adds a public property for a nonrepeated, scalar protocol message field.
Clients can use this property to get and directly set the value of the field.
Note that when the client sets the value of a field by using this property,
all necessary "has" bits are set as a side-effect, and we also perform
type-checking.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
type_checker = type_checkers.GetTypeChecker(field.cpp_type, field.type)
default_value = field.default_value
valid_values = set()
def getter(self):
return self._fields.get(field, default_value)
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
def setter(self, new_value):
type_checker.CheckValue(new_value)
self._fields[field] = new_value
if not self._cached_byte_size_dirty:
self._Modified()
setter.__module__ = None
setter.__doc__ = 'Setter for %s.' % proto_field_name
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
"""Adds a public property for a nonrepeated, composite protocol message field.
A composite field is a "group" or "message" field.
Clients can use this property to get the value of the field, but cannot
assign to the property directly.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
message_type = field.message_type
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
field_value = message_type._concrete_class()
field_value._SetListener(self._listener_for_children)
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
def setter(self, new_value):
raise AttributeError('Assignment not allowed to composite field '
'"%s" in protocol message object.' % proto_field_name)
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForExtensions(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.iteritems():
constant_name = extension_name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, extension_field.number)
def _AddStaticMethods(cls):
def RegisterExtension(extension_handle):
extension_handle.containing_type = cls.DESCRIPTOR
_AttachFieldHelpers(cls, extension_handle)
actual_handle = cls._extensions_by_number.setdefault(
extension_handle.number, extension_handle)
if actual_handle is not extension_handle:
raise AssertionError(
'Extensions "%s" and "%s" both try to extend message type "%s" with '
'field number %d.' %
(extension_handle.full_name, actual_handle.full_name,
cls.DESCRIPTOR.full_name, extension_handle.number))
cls._extensions_by_name[extension_handle.full_name] = extension_handle
handle = extension_handle
if _IsMessageSetExtension(handle):
cls._extensions_by_name[
extension_handle.message_type.full_name] = extension_handle
cls.RegisterExtension = staticmethod(RegisterExtension)
def FromString(s):
message = cls()
message.MergeFromString(s)
return message
cls.FromString = staticmethod(FromString)
def _IsPresent(item):
"""Given a (FieldDescriptor, value) tuple from _fields, return true if the
value should be included in the list returned by ListFields()."""
if item[0].label == _FieldDescriptor.LABEL_REPEATED:
return bool(item[1])
elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
return item[1]._is_present_in_parent
else:
return True
def _AddListFieldsMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ListFields(self):
all_fields = [item for item in self._fields.iteritems() if _IsPresent(item)]
all_fields.sort(key = lambda item: item[0].number)
return all_fields
cls.ListFields = ListFields
def _AddHasFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
singular_fields = {}
for field in message_descriptor.fields:
if field.label != _FieldDescriptor.LABEL_REPEATED:
singular_fields[field.name] = field
def HasField(self, field_name):
try:
field = singular_fields[field_name]
except KeyError:
raise ValueError(
'Protocol message has no singular "%s" field.' % field_name)
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
value = self._fields.get(field)
return value is not None and value._is_present_in_parent
else:
return field in self._fields
cls.HasField = HasField
def _AddClearFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ClearField(self, field_name):
try:
field = message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message has no "%s" field.' % field_name)
if field in self._fields:
del self._fields[field]
self._Modified()
cls.ClearField = ClearField
def _AddClearExtensionMethod(cls):
"""Helper for _AddMessageMethods()."""
def ClearExtension(self, extension_handle):
_VerifyExtensionHandle(self, extension_handle)
if extension_handle in self._fields:
del self._fields[extension_handle]
self._Modified()
cls.ClearExtension = ClearExtension
def _AddClearMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def Clear(self):
self._fields = {}
self._Modified()
cls.Clear = Clear
def _AddHasExtensionMethod(cls):
"""Helper for _AddMessageMethods()."""
def HasExtension(self, extension_handle):
_VerifyExtensionHandle(self, extension_handle)
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
raise KeyError('"%s" is repeated.' % extension_handle.full_name)
if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
value = self._fields.get(extension_handle)
return value is not None and value._is_present_in_parent
else:
return extension_handle in self._fields
cls.HasExtension = HasExtension
def _AddEqualsMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __eq__(self, other):
if (not isinstance(other, message_mod.Message) or
other.DESCRIPTOR != self.DESCRIPTOR):
return False
if self is other:
return True
return self.ListFields() == other.ListFields()
cls.__eq__ = __eq__
def _AddStrMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __str__(self):
return text_format.MessageToString(self)
cls.__str__ = __str__
def _AddUnicodeMethod(unused_message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __unicode__(self):
return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
cls.__unicode__ = __unicode__
def _AddSetListenerMethod(cls):
"""Helper for _AddMessageMethods()."""
def SetListener(self, listener):
if listener is None:
self._listener = message_listener_mod.NullMessageListener()
else:
self._listener = listener
cls._SetListener = SetListener
def _BytesForNonRepeatedElement(value, field_number, field_type):
"""Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
"""
try:
fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
return fn(field_number, value)
except KeyError:
raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
def _AddByteSizeMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ByteSize(self):
if not self._cached_byte_size_dirty:
return self._cached_byte_size
size = 0
for field_descriptor, field_value in self.ListFields():
size += field_descriptor._sizer(field_value)
self._cached_byte_size = size
self._cached_byte_size_dirty = False
self._listener_for_children.dirty = False
return size
cls.ByteSize = ByteSize
def _AddSerializeToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializeToString(self):
errors = []
if not self.IsInitialized():
raise message_mod.EncodeError(
'Message is missing required fields: ' +
','.join(self.FindInitializationErrors()))
return self.SerializePartialToString()
cls.SerializeToString = SerializeToString
def _AddSerializePartialToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializePartialToString(self):
out = StringIO()
self._InternalSerialize(out.write)
return out.getvalue()
cls.SerializePartialToString = SerializePartialToString
def InternalSerialize(self, write_bytes):
for field_descriptor, field_value in self.ListFields():
field_descriptor._encoder(write_bytes, field_value)
cls._InternalSerialize = InternalSerialize
def _AddMergeFromStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def MergeFromString(self, serialized):
length = len(serialized)
try:
if self._InternalParse(serialized, 0, length) != length:
raise message_mod.DecodeError('Unexpected end-group tag.')
except IndexError:
raise message_mod.DecodeError('Truncated message.')
except struct.error, e:
raise message_mod.DecodeError(e)
return length
cls.MergeFromString = MergeFromString
local_ReadTag = decoder.ReadTag
local_SkipField = decoder.SkipField
decoders_by_tag = cls._decoders_by_tag
def InternalParse(self, buffer, pos, end):
self._Modified()
field_dict = self._fields
while pos != end:
(tag_bytes, new_pos) = local_ReadTag(buffer, pos)
field_decoder = decoders_by_tag.get(tag_bytes)
if field_decoder is None:
new_pos = local_SkipField(buffer, new_pos, end, tag_bytes)
if new_pos == -1:
return pos
pos = new_pos
else:
pos = field_decoder(buffer, new_pos, end, self, field_dict)
return pos
cls._InternalParse = InternalParse
def _AddIsInitializedMethod(message_descriptor, cls):
"""Adds the IsInitialized and FindInitializationError methods to the
protocol message class."""
required_fields = [field for field in message_descriptor.fields
if field.label == _FieldDescriptor.LABEL_REQUIRED]
def IsInitialized(self, errors=None):
"""Checks if all required fields of a message are set.
Args:
errors: A list which, if provided, will be populated with the field
paths of all missing required fields.
Returns:
True iff the specified message has all required fields set.
"""
for field in required_fields:
if (field not in self._fields or
(field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
not self._fields[field]._is_present_in_parent)):
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
for field, value in self._fields.iteritems():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.label == _FieldDescriptor.LABEL_REPEATED:
for element in value:
if not element.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
elif value._is_present_in_parent and not value.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
return True
cls.IsInitialized = IsInitialized
def FindInitializationErrors(self):
"""Finds required fields which are not initialized.
Returns:
A list of strings. Each string is a path to an uninitialized field from
the top-level message, e.g. "foo.bar[5].baz".
"""
errors = []
for field in required_fields:
if not self.HasField(field.name):
errors.append(field.name)
for field, value in self.ListFields():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.is_extension:
name = "(%s)" % field.full_name
else:
name = field.name
if field.label == _FieldDescriptor.LABEL_REPEATED:
for i in xrange(len(value)):
element = value[i]
prefix = "%s[%d]." % (name, i)
sub_errors = element.FindInitializationErrors()
errors += [ prefix + error for error in sub_errors ]
else:
prefix = name + "."
sub_errors = value.FindInitializationErrors()
errors += [ prefix + error for error in sub_errors ]
return errors
cls.FindInitializationErrors = FindInitializationErrors
def _AddMergeFromMethod(cls):
LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED
CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE
def MergeFrom(self, msg):
if not isinstance(msg, cls):
raise TypeError(
"Parameter to MergeFrom() must be instance of same class.")
assert msg is not self
self._Modified()
fields = self._fields
for field, value in msg._fields.iteritems():
if field.label == LABEL_REPEATED:
field_value = fields.get(field)
if field_value is None:
field_value = field._default_constructor(self)
fields[field] = field_value
field_value.MergeFrom(value)
elif field.cpp_type == CPPTYPE_MESSAGE:
if value._is_present_in_parent:
field_value = fields.get(field)
if field_value is None:
field_value = field._default_constructor(self)
fields[field] = field_value
field_value.MergeFrom(value)
else:
self._fields[field] = value
cls.MergeFrom = MergeFrom
def _AddMessageMethods(message_descriptor, cls):
"""Adds implementations of all Message methods to cls."""
_AddListFieldsMethod(message_descriptor, cls)
_AddHasFieldMethod(message_descriptor, cls)
_AddClearFieldMethod(message_descriptor, cls)
if message_descriptor.is_extendable:
_AddClearExtensionMethod(cls)
_AddHasExtensionMethod(cls)
_AddClearMethod(message_descriptor, cls)
_AddEqualsMethod(message_descriptor, cls)
_AddStrMethod(message_descriptor, cls)
_AddUnicodeMethod(message_descriptor, cls)
_AddSetListenerMethod(cls)
_AddByteSizeMethod(message_descriptor, cls)
_AddSerializeToStringMethod(message_descriptor, cls)
_AddSerializePartialToStringMethod(message_descriptor, cls)
_AddMergeFromStringMethod(message_descriptor, cls)
_AddIsInitializedMethod(message_descriptor, cls)
_AddMergeFromMethod(cls)
def _AddPrivateHelperMethods(cls):
"""Adds implementation of private helper methods to cls."""
def Modified(self):
"""Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.
"""
if not self._cached_byte_size_dirty:
self._cached_byte_size_dirty = True
self._listener_for_children.dirty = True
self._is_present_in_parent = True
self._listener.Modified()
cls._Modified = Modified
cls.SetInParent = Modified
class _Listener(object):
"""MessageListener implementation that a parent message registers with its
child message.
In order to support semantics like:
foo.bar.baz.qux = 23
assert foo.HasField('bar')
...child objects must have back references to their parents.
This helper class is at the heart of this support.
"""
def __init__(self, parent_message):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
"""
if isinstance(parent_message, weakref.ProxyType):
self._parent_message_weakref = parent_message
else:
self._parent_message_weakref = weakref.proxy(parent_message)
self.dirty = False
def Modified(self):
if self.dirty:
return
try:
self._parent_message_weakref._Modified()
except ReferenceError:
pass
class _ExtensionDict(object):
"""Dict-like container for supporting an indexable "Extensions"
field on proto instances.
Note that in all cases we expect extension handles to be
FieldDescriptors.
"""
def __init__(self, extended_message):
"""extended_message: Message instance for which we are the Extensions dict.
"""
self._extended_message = extended_message
def __getitem__(self, extension_handle):
"""Returns the current value of the given extension handle."""
_VerifyExtensionHandle(self._extended_message, extension_handle)
result = self._extended_message._fields.get(extension_handle)
if result is not None:
return result
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
result = extension_handle._default_constructor(self._extended_message)
elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
result = extension_handle.message_type._concrete_class()
try:
result._SetListener(self._extended_message._listener_for_children)
except ReferenceError:
pass
else:
return extension_handle.default_value
result = self._extended_message._fields.setdefault(
extension_handle, result)
return result
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
my_fields = self._extended_message.ListFields()
other_fields = other._extended_message.ListFields()
my_fields = [ field for field in my_fields if field.is_extension ]
other_fields = [ field for field in other_fields if field.is_extension ]
return my_fields == other_fields
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def __setitem__(self, extension_handle, value):
"""If extension_handle specifies a non-repeated, scalar extension
field, sets the value of that field.
"""
_VerifyExtensionHandle(self._extended_message, extension_handle)
if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or
extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
raise TypeError(
'Cannot assign to extension "%s" because it is a repeated or '
'composite type.' % extension_handle.full_name)
type_checker = type_checkers.GetTypeChecker(
extension_handle.cpp_type, extension_handle.type)
type_checker.CheckValue(value)
self._extended_message._fields[extension_handle] = value
self._extended_message._Modified()
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._extended_message._extensions_by_name.get(name, None)
|
[
"[email protected]"
] | |
66228a98b7aef124fd015c3823c8dd4f0b4d939d
|
a34a6861adabdffba0dec1bf9ba2d6b48c4564cb
|
/model.py
|
db2d48ca5676c1d6378ff82589047949cbfd1179
|
[] |
no_license
|
AotY/gumbel_softx_vae
|
d4095212117cdbdd71434fd47f51ae0aef42869f
|
f345efe797fb9adc00f5d4e288da80102f23850e
|
refs/heads/master
| 2020-04-02T03:31:28.336284 | 2018-10-21T05:11:36 | 2018-10-21T05:11:36 | 153,970,643 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,875 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2018 LeonTao
#
# Distributed under terms of the MIT license.
"""
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from gumbel_softmax import GumbelSoftmax


class GumbelVAE(nn.Module):
    def __init__(self,
                 latent_size,
                 category_size,
                 device,
                 input_size=784):
        super(GumbelVAE, self).__init__()
        self.input_size = input_size
        self.latent_size = latent_size
        self.category_size = category_size
        # Encoder: input -> 512 -> 256 -> latent_size * category_size logits
        self.fc1 = nn.Linear(input_size, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, latent_size * category_size)
        # Decoder: latent_size * category_size -> 256 -> 512 -> input
        self.fc4 = nn.Linear(latent_size * category_size, 256)
        self.fc5 = nn.Linear(256, 512)
        self.fc6 = nn.Linear(512, input_size)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.gumbel_softmax = GumbelSoftmax(dim=2,
                                            device=device)

    def encode(self, input):
        h1 = self.relu(self.fc1(input))
        h2 = self.relu(self.fc2(h1))
        h3 = self.relu(self.fc3(h2))
        return h3

    def decode(self, encode_output):
        h4 = self.relu(self.fc4(encode_output))
        h5 = self.relu(self.fc5(h4))
        output = self.sigmoid(self.fc6(h5))
        return output

    def forward(self, input, temperature):
        encode_output = self.encode(input)
        # Reshape logits to (batch, latent, category) and sample with Gumbel-Softmax
        tmp = encode_output.view(encode_output.size(0),
                                 self.latent_size,
                                 self.category_size)
        tmp = self.gumbel_softmax(tmp, temperature)
        tmp = tmp.view(-1, self.latent_size * self.category_size)
        decode_output = self.decode(tmp)
        return decode_output, F.softmax(encode_output, dim=-1)
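# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Minimal forward pass on random data; the latent/category sizes are free choices
# and it is assumed that the local GumbelSoftmax module accepts (logits, temperature)
# as used in forward() above.
if __name__ == "__main__":
    device = torch.device("cpu")
    model = GumbelVAE(latent_size=20, category_size=10, device=device,
                      input_size=784)
    x = torch.rand(8, 784)            # batch of flattened 28x28 images
    recon, qy = model(x, temperature=1.0)
    print(recon.shape, qy.shape)      # expected: (8, 784) and (8, 200)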
|
[
"[email protected]"
] | |
4370fadd7890d92918d28af2040293ff1d87db32
|
0c0a6a41b5bb15e74f2e938218a971d6036dfd0d
|
/drf26/manage.py
|
15cab87dfcd5367687a68ba0fa354a330b9f5615
|
[] |
no_license
|
kamal0072/API-s-based-on-drf-and-python
|
54067cd1b364a50ace2c3f4b35cccaafc977d39f
|
b31299ff2bc32f836c85f402dbe2cfa34f34dd69
|
refs/heads/master
| 2023-03-25T16:51:36.511505 | 2021-03-24T16:27:46 | 2021-03-24T16:27:46 | 351,147,386 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 661 |
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drf26.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
393e6e059bafb801328b3a9ff0a87ac4bfd2eba2
|
e92a3d0fb77120be99de6040cb6cd34eda0a95f4
|
/Работа с фнукциями, система модулей/code/all_function_6.py
|
1640864694684c8fd29dc41e4038b8f29151ff46
|
[] |
no_license
|
Python18Academy/python_first_level
|
495f85631f5afc737aa156ef8ca0ea307340c322
|
9ce490da3108474b135a17086f4d11f2a3bbbe55
|
refs/heads/master
| 2023-09-04T17:00:36.920987 | 2021-03-31T18:44:37 | 2021-03-31T18:44:37 | 331,934,029 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 89 |
py
|
from pirog import *
make_pirog('большой', 7)      # 'большой' = 'big'
make_pirog('маленький', 3)    # 'маленький' = 'small'
|
[
"[email protected]"
] | |
c4bc04bf5d469f3e5315f2941b33cfd2a704a7ed
|
35ab93904c03c1494b470fe60ff17a6e3b8858e4
|
/tests/mocks/committees.py
|
56d14dbb8a5c2c603ea525703c8a13ac295bf0d4
|
[
"MIT"
] |
permissive
|
alefbt/knesset-data-pipelines
|
cb6220fc96c95f50925e4b99d8682760729cf067
|
ed743fb4c84ce9e9ae0b935d686d05673d868416
|
refs/heads/master
| 2021-06-22T20:06:17.254073 | 2017-08-13T17:08:40 | 2017-08-13T17:08:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,195 |
py
|
from datapackage_pipelines_knesset.committees.processors.download_committee_meeting_protocols import DownloadCommitteeMeetingProtocolsProcessor
from datapackage_pipelines_knesset.committees.processors.parse_committee_meeting_protocols import ParseCommitteeMeetingProtocolsProcessor
from datapackage_pipelines_knesset.committees.processors.committee_meeting_protocols_update_db import CommitteeMeetingProtocolsUpdateDbProcessor
import os
from datapackage_pipelines_knesset.common.db import get_session
class MockDownloadCommitteeMeetingProtocols(DownloadCommitteeMeetingProtocolsProcessor):
def _get_session(self):
return get_session(connection_string="sqlite://")
def _reuqests_get(self, url):
if url == "http://fs.knesset.gov.il//20/Committees/20_ptv_389210.doc":
filename = "20_ptv_389210.doc"
elif url == "http://knesset.gov.il/protocols/data/rtf/knesset/2007-12-27.rtf":
filename = "2007-12-27.rtf"
elif url == "http://fs.knesset.gov.il//20/Committees/20_ptv_387483.doc":
filename = "20_ptv_387483.doc"
else:
raise Exception("unknown url: {}".format(url))
filename = os.path.join(os.path.dirname(__file__), filename)
if not os.path.exists(filename):
res = super(MockDownloadCommitteeMeetingProtocols, self)._reuqests_get(url)
if res.status_code != 200:
with open(filename+".status_code", 'w') as f:
f.write(str(res.status_code))
with open(filename, 'wb') as f:
f.write(res.content)
with open(filename, "rb") as f:
content = f.read()
if os.path.exists(filename+".status_code"):
with open(filename+".status_code") as f:
status_code = int(f.read())
else:
status_code = 200
return type("MockResponse", (object,), {"status_code": status_code,
"content": content})()
class MockParseCommitteeMeetingProtocols(ParseCommitteeMeetingProtocolsProcessor):
pass
class MockCommitteeMeetingProtocolsUpdateDb(CommitteeMeetingProtocolsUpdateDbProcessor):
pass
|
[
"[email protected]"
] | |
5b8612f3e472db95cd9fdaa093ba14d6411ec101
|
dd89a85bbefa12a6c8e8b66ffc84c08767f0e841
|
/backend/task_profile/migrations/0001_initial.py
|
c7740497962e5be06a671658b9c15cad0b155cb6
|
[] |
no_license
|
crowdbotics-apps/sample-27023
|
ac5f358cba9432b02080d3f3177efd23d35a08ed
|
e2e1f0d918e6cc47a87bfd7f318f1b6797f19d2d
|
refs/heads/master
| 2023-04-30T14:24:59.449141 | 2021-05-21T03:01:21 | 2021-05-21T03:01:21 | 369,397,318 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,239 |
py
|
# Generated by Django 2.2.20 on 2021-05-21 03:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='TaskerProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mobile_number', models.CharField(max_length=20)),
('photo', models.URLField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('last_login', models.DateTimeField(blank=True, null=True)),
('description', models.TextField(blank=True, null=True)),
('city', models.CharField(blank=True, max_length=50, null=True)),
('vehicle', models.CharField(blank=True, max_length=50, null=True)),
('closing_message', models.TextField(blank=True, null=True)),
('work_area_radius', models.FloatField(blank=True, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerprofile_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=20)),
('message', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('user', models.ManyToManyField(related_name='notification_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='InviteCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=20)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invitecode_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='CustomerProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mobile_number', models.CharField(max_length=20)),
('photo', models.URLField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('last_login', models.DateTimeField(blank=True, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerprofile_user', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"[email protected]"
] | |
29c050fb32b9bd65213c7fe6ed14f05bcdb546d8
|
4914e1e18cabd3db104386b13a48e3371f6c4d25
|
/tov/NM_expansion.py
|
3baaa5e68ef7945ac5e87b48e503e2f26043444a
|
[] |
no_license
|
sotzee/ns
|
592b21c013657ca202ab1138d92c32960d7e2170
|
70faa8e97560ec4072e5f0f697e3f2471f1303f7
|
refs/heads/master
| 2021-06-19T15:51:03.271980 | 2019-06-10T14:16:21 | 2019-06-10T14:16:21 | 115,557,527 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,016 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 17:36:17 2018
@author: sotzee
"""
from scipy.misc import derivative
from scipy.constants import c,G,e
from scipy.interpolate import interp1d
from unitconvert import toMev4#,toMevfm
import numpy as np
import scipy.optimize as opt
#import matplotlib.pyplot as plt
dlnx_cs2=1e-6
def energy_per_baryon_sym(n,n_s,m,T,abcd_sym):
u=n/n_s
a_sym,b_sym,c_sym,d_sym=abcd_sym
return m+T*(u**(2./3)+a_sym*u+b_sym*u**(4./3)+c_sym*u**(5./3)+d_sym*u**2)
def energy_per_baryon_sym_jac(n,n_s,T,abcd_sym):
u=n/n_s
a_sym,b_sym,c_sym,d_sym=abcd_sym
return T*(2.*u**(-1./3)+3*a_sym+4.*b_sym*u**(1./3)+5.*c_sym*u**(2./3)+6.*d_sym*u)/3
def energy_per_baryon_pnm(n,n_s,m,T,abcd_pnm):
u=n/n_s
a_pnm,b_pnm,c_pnm,d_pnm=abcd_pnm
return m+T*((2*u)**(2./3)+a_pnm*u+b_pnm*u**(4./3)+c_pnm*u**(5./3)+d_pnm*u**2)
def energy_per_baryon_pnm_jac(n,n_s,T,abcd_pnm):
u=n/n_s
a_pnm,b_pnm,c_pnm,d_pnm=abcd_pnm
return T*(4.*(2*u)**(-1./3)+3*a_pnm+4.*b_pnm*u**(1./3)+5.*c_pnm*u**(2./3)+6.*d_pnm*u)/3
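# --- Hedged sanity-check sketch (editor's addition, not part of the original module) ---
# The analytic derivative above should agree with a numerical derivative of the
# energy per baryon, and the pressure assembled in get_eos_array() below is then
# p(n) = n**2 * d(E/A)/dn.  The saturation density, nucleon mass and expansion
# coefficients used here are illustrative numbers only.
def _check_pnm_pressure_consistency(n=0.32, n_s=0.16, m=939.0, T=36.84,
                                    abcd=(-1.0, 2.0, -0.5, 0.1), dn=1e-7):
    numeric = (energy_per_baryon_pnm(n + dn, n_s, m, T, abcd)
               - energy_per_baryon_pnm(n - dn, n_s, m, T, abcd)) / (2 * dn)
    analytic = energy_per_baryon_pnm_jac(n, n_s, T, abcd) / n_s
    return numeric, analytic, n**2 * analytic   # last value is the pressure p(n)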
def get_parameters_tmp(parameter_array,T,ELKQ_array): #where E0,L0,K0,Q0 are for symmetric nuclear matter, and S,L,K,Q are for symmetry energy
matrix=np.array([[120,-38,6,-1],[-270,90,-15,3],[216,-72,12,-3],[-60,20,-3,1]])
#print(matrix,ELKQ_array,np.dot(matrix,ELKQ_array))
return parameter_array+np.dot(matrix,ELKQ_array)/(6*T)
def get_parameters_sym(T,ELKQ_array): #S,L,K,Q are for PNM(pure neutron matter).
parameter_array=np.array([-4,6,-4,1])
return get_parameters_tmp(parameter_array,T,ELKQ_array)
def get_parameters_pnm(T,ELKQ_array): #S,L,K,Q are for PNM(pure neutron matter).
parameter_array=np.array([-4,6,-4,1])*2**(2./3)
return get_parameters_tmp(parameter_array,T,ELKQ_array)
def get_baryon_density_u_max(abcd,defaut_u_max):
coeff=[54*abcd[3],40*abcd[2],28*abcd[1],18*abcd[0],10*2**(2./3)]
roots=np.roots(coeff)
roots_real=roots.real[np.isreal(roots)]
if(len(roots_real[roots_real>0])==0):
return defaut_u_max
else:
return np.min([roots_real[roots_real>0].min()**3,defaut_u_max])
def get_baryon_density_u_max_margueron(abcd,defaut_u_max):
coeff=[54*abcd[3],40*abcd[2],28*abcd[1],18*abcd[0],10*2**(2./3)]
roots=np.roots(coeff)
roots_real=roots.real[np.isreal(roots)]
if(len(roots_real[roots_real>0])==0):
return defaut_u_max
else:
return np.min([roots_real[roots_real>0].min()**3,defaut_u_max])
def get_eos_array(u_min,u_max,baryon_density_sat,m,T,abcd):
baryon_density=baryon_density_sat*10**np.linspace(np.log10(u_min),np.log10(u_max),501)
energy_dnnsity=np.concatenate(([0],baryon_density*energy_per_baryon_pnm(baryon_density,baryon_density_sat,m,T,abcd),[10000]))
pressure=np.concatenate(([0],baryon_density**2/baryon_density_sat*energy_per_baryon_pnm_jac(baryon_density,baryon_density_sat,T,abcd),[10000]))
baryon_density=np.concatenate(([0],baryon_density,[1000*baryon_density_sat]))
result=np.array([baryon_density,energy_dnnsity,pressure])
#plt.plot(result[0],energy_per_baryon_pnm(baryon_density,baryon_density_sat,m,T,abcd))
#plt.plot(result[0],result[1])
#plt.plot(result[0][:-1],result[2][:-1])
return result,result[:,int(len(baryon_density)/2)]
def matching_eos(trial_pressure,eos_density1,eos_density2):
return eos_density1(trial_pressure)-eos_density2(trial_pressure)
def calculate_matching_pressure(trial_pressure,Preset_tol,eos_density1,eos_density2):
p_matching=opt.newton(matching_eos,trial_pressure,tol=Preset_tol,args=(eos_density1,eos_density2))
return p_matching
class EOS_EXPANSION_PNM(object):
def __init__(self,args,defaut_u_min=1e-8,defaut_u_max=12):
self.baryon_density_s,self.m,self.E_n,self.L_n,\
self.K_n,self.Q_n=args
self.args=args
self.ELKQ_array=np.array(args[2:])
self.T=.3*(1.5*np.pi**2*toMev4(self.baryon_density_s,'mevfm3'))**(2./3)/self.m
self.abcd_array=get_parameters_pnm(self.T,self.ELKQ_array)
self.u_max=get_baryon_density_u_max(self.abcd_array,defaut_u_max)
self.u_min=defaut_u_min
self.eos_array,self.sol_saturation=get_eos_array(self.u_min,self.u_max,self.baryon_density_s,self.m,self.T,self.abcd_array)
self.pressure_s=self.sol_saturation[2]
self.density_s=self.sol_saturation[1]
self.unit_mass=c**4/(G**3*self.density_s*1e51*e)**0.5
self.unit_radius=c**2/(G*self.density_s*1e51*e)**0.5
self.unit_N=self.unit_radius**3*self.baryon_density_s*1e45
self.eosPressure_frombaryon = interp1d(self.eos_array[0],self.eos_array[2], kind='linear')
self.eosPressure = interp1d(self.eos_array[1],self.eos_array[2], kind='linear')
self.eosDensity = interp1d(self.eos_array[2],self.eos_array[1], kind='linear')
self.eosBaryonDensity = interp1d(self.eos_array[2],self.eos_array[0], kind='linear')
def __getstate__(self):
state = self.__dict__.copy()
        for dict_interpolation in ['eosPressure_frombaryon','eosPressure','eosDensity','eosBaryonDensity']:
            del state[dict_interpolation]
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.eosPressure_frombaryon = interp1d(self.eos_array[0],self.eos_array[2], kind='linear')
self.eosPressure = interp1d(self.eos_array[1],self.eos_array[2], kind='linear')
self.eosDensity = interp1d(self.eos_array[2],self.eos_array[1], kind='linear')
self.eosBaryonDensity = interp1d(self.eos_array[2],self.eos_array[0], kind='linear')
def eosCs2(self,pressure):
return 1.0/derivative(self.eosDensity,pressure,dx=pressure*dlnx_cs2)
def eosChempo(self,pressure):
return (pressure+self.eosDensity(pressure))/self.eosBaryonDensity(pressure)
class EOS_CSS(object):
def __init__(self,args):
self.density0,self.pressure0,self.baryondensity_trans,self.cs2 = args
self.B=(self.density0-self.pressure0/self.cs2)/(1.0+1.0/self.cs2)
def eosDensity(self,pressure):
density = (pressure-self.pressure0)/self.cs2+self.density0
return np.where(density>0,density,0)
def eosBaryonDensity(self,pressure):
baryondensity_trans = self.baryondensity_trans*((pressure+self.B)/(self.pressure0+self.B))**(1.0/(1.0+self.cs2))
return np.where(baryondensity_trans>0,baryondensity_trans,0)
def eosCs2(self,pressure):
return self.cs2
def eosChempo(self,pressure):
return (pressure+self.eosDensity(pressure))/self.eosBaryonDensity(pressure)
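# EOS_CSS is a constant-sound-speed extension anchored at the matching point, so by
# construction eosDensity(pressure0) == density0 and eosBaryonDensity(pressure0) ==
# baryondensity_trans, while eosCs2 returns the fixed cs2 everywhere above the transition.
# EOS_PnmCSS below relies on exactly this continuity when it stitches the PNM expansion
# onto the CSS branch at pressure_trans.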
class EOS_PnmCSS(object):
def __init__(self,args,cs2=1):
self.eosPNM=EOS_EXPANSION_PNM(args)
self.baryon_density_s=self.eosPNM.baryon_density_s
self.pressure_s=self.eosPNM.pressure_s
self.density_s=self.eosPNM.density_s
self.unit_mass=self.eosPNM.unit_mass
self.unit_radius=self.eosPNM.unit_radius
self.unit_N=self.eosPNM.unit_N
self.baryondensity_trans=self.eosPNM.u_max*self.eosPNM.baryon_density_s*0.9999999
self.pressure_trans=self.eosPNM.eosPressure_frombaryon(self.baryondensity_trans)
self.density_trans=self.eosPNM.eosDensity(self.pressure_trans)
self.cs2=cs2
args_eosCSS=[self.density_trans,self.pressure_trans\
,self.baryondensity_trans,self.cs2]
self.eosCSS=EOS_CSS(args_eosCSS)
def __getstate__(self):
state_PNM=self.eosPNM.__getstate__()
state = self.__dict__.copy()
return (state,state_PNM)
def __setstate__(self, state_):
state,state_PNM=state_
self.__dict__.update(state)
self.eosPNM.__setstate__(state_PNM)
def setMaxmass(self,result_maxmaxmass):
self.pc_max,self.mass_max,self.cs2_max=result_maxmaxmass
def eosDensity(self,pressure):
return np.where(pressure<self.pressure_trans,self.eosPNM.eosDensity(pressure),self.eosCSS.eosDensity(pressure))
def eosBaryonDensity(self,pressure):
return np.where(pressure<self.pressure_trans,self.eosPNM.eosBaryonDensity(pressure),self.eosCSS.eosBaryonDensity(pressure))
def eosCs2(self,pressure):
return np.where(pressure<self.pressure_trans,self.eosPNM.eosCs2(pressure),self.cs2)
def eosChempo(self,pressure):
return (pressure+self.eosDensity(pressure))/self.eosBaryonDensity(pressure)
Preset_tol_matching=1e-4
class EOS_Sly4_match_PnmCSS(object):
def __init__(self,eos_low,eos_high):
self.eos_low=eos_low
self.eos_high=eos_high
flag=True
for trial_pressure in [0.5,0.6,0.4,0.7,0.3,0.8,0.2,0.9,0.1,0.01,0.001]:
if(flag==True):
flag=False
try:
self.p_match=calculate_matching_pressure(trial_pressure,Preset_tol_matching,eos_low.eosDensity,eos_high.eosDensity)
except:
flag=True
else:
break
if(flag):
if(eos_high.eosPNM.u_max<1):
self.p_match=0
else:
                #print('Matching of low density EoS %s and high density EoS %s failed'%(self.eos_low,self.eos_high))
                print(self.eos_high.eosPNM.args)
if(self.p_match>100):
            print('matching at exceptionally high pressure, p_match=%f'%(self.p_match))
self.baryon_density_s=self.eos_high.baryon_density_s
self.pressure_s=self.eos_high.pressure_s
self.density_s=self.eos_high.density_s
self.unit_mass=self.eos_high.unit_mass
self.unit_radius=self.eos_high.unit_radius
self.unit_N=self.eos_high.unit_N
def __getstate__(self):
state_high=self.eos_high.__getstate__()
state = self.__dict__.copy()
return (state,state_high)
def __setstate__(self, state_):
state,state_high=state_
self.__dict__.update(state)
self.eos_high.__setstate__(state_high)
def setMaxmass(self,result_maxmaxmass):
self.pc_max,self.mass_max,self.cs2_max=result_maxmaxmass
def eosDensity(self,pressure):
return np.where(pressure<self.p_match,self.eos_low.eosDensity(pressure),self.eos_high.eosDensity(pressure))
def eosBaryonDensity(self,pressure):
return np.where(pressure<self.p_match,self.eos_low.eosBaryonDensity(pressure),self.eos_high.eosBaryonDensity(pressure))
def eosCs2(self,pressure):
return np.where(pressure<self.p_match,self.eos_low.eosCs2(pressure),self.eos_high.eosCs2(pressure))
def eosChempo(self,pressure):
return (pressure+self.eosDensity(pressure))/self.eosBaryonDensity(pressure)
import cPickle
import os
path = "./"
dir_name='Lambda_PNM_calculation_parallel'
error_log=path+dir_name+'/error.log'
if __name__ == '__main__':
try:
os.stat(path+dir_name)
except:
os.mkdir(path+dir_name)
N1=6
N2=15
N3=21
n_s=0.16
m=939
E_pnm = 32-16
L_pnm = np.linspace(30,70,N1)
K_pnm = np.linspace(50,400,N2)
Q_pnm = np.linspace(-400,600,N3)
Preset_Pressure_final=1e-8
Preset_rtol=1e-4
args=[]
from eos_class import EOS_BPS
eos_low=EOS_BPS()
eos_high=[]
eos =[]
for i in range(len(L_pnm)):
for j in range(len(K_pnm)):
for k in range(len(Q_pnm)):
args.append([n_s,m,E_pnm,L_pnm[i],K_pnm[j],Q_pnm[k]])
eos_high.append(EOS_PnmCSS(args[-1]))
#print args[-1]
eos.append(EOS_Sly4_match_PnmCSS(eos_low,eos_high[-1]))
args=np.reshape(np.array(args),(N1,N2,N3,6))
args_flat=np.reshape(np.array(args),(N1*N2*N3,6))
eos =np.reshape(np.array(eos),(N1,N2,N3))
eos_flat=np.array(eos).flatten()
f_file=open(path+dir_name+'/Lambda_PNM_calculation_args.dat','wb')
cPickle.dump(args,f_file)
f_file.close()
f_file=open(path+dir_name+'/Lambda_PNM_calculation_eos.dat','wb')
cPickle.dump(eos,f_file)
f_file.close()
print('%d EoS built with shape (L_n,K_n,Q_n)%s.'%(len(args_flat),np.shape(eos)))
from Lambda_hadronic_calculation import Calculation_maxmass,Calculation_mass_beta_Lambda,Calculation_onepointfour,Calculation_chirpmass_Lambdabeta6
from Parallel_process import main_parallel
f_maxmass_result=path+dir_name+'/Lambda_PNM_calculation_maxmass.dat'
maxmass_result=main_parallel(Calculation_maxmass,eos_flat,f_maxmass_result,error_log)
print('Maximum mass configuration of %d EoS calculated.' %(len(eos_flat)))
logic_maxmass=maxmass_result[:,1]>=2
    print('Maximum mass constraint of %d EoS calculated, %d EoS satisfied.' %(len(eos_flat),len(eos_flat[logic_maxmass])))
logic_causality=maxmass_result[:,2]<1
    print('Causality constraint of %d EoS calculated, %d EoS satisfied.' %(len(eos_flat),len(eos_flat[logic_causality])))
    logic=np.logical_and(logic_maxmass,logic_causality)
    print('Maximum mass and causality constraints of %d EoS calculated, %d EoS satisfied.' %(len(eos_flat),len(eos_flat[logic])))
for i in range(len(eos_flat)):
eos_flat[i].setMaxmass(maxmass_result[i])
f_onepointfour_result=path+dir_name+'/Lambda_PNM_calculation_onepointfour.dat'
Properity_onepointfour=main_parallel(Calculation_onepointfour,eos_flat[logic],f_onepointfour_result,error_log)
    print('properties of 1.4 M_sun star of %d EoS calculated.' %(len(eos_flat[logic])))
f_mass_beta_Lambda_result=path+dir_name+'/Lambda_PNM_calculation_mass_beta_Lambda.dat'
mass_beta_Lambda_result=main_parallel(Calculation_mass_beta_Lambda,eos_flat[logic],f_mass_beta_Lambda_result,error_log)
print('mass, compactness and tidal Lambda of %d EoS calculated.' %(len(eos_flat[logic])))
f_chirpmass_Lambdabeta6_result=path+dir_name+'/Lambda_hadronic_calculation_chirpmass_Lambdabeta6.dat'
chirp_q_Lambdabeta6_Lambda1Lambda2=main_parallel(Calculation_chirpmass_Lambdabeta6,np.concatenate((mass_beta_Lambda_result,np.tile(Properity_onepointfour[:,3],(40,1,1)).transpose()),axis=1),f_chirpmass_Lambdabeta6_result,error_log)
|
[
"[email protected]"
] | |
6299b8252a37bfdcec01408fc9c1f999e384a38e
|
27fc04a95b0d268adef4d4497c27ea9ae295d8a4
|
/ch09/6.sub.py
|
5129aa1ce0d9395a17421f1d77d219755a2a58f0
|
[] |
no_license
|
s-kyum/Python
|
2b35b333557db0698a3fd305d550baaa5304f206
|
e5b31036acd2bfb79f98ff02d59096a2429eb41f
|
refs/heads/master
| 2023-07-09T18:45:26.179057 | 2021-08-23T03:07:57 | 2021-08-23T03:07:57 | 378,803,615 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 115 |
py
|
import re
p = re.compile('blue|red')
s= p.sub('A', 'blue socks and red shoes') #sub() - replaces each match with 'A'
print(s)
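# A short extension of the example above (not part of the original exercise):
# sub() also accepts a count, and subn() additionally reports how many replacements were made.
s_first = p.sub('A', 'blue socks and red shoes', count=1)
print(s_first)  # only the first match is replaced
s_all, n = p.subn('A', 'blue socks and red shoes')
print(s_all, n)  # 'A socks and A shoes' 2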
|
[
"[email protected]"
] | |
aae4b4bc0babf621630c5ce16a27e2c6b8abf57a
|
e8a48749014f372633de65d79bfa26a3ad743d89
|
/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py
|
ba65525ae00b125084e843d2eec2fea2a3ed915e
|
[
"Apache-2.0"
] |
permissive
|
pvcastro/pytorch-pretrained-BERT
|
183b7291972c8d8c66c995647df66c1fe439a763
|
49cd736a288a315d741e5c337790effa4c9fa689
|
refs/heads/master
| 2022-08-19T08:55:16.332585 | 2022-06-30T16:11:08 | 2022-06-30T16:11:08 | 168,367,637 | 1 | 0 |
Apache-2.0
| 2019-01-30T15:39:42 | 2019-01-30T15:39:41 | null |
UTF-8
|
Python
| false | false | 37,964 |
py
|
# coding=utf-8
# Copyright 2022 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support TF Vision-Encoder-Text-Decoder architectures"""
import tempfile
import warnings
from typing import Optional
import tensorflow as tf
from ...configuration_utils import PretrainedConfig
from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput
from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, get_initializer, unpack_inputs
from ...tf_utils import shape_list
from ...utils import (
DUMMY_INPUTS,
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ..auto.configuration_auto import AutoConfig
from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "VisionEncoderDecoderConfig"
DEPRECATION_WARNING = (
"Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
" encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
" fine-tuning a model trained with versions anterior to 4.17.0. The decoder_input_ids are now created based on the"
" labels, no need to pass them yourself anymore."
)
VISION_ENCODER_DECODER_START_DOCSTRING = r"""
This class can be used to initialize an image-to-text-sequence model with any pretrained vision autoencoding model
as the encoder and any pretrained text autoregressive model as the decoder. The encoder is loaded via
[`~TFAutoModel.from_pretrained`] function and the decoder is loaded via [`~TFAutoModelForCausalLM.from_pretrained`]
function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
generative task, like image captioning.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
Zhou, Wei Li, Peter J. Liu.
Additionally, in [TrOCR: Transformer-based Optical Character Recognition with Pre-trained
Models](https://arxiv.org/abs/2109.10282) it is shown how leveraging large pretrained vision models for optical
character recognition (OCR) yields a significant performance improvement.
After such a Vision-Encoder-Text-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any
other models (see the examples for more information).
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
Parameters:
config ([`VisionEncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
VISION_ENCODER_DECODER_INPUTS_DOCSTRING = r"""
Args:
        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, each example of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using the vision model's feature extractor. For example, using
[`ViTFeatureExtractor`]. See [`ViTFeatureExtractor.__call__`] for details.
decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
Provide for sequence to sequence training to the decoder. Indices can be obtained using
[`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*):
This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden-states at the output
of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `({0})`.
decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
kwargs: (*optional*) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
- Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
- With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
"""
# Copied from transformers.models.encoder_decoder.modeling_tf_encoder_decoder.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
if pad_token_id is None:
raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
if decoder_start_token_id is None:
raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids = tf.where(
shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
)
if tf.executing_eagerly():
# "Verify that `labels` has only positive values and -100"
assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
# Make sure the assertion op is called by wrapping the result in an identity no-op
with tf.control_dependencies([assert_gte0]):
shifted_input_ids = tf.identity(shifted_input_ids)
return shifted_input_ids
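# Worked illustration of shift_tokens_right (values are illustrative only): with
# pad_token_id=0 and decoder_start_token_id=2,
#   labels [[5, 6, -100]]    -> prepend start token, drop last -> [[2, 5, 6]]
#   labels [[5, -100, -100]] -> [[2, 5, -100]] -> [[2, 5, 0]] after the -100 -> pad swap
# so the decoder inputs are simply the labels shifted one step to the right.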
@add_start_docstrings(VISION_ENCODER_DECODER_START_DOCSTRING)
class TFVisionEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss):
r"""
[`TFVisionEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture
with one of the base vision model classes of the library as encoder and another one of the base model classes as
decoder when created with the [`~TFAutoModel.from_pretrained`] class method for the encoder and
[`~TFAutoModelForCausalLM.from_pretrained`] class method for the decoder.
"""
config_class = VisionEncoderDecoderConfig
base_model_prefix = "vision_encoder_decoder"
load_weight_prefix = "tf_vision_encoder_decoder_model"
main_input_name = "pixel_values"
def __init__(
self,
config: Optional[PretrainedConfig] = None,
encoder: Optional[TFPreTrainedModel] = None,
decoder: Optional[TFPreTrainedModel] = None,
):
if config is None and (encoder is None or decoder is None):
raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
if config is None:
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
else:
if not isinstance(config, self.config_class):
raise ValueError(f"config: {config} has to be of type {self.config_class}")
if config.decoder.cross_attention_hidden_size is not None:
if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
raise ValueError(
"If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
" `config.encoder.hidden_size`."
)
# initialize with config
super().__init__(config)
if encoder is None:
encoder = TFAutoModel.from_config(config.encoder, name="encoder")
if decoder is None:
decoder = TFAutoModelForCausalLM.from_config(config.decoder, name="decoder")
self.encoder = encoder
self.decoder = decoder
if self.encoder.config.to_dict() != self.config.encoder.to_dict():
logger.warning(
f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
f" {self.config.encoder}"
)
if self.decoder.config.to_dict() != self.config.decoder.to_dict():
logger.warning(
f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
f" {self.config.decoder}"
)
# make sure that the individual model's config refers to the shared config
# so that the updates to the config will be synced
self.encoder.config = self.config.encoder
self.decoder.config = self.config.decoder
# encoder outputs might need to be projected to different dimension for decoder
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
self.enc_to_dec_proj = tf.keras.layers.Dense(
units=self.decoder.config.hidden_size,
kernel_initializer=get_initializer(config.encoder.initializer_range),
name="enc_to_dec_proj",
)
if self.encoder.get_output_embeddings() is not None:
raise ValueError(
f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
`Dict[str, tf.Tensor]`: The dummy inputs.
"""
decoder_input_ids = tf.constant(DUMMY_INPUTS)
batch_size, seq_len = decoder_input_ids.shape
VISION_DUMMY_INPUTS = tf.random.uniform(
shape=(
batch_size,
self.config.encoder.num_channels,
self.config.encoder.image_size,
self.config.encoder.image_size,
),
dtype=tf.float32,
)
pixel_values = tf.constant(VISION_DUMMY_INPUTS)
# Add `decoder_input_ids` because `self.decoder` requires it.
dummy = {"pixel_values": pixel_values, "decoder_input_ids": decoder_input_ids}
return dummy
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def get_input_embeddings(self):
return self.encoder.get_input_embeddings()
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.decoder.set_output_embeddings(new_embeddings)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Initializing `TFVisionEncoderDecoderModel` from a pytorch checkpoint is not supported currently.
If there are only pytorch checkpoints for a particular encoder-decoder model, a workaround is:
```python
>>> # a workaround to load from pytorch checkpoint
>>> _model = VisionEncoderDecoderModel.from_pretrained("ydshieh/vit-gpt2-coco-en")
>>> _model.encoder.save_pretrained("./encoder")
>>> _model.decoder.save_pretrained("./decoder")
>>> model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
... "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True
... )
>>> # This is only for copying some specific attributes of this particular model.
>>> model.config = _model.config
```
Example:
```python
>>> from transformers import TFVisionEncoderDecoderModel, ViTFeatureExtractor, GPT2Tokenizer
>>> from PIL import Image
>>> import requests
>>> feature_extractor = ViTFeatureExtractor.from_pretrained("ydshieh/vit-gpt2-coco-en")
>>> decoder_tokenizer = GPT2Tokenizer.from_pretrained("ydshieh/vit-gpt2-coco-en")
>>> model = TFVisionEncoderDecoderModel.from_pretrained("ydshieh/vit-gpt2-coco-en")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> img = Image.open(requests.get(url, stream=True).raw)
>>> pixel_values = feature_extractor(images=img, return_tensors="tf").pixel_values # Batch size 1
>>> output_ids = model.generate(
... pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True
... ).sequences
>>> preds = decoder_tokenizer.batch_decode(output_ids, skip_special_tokens=True)
>>> preds = [pred.strip() for pred in preds]
>>> assert preds == ["a cat laying on top of a couch next to another cat"]
```"""
from_pt = kwargs.pop("from_pt", False)
if from_pt:
raise ValueError(
"Initializing `TFVisionEncoderDecoderModel` from a pytorch checkpoint is not supported currently. Use"
" a tensorflow checkpoint instead. If only the pytorch checkpoints are available, create the encoder"
" and decoder models separately, and use them to initialize `TFVisionEncoderDecoderModel`. Check"
" `TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained()` for more details."
)
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
@classmethod
def from_encoder_decoder_pretrained(
cls,
encoder_pretrained_model_name_or_path: str = None,
decoder_pretrained_model_name_or_path: str = None,
*model_args,
**kwargs
) -> TFPreTrainedModel:
r"""
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
Params:
encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. An
example is `google/vit-base-patch16-224-in21k`.
- A path to a *directory* containing model weights saved using
[`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case,
`encoder_from_pt` should be set to `True`.
decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to *None*):
Information necessary to initiate the decoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case,
`decoder_from_pt` should be set to `True`.
model_args (remaining positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import TFVisionEncoderDecoderModel
>>> # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized
>>> model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
... "google/vit-base-patch16-224-in21k", "bert-base-uncased"
... )
>>> # saving model after fine-tuning
>>> model.save_pretrained("./vit-bert")
>>> # load fine-tuned model
>>> model = TFVisionEncoderDecoderModel.from_pretrained("./vit-bert")
```"""
kwargs_encoder = {
argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# remove encoder, decoder kwargs from kwargs
for key in kwargs_encoder.keys():
del kwargs["encoder_" + key]
for key in kwargs_decoder.keys():
del kwargs["decoder_" + key]
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
if encoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_encoder:
encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
"from a decoder model. Cross-attention and casual mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_encoder["config"] = encoder_config
kwargs_encoder["name"] = "encoder"
kwargs_encoder["load_weight_prefix"] = cls.load_weight_prefix
encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
# This is necessary to make `from_pretrained` following `save_pretrained` work correctly
if kwargs_encoder.get("from_pt", None):
del kwargs_encoder["from_pt"]
with tempfile.TemporaryDirectory() as tmp_dirname:
encoder.save_pretrained(tmp_dirname)
del encoder
encoder = TFAutoModel.from_pretrained(tmp_dirname, *model_args, **kwargs_encoder)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
if decoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_decoder:
decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder["config"] = decoder_config
if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
logger.warning(
f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
"make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
"passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
"`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
)
kwargs_decoder["name"] = "decoder"
kwargs_decoder["load_weight_prefix"] = cls.load_weight_prefix
decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# This is necessary to make `from_pretrained` following `save_pretrained` work correctly
if kwargs_decoder.get("from_pt", None):
del kwargs_decoder["from_pt"]
with tempfile.TemporaryDirectory() as tmp_dirname:
decoder.save_pretrained(tmp_dirname)
del decoder
decoder = TFAutoModelForCausalLM.from_pretrained(tmp_dirname, **kwargs_decoder)
# Make sure these 2 `tf.keras.Model` have fixed names so `from_pretrained` could load model weights correctly.
if encoder.name != "encoder":
raise ValueError("encoder model must be created with the name `encoder`.")
if decoder.name != "decoder":
raise ValueError("decoder model must be created with the name `decoder`.")
# instantiate config with corresponding kwargs
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
return cls(encoder=encoder, decoder=decoder, config=config)
@unpack_inputs
@add_start_docstrings_to_model_forward(
VISION_ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length")
)
@replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
pixel_values=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Returns:
Examples:
```python
>>> from transformers import AutoFeatureExtractor, AutoTokenizer, TFVisionEncoderDecoderModel
>>> from PIL import Image
>>> import requests
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
>>> decoder_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        >>> # initialize a vit-gpt2 from a pretrained ViT and a pretrained GPT2 model. Note that the cross-attention layers will be randomly initialized
>>> model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
... "google/vit-base-patch16-224-in21k", "gpt2"
... )
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> img = Image.open(requests.get(url, stream=True).raw)
>>> # forward
>>> pixel_values = feature_extractor(images=img, return_tensors="tf").pixel_values # Batch size 1
>>> decoder_input_ids = decoder_tokenizer("Linda Davis", return_tensors="tf").input_ids # Batch size 1
>>> outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
>>> # training
>>> outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids)
>>> loss, logits = outputs.loss, outputs.logits
>>> # save and load from pretrained
>>> model.save_pretrained("vit-gpt2")
>>> model = TFVisionEncoderDecoderModel.from_pretrained("vit-gpt2")
>>> # generation
>>> generated = model.generate(pixel_values, decoder_start_token_id=model.config.decoder.bos_token_id)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# Let the user be responsible for the expected format.
if encoder_outputs is not None:
if return_dict and not isinstance(encoder_outputs, ModelOutput):
raise ValueError(
"If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of "
f"`ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`."
)
if encoder_outputs is None:
encoder_inputs = {
"input_ids": pixel_values,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
"training": training,
}
# Add arguments to encoder from `kwargs_encoder`
encoder_inputs.update(kwargs_encoder)
if "input_ids" in encoder_inputs:
encoder_inputs["pixel_values"] = encoder_inputs.pop("input_ids")
if encoder_inputs["pixel_values"] is None:
raise ValueError("You have to specify pixel_values")
# Handle the case where the inputs are passed as a single dict which contains `labels`.
        # The `labels` shouldn't be passed to `self.encoder` below, because it is a base model without this
# parameter (otherwise, an error occurs when `input_processing` is called inside `self.encoder.call()`).
if "labels" in encoder_inputs:
labels = encoder_inputs.pop("labels")
# handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
if "decoder_input_ids" in encoder_inputs:
decoder_input_ids = encoder_inputs.pop("decoder_input_ids")
# handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
if "decoder_attention_mask" in encoder_inputs:
decoder_attention_mask = encoder_inputs.pop("decoder_attention_mask")
encoder_outputs = self.encoder(**encoder_inputs)
encoder_hidden_states = encoder_outputs[0]
# optionally project encoder_hidden_states
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
batch_size, sequence_length = shape_list(encoder_hidden_states)[:2]
encoder_attention_mask = tf.ones(shape=(batch_size, sequence_length), dtype=tf.int32)
decoder_inputs = {
"input_ids": decoder_input_ids,
"attention_mask": decoder_attention_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
"inputs_embeds": decoder_inputs_embeds,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"use_cache": use_cache,
"past_key_values": past_key_values,
"return_dict": return_dict,
"training": training,
}
# Add arguments to decoder from `kwargs_decoder`
decoder_inputs.update(kwargs_decoder)
decoder_outputs = self.decoder(**decoder_inputs)
logits = decoder_outputs[0]
# Compute loss independent from decoder (as some shift the logits inside them)
loss = None
if labels is not None:
warnings.warn(DEPRECATION_WARNING, FutureWarning)
loss = self.hf_compute_loss(labels, logits)
past_key_values = None
if decoder_inputs["use_cache"]:
past_key_values = decoder_outputs[1]
# The starting index of the remaining elements in `decoder_outputs`
start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)])
if not decoder_inputs["return_dict"]:
if not isinstance(encoder_outputs, tuple):
encoder_outputs = encoder_outputs.to_tuple()
output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs
output = tuple([x for x in output if x is not None])
return output
return TFSeq2SeqLMOutput(
loss=loss,
logits=decoder_outputs.logits,
past_key_values=past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def serving_output(self, output):
pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
cross_attns = (
tf.convert_to_tensor(output.cross_attentions)
if self.config.output_attentions and output.cross_attentions is not None
else None
)
return TFSeq2SeqLMOutput(
logits=output.logits,
past_key_values=pkv,
decoder_hidden_states=dec_hs,
decoder_attentions=dec_attns,
encoder_last_hidden_state=output.encoder_last_hidden_state,
encoder_hidden_states=enc_hs,
encoder_attentions=enc_attns,
cross_attentions=cross_attns,
)
def prepare_inputs_for_generation(
self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past=past)
decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
past_key_values = decoder_inputs.get("past_key_values")
if past_key_values is None:
past_key_values = decoder_inputs.get("past") # e.g. on TF GPT2
input_dict = {
"pixel_values": None, # needs to be passed to make Keras.layer.__call__ happy
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"decoder_input_ids": decoder_inputs["input_ids"],
# TODO (joao): the `TFBaseModelOutput` wrapper should not be needed after the generate refactor is complete
"encoder_outputs": TFBaseModelOutput(last_hidden_state=encoder_outputs[0]),
"past_key_values": past_key_values,
"use_cache": use_cache,
}
return input_dict
def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
def resize_token_embeddings(self, *args, **kwargs):
raise NotImplementedError(
"Resizing the embedding layers via the TFVisionEncoderDecoderModel directly is not supported."
"Please use the respective methods of the wrapped objects (model.decoder.resize_token_embeddings(...))"
)
def _reorder_cache(self, past, beam_idx):
# apply decoder cache reordering here
return self.decoder._reorder_cache(past, beam_idx)
|
[
"[email protected]"
] | |
f8b2e6eb000e9bb4ab907381ef8afbef0d9ae96e
|
453df013de5dc74291db65436011b661d969e4b6
|
/soccer/gameplay2/plays/restarts/kick_penalty.py
|
3476f67818ae84d74caf1477ebca2fda655fab5d
|
[] |
no_license
|
david2194/robocup-software
|
3f04eb7de4b84cafdab1a956df7cc48c3d3d4604
|
6f98c38ddb129ca49be357fc230990c16eadf9d4
|
refs/heads/master
| 2021-01-17T21:39:47.832797 | 2014-07-15T01:31:51 | 2014-07-15T01:31:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 820 |
py
|
import play
import behavior
import robocup
import main
import tactics.line_up
import tactics.penalty
# one robot kicks the ball, the others just line up and wait
class KickPenalty(play.Play):
def __init__(self):
super().__init__(continuous=True)
self.add_transition(behavior.Behavior.State.start,
behavior.Behavior.State.running,
lambda: True,
'immediately')
kicker = tactics.penalty.Penalty()
self.add_subbehavior(kicker, 'kicker', required=True, priority=10)
line = robocup.Segment(robocup.Point(1.5, 1), robocup.Point(1.5, 2.5))
        line_up = tactics.line_up.LineUp(line)
        # the non-kicking robots line up on this segment and wait
        self.add_subbehavior(line_up, 'line up', required=False, priority=1)
@classmethod
def score(cls):
gs = main.game_state()
return 0.0 if gs.is_setup_state() and gs.is_our_penalty() else float("inf")
|
[
"[email protected]"
] | |
0d6c12a20b87eb1a3983e038e756badb1c55e1c1
|
55c24645dd63a1c41037dcfb9fb45bc7bcdea4be
|
/venv/lib/python3.7/site-packages/sqlalchemy/__init__.py
|
a8209abb0731906a86b9be969f0404a04d25f2f6
|
[] |
no_license
|
abdullah-nawaz/flask-boilerplate
|
7c42801a21ee3e6a647cc8a7d92e0285f8e86cad
|
01bc7fe1140e8ec613de4a38546a07ddfbdbd254
|
refs/heads/master
| 2022-12-02T05:06:08.297759 | 2020-06-24T21:36:32 | 2020-06-24T21:36:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,659 |
py
|
# sqlalchemy/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import util as _util # noqa
from .inspection import inspect # noqa
from .schema import BLANK_SCHEMA # noqa
from .schema import CheckConstraint # noqa
from .schema import Column # noqa
from .schema import ColumnDefault # noqa
from .schema import Computed # noqa
from .schema import Constraint # noqa
from .schema import DDL # noqa
from .schema import DefaultClause # noqa
from .schema import FetchedValue # noqa
from .schema import ForeignKey # noqa
from .schema import ForeignKeyConstraint # noqa
from .schema import Index # noqa
from .schema import MetaData # noqa
from .schema import PassiveDefault # noqa
from .schema import PrimaryKeyConstraint # noqa
from .schema import Sequence # noqa
from .schema import Table # noqa
from .schema import ThreadLocalMetaData # noqa
from .schema import UniqueConstraint # noqa
from .sql import alias # noqa
from .sql import all_ # noqa
from .sql import and_ # noqa
from .sql import any_ # noqa
from .sql import asc # noqa
from .sql import between # noqa
from .sql import bindparam # noqa
from .sql import case # noqa
from .sql import cast # noqa
from .sql import collate # noqa
from .sql import column # noqa
from .sql import delete # noqa
from .sql import desc # noqa
from .sql import distinct # noqa
from .sql import except_ # noqa
from .sql import except_all # noqa
from .sql import exists # noqa
from .sql import extract # noqa
from .sql import false # noqa
from .sql import func # noqa
from .sql import funcfilter # noqa
from .sql import insert # noqa
from .sql import intersect # noqa
from .sql import intersect_all # noqa
from .sql import join # noqa
from .sql import lateral # noqa
from .sql import literal # noqa
from .sql import literal_column # noqa
from .sql import modifier # noqa
from .sql import not_ # noqa
from .sql import null # noqa
from .sql import nullsfirst # noqa
from .sql import nullslast # noqa
from .sql import or_ # noqa
from .sql import outerjoin # noqa
from .sql import outparam # noqa
from .sql import over # noqa
from .sql import select # noqa
from .sql import subquery # noqa
from .sql import table # noqa
from .sql import tablesample # noqa
from .sql import text # noqa
from .sql import true # noqa
from .sql import tuple_ # noqa
from .sql import type_coerce # noqa
from .sql import union # noqa
from .sql import union_all # noqa
from .sql import update # noqa
from .sql import within_group # noqa
from .types import ARRAY # noqa
from .types import BIGINT # noqa
from .types import BigInteger # noqa
from .types import BINARY # noqa
from .types import Binary # noqa
from .types import BLOB # noqa
from .types import BOOLEAN # noqa
from .types import Boolean # noqa
from .types import CHAR # noqa
from .types import CLOB # noqa
from .types import DATE # noqa
from .types import Date # noqa
from .types import DATETIME # noqa
from .types import DateTime # noqa
from .types import DECIMAL # noqa
from .types import Enum # noqa
from .types import FLOAT # noqa
from .types import Float # noqa
from .types import INT # noqa
from .types import INTEGER # noqa
from .types import Integer # noqa
from .types import Interval # noqa
from .types import JSON # noqa
from .types import LargeBinary # noqa
from .types import NCHAR # noqa
from .types import NUMERIC # noqa
from .types import Numeric # noqa
from .types import NVARCHAR # noqa
from .types import PickleType # noqa
from .types import REAL # noqa
from .types import SMALLINT # noqa
from .types import SmallInteger # noqa
from .types import String # noqa
from .types import TEXT # noqa
from .types import Text # noqa
from .types import TIME # noqa
from .types import Time # noqa
from .types import TIMESTAMP # noqa
from .types import TypeDecorator # noqa
from .types import Unicode # noqa
from .types import UnicodeText # noqa
from .types import VARBINARY # noqa
from .types import VARCHAR # noqa
from .engine import create_engine # noqa nosort
from .engine import engine_from_config # noqa nosort
__version__ = "1.3.17"
def __go(lcls):
global __all__
from . import events # noqa
from . import util as _sa_util
import inspect as _inspect
__all__ = sorted(
name
for name, obj in lcls.items()
if not (name.startswith("_") or _inspect.ismodule(obj))
)
_sa_util.dependencies.resolve_all("sqlalchemy")
__go(locals())
|
[
"[email protected]"
] | |
6822f81d9f94b272ee76b01d65f926ac917a2f80
|
dfaf6f7ac83185c361c81e2e1efc09081bd9c891
|
/k8sdeployment/k8sstat/python/kubernetes/test/test_runtime_raw_extension.py
|
ee67e7a373c69490627d5edc9482fa2e486fd0ae
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JeffYFHuang/gpuaccounting
|
d754efac2dffe108b591ea8722c831d979b68cda
|
2c63a63c571240561725847daf1a7f23f67e2088
|
refs/heads/master
| 2022-08-09T03:10:28.185083 | 2022-07-20T00:50:06 | 2022-07-20T00:50:06 | 245,053,008 | 0 | 0 |
MIT
| 2021-03-25T23:44:50 | 2020-03-05T02:44:15 |
JavaScript
|
UTF-8
|
Python
| false | false | 968 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.runtime_raw_extension import RuntimeRawExtension # noqa: E501
from kubernetes.client.rest import ApiException
class TestRuntimeRawExtension(unittest.TestCase):
"""RuntimeRawExtension unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRuntimeRawExtension(self):
"""Test RuntimeRawExtension"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.runtime_raw_extension.RuntimeRawExtension() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
50c91ad80d12b49cb8dd5fa2e3e23d87d64c3ce0
|
46667df8344db58698838d677bdae377b3c3c53c
|
/Data Manipulation with Pandas/Part 2/25.upsampling-data.py
|
18d91a2eac217beb607b6e8648ee86d48cbb6b62
|
[] |
no_license
|
bennysetiawan/DQLab-Career-2021
|
278577cdddb3852c57f799cd1207b4ff45962960
|
0822d15e3b24cf0146c23456d4b65b0fb00a53fc
|
refs/heads/master
| 2023-06-06T13:24:21.289929 | 2021-06-23T17:09:14 | 2021-06-23T17:09:14 | 379,657,598 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 561 |
py
|
import pandas as pd
# Load dataset https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/LO4/global_air_quality_4000rows.csv
gaq = pd.read_csv('https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/LO4/global_air_quality_4000rows.csv')
gaq['timestamp'] = pd.to_datetime(gaq['timestamp'])
gaq = gaq.set_index('timestamp')
print('Dataset before upsampling (top 5 rows):\n', gaq.head())
# Upsample from daily to hourly and compute the mean
gaq_hourly = gaq.resample('H').mean()
print('Upsampling daily to hourly - mean (top 5 rows):\n', gaq_hourly.head())
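# A possible next step (not part of the original exercise): the upsampled hourly rows
# between observations are NaN, so they are commonly forward-filled or interpolated.
gaq_hourly_ffill = gaq_hourly.ffill()
print('Upsampling daily to hourly - mean + ffill (top 5 rows):\n', gaq_hourly_ffill.head())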
|
[
"[email protected]"
] | |
f04775a90eb47f46df8bc83f530d0483eb919a60
|
b08d42933ac06045905d7c005ca9c114ed3aecc0
|
/src/coefSubset/evaluate/ranks/tenth/rank_1ay7_O.py
|
0e06f6a99063b4335484694384ed33130ec83f0a
|
[] |
no_license
|
TanemuraKiyoto/PPI-native-detection-via-LR
|
d148d53f5eb60a4dda5318b371a3048e3f662725
|
897e7188b0da94e87126a4acc0c9a6ff44a64574
|
refs/heads/master
| 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,204 |
py
|
# 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '1ay7.csv'
identifier = 'O'
thresholdCoef = 0.1
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/tenth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/tenth/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
    # subtract each row of the dataframe from row i, then drop the trivial row[[i]] - row[[i]] entry. Some input files also have a 'class' column; this is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
|
[
"[email protected]"
] | |
556405e629f0f2151963bc39b08f1197eac1b386
|
78b160d8131f3c4b7aef0d051b040825a9c50e0d
|
/algoexpert/easy/palindromeCheck.py
|
67e4ca78dd3bd4d9c8bce9e602b51280a4a5ece4
|
[
"MIT"
] |
permissive
|
ardakkk/Algorithms-and-Data-Structures
|
744f8c9ffb233b95040e5bdcbddb9f5d2ff7a5ba
|
c428bb0bd7eeb6c34448630f88f13e1329b54636
|
refs/heads/master
| 2021-07-08T22:40:40.361282 | 2020-07-20T10:39:58 | 2020-07-20T10:39:58 | 156,005,721 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,134 |
py
|
# Time: O(n^2) | Space: O(n)
# def isPalindrome(string):
# reversedString = ""
#
# for i in reversed(range(len(string))):
# reversedString += string[i]
# return string == reversedString
# Time: O(n) | Space: O(n)
# def isPalindrome(string):
# reversedChars = []
#
# for i in reversed(range(len(string))):
# reversedChars.append(string[i])
# return string == "".join(reversedChars)
# Time: O(n) | Space: O(n)
# def isPalindrome(string, i = 0):
#     j = len(string) - 1 - i
#     return True if i >= j else string[i] == string[j] and isPalindrome(string, i + 1)
# Time: O(n) | Space: O(n)
# def isPalindrome(string, i = 0):
# j = len(string) - 1 - i
#
# if i >= j:
# return True
# if string[i] != string[j]:
# return False
#
# return isPalindrome(string, i + 1)
# Time: O(n) | Space: O(1)
def isPalindrome(string):
leftIdx = 0
rightIdx = len(string) - 1
while leftIdx < rightIdx:
if string[leftIdx] != string[rightIdx]:
return False
leftIdx += 1
rightIdx -= 1
return True
print(isPalindrome('abcdcba'))
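# A few extra checks (illustrative additions) covering even-length, non-palindrome and
# single-character inputs:
print(isPalindrome('abccba'))  # True
print(isPalindrome('abcdef'))  # False
print(isPalindrome('a'))       # True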
|
[
"[email protected]"
] | |
85848666ff2722fdc295c2a9d73fb1963e6f41d4
|
15b12d69ac3123d1562986970ce01d7a47d171de
|
/typings/nltk/translate/meteor_score.pyi
|
1cde9aead19696ebb82604513df27febecc0a9d7
|
[
"Apache-2.0"
] |
permissive
|
simplymanas/python-learning
|
9b67b5a7acfb3a7c2455a7d1fc66203a2b419c37
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
refs/heads/master
| 2021-07-11T06:40:24.803589 | 2021-06-20T12:06:02 | 2021-06-20T12:06:02 | 241,769,614 | 5 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,369 |
pyi
|
"""
This type stub file was generated by pyright.
"""
def _generate_enums(hypothesis, reference, preprocess=...):
"""
Takes in string inputs for hypothesis and reference and returns
enumerated word lists for each of them
:param hypothesis: hypothesis string
:type hypothesis: str
:param reference: reference string
:type reference: str
    :param preprocess: preprocessing method (default str.lower)
:type preprocess: method
:return: enumerated words list
:rtype: list of 2D tuples, list of 2D tuples
"""
...
def exact_match(hypothesis, reference):
"""
matches exact words in hypothesis and reference
and returns a word mapping based on the enumerated
word id between hypothesis and reference
:param hypothesis: hypothesis string
:type hypothesis: str
:param reference: reference string
:type reference: str
:return: enumerated matched tuples, enumerated unmatched hypothesis tuples,
enumerated unmatched reference tuples
:rtype: list of 2D tuples, list of 2D tuples, list of 2D tuples
"""
...
def _match_enums(enum_hypothesis_list, enum_reference_list):
"""
matches exact words in hypothesis and reference and returns
a word mapping between enum_hypothesis_list and enum_reference_list
based on the enumerated word id.
:param enum_hypothesis_list: enumerated hypothesis list
:type enum_hypothesis_list: list of tuples
:param enum_reference_list: enumerated reference list
:type enum_reference_list: list of 2D tuples
:return: enumerated matched tuples, enumerated unmatched hypothesis tuples,
enumerated unmatched reference tuples
:rtype: list of 2D tuples, list of 2D tuples, list of 2D tuples
"""
...
def _enum_stem_match(enum_hypothesis_list, enum_reference_list, stemmer=...):
"""
Stems each word and matches them in hypothesis and reference
and returns a word mapping between enum_hypothesis_list and
enum_reference_list based on the enumerated word id. The function also
returns a enumerated list of unmatched words for hypothesis and reference.
:param enum_hypothesis_list:
:type enum_hypothesis_list:
:param enum_reference_list:
:type enum_reference_list:
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:type stemmer: nltk.stem.api.StemmerI or any class that implements a stem method
:return: enumerated matched tuples, enumerated unmatched hypothesis tuples,
enumerated unmatched reference tuples
:rtype: list of 2D tuples, list of 2D tuples, list of 2D tuples
"""
...
def stem_match(hypothesis, reference, stemmer=...):
"""
Stems each word and matches them in hypothesis and reference
and returns a word mapping between hypothesis and reference
:param hypothesis:
:type hypothesis:
:param reference:
:type reference:
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:type stemmer: nltk.stem.api.StemmerI or any class that
implements a stem method
:return: enumerated matched tuples, enumerated unmatched hypothesis tuples,
enumerated unmatched reference tuples
:rtype: list of 2D tuples, list of 2D tuples, list of 2D tuples
"""
...
def _enum_wordnetsyn_match(enum_hypothesis_list, enum_reference_list, wordnet=...):
"""
Matches each word in reference to a word in hypothesis
if any synonym of a hypothesis word is the exact match
to the reference word.
:param enum_hypothesis_list: enumerated hypothesis list
:param enum_reference_list: enumerated reference list
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:return: list of matched tuples, unmatched hypothesis list, unmatched reference list
:rtype: list of tuples, list of tuples, list of tuples
"""
...
def wordnetsyn_match(hypothesis, reference, wordnet=...):
"""
Matches each word in reference to a word in hypothesis if any synonym
of a hypothesis word is the exact match to the reference word.
:param hypothesis: hypothesis string
:param reference: reference string
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:return: list of mapped tuples
:rtype: list of tuples
"""
...
def _enum_allign_words(enum_hypothesis_list, enum_reference_list, stemmer=..., wordnet=...):
"""
Aligns/matches words in the hypothesis to reference by sequentially
applying exact match, stemmed match and wordnet based synonym match.
    In case there are multiple matches, the match which has the least number
    of crossings is chosen. Takes enumerated lists as input instead of
string input
:param enum_hypothesis_list: enumerated hypothesis list
:param enum_reference_list: enumerated reference list
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:type stemmer: nltk.stem.api.StemmerI or any class that implements a stem method
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:return: sorted list of matched tuples, unmatched hypothesis list,
unmatched reference list
:rtype: list of tuples, list of tuples, list of tuples
"""
...
def allign_words(hypothesis, reference, stemmer=..., wordnet=...):
"""
Aligns/matches words in the hypothesis to reference by sequentially
applying exact match, stemmed match and wordnet based synonym match.
    In case there are multiple matches, the match which has the least number
    of crossings is chosen.
:param hypothesis: hypothesis string
:param reference: reference string
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:type stemmer: nltk.stem.api.StemmerI or any class that implements a stem method
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:return: sorted list of matched tuples, unmatched hypothesis list, unmatched reference list
:rtype: list of tuples, list of tuples, list of tuples
"""
...
def _count_chunks(matches):
"""
Counts the fewest possible number of chunks such that matched unigrams
    of each chunk are adjacent to each other. This is used to calculate the
fragmentation part of the metric.
:param matches: list containing a mapping of matched words (output of allign_words)
    :return: Number of chunks a sentence is divided into post alignment
:rtype: int
"""
...
def single_meteor_score(reference, hypothesis, preprocess=..., stemmer=..., wordnet=..., alpha=..., beta=..., gamma=...):
"""
Calculates METEOR score for single hypothesis and reference as per
"Meteor: An Automatic Metric for MT Evaluation with HighLevels of
Correlation with Human Judgments" by Alon Lavie and Abhaya Agarwal,
in Proceedings of ACL.
http://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf
>>> hypothesis1 = 'It is a guide to action which ensures that the military always obeys the commands of the party'
>>> reference1 = 'It is a guide to action that ensures that the military will forever heed Party commands'
>>> round(single_meteor_score(reference1, hypothesis1),4)
0.7398
    If no words match during the alignment, the method returns a score of 0.
    We can safely return a zero instead of raising a division-by-zero error,
    as no match usually implies a bad translation.
>>> round(meteor_score('this is a cat', 'non matching hypothesis'),4)
0.0
:param references: reference sentences
:type references: list(str)
:param hypothesis: a hypothesis sentence
:type hypothesis: str
:param preprocess: preprocessing function (default str.lower)
:type preprocess: method
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:type stemmer: nltk.stem.api.StemmerI or any class that implements a stem method
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:param alpha: parameter for controlling relative weights of precision and recall.
:type alpha: float
    :param beta: parameter for controlling the shape of the penalty as a
        function of fragmentation.
:type beta: float
    :param gamma: relative weight assigned to fragmentation penalty.
:type gamma: float
:return: The sentence-level METEOR score.
:rtype: float
"""
...
def meteor_score(references, hypothesis, preprocess=..., stemmer=..., wordnet=..., alpha=..., beta=..., gamma=...):
"""
Calculates METEOR score for hypothesis with multiple references as
described in "Meteor: An Automatic Metric for MT Evaluation with
    High Levels of Correlation with Human Judgments" by Alon Lavie and
Abhaya Agarwal, in Proceedings of ACL.
http://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf
In case of multiple references the best score is chosen. This method
iterates over single_meteor_score and picks the best pair among all
the references for a given hypothesis
>>> hypothesis1 = 'It is a guide to action which ensures that the military always obeys the commands of the party'
>>> hypothesis2 = 'It is to insure the troops forever hearing the activity guidebook that party direct'
>>> reference1 = 'It is a guide to action that ensures that the military will forever heed Party commands'
>>> reference2 = 'It is the guiding principle which guarantees the military forces always being under the command of the Party'
>>> reference3 = 'It is the practical guide for the army always to heed the directions of the party'
>>> round(meteor_score([reference1, reference2, reference3], hypothesis1),4)
0.7398
    If no words match during the alignment, the method returns a score of 0.
    We can safely return a zero instead of raising a division-by-zero error,
    as no match usually implies a bad translation.
>>> round(meteor_score(['this is a cat'], 'non matching hypothesis'),4)
0.0
:param references: reference sentences
:type references: list(str)
:param hypothesis: a hypothesis sentence
:type hypothesis: str
:param preprocess: preprocessing function (default str.lower)
:type preprocess: method
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:type stemmer: nltk.stem.api.StemmerI or any class that implements a stem method
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:param alpha: parameter for controlling relative weights of precision and recall.
:type alpha: float
    :param beta: parameter for controlling the shape of the penalty as a
        function of fragmentation.
:type beta: float
    :param gamma: relative weight assigned to fragmentation penalty.
:type gamma: float
:return: The sentence-level METEOR score.
:rtype: float
"""
...
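# Editor's note (hedged, not part of the generated stub): the doctests in the
# docstrings above already show the intended usage, e.g.
#
#   from nltk.translate.meteor_score import meteor_score, single_meteor_score
#   single_meteor_score(reference1, hypothesis1)                     # ~0.7398
#   meteor_score([reference1, reference2, reference3], hypothesis1)  # ~0.7398
#
# Whether raw strings or pre-tokenized inputs are accepted depends on the
# installed NLTK version, so treat these calls as a sketch rather than a
# guaranteed API contract.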
|
[
"[email protected]"
] | |
e73bc712fb8c9aaa9b6e279837ea9cba1a4624f9
|
09dd58f46b1e914278067a69142230c7af0165c2
|
/blackmamba/lib/flake8/options/aggregator.py
|
5b8ab9c33b475d3ad576e839636fd2baf3f73f86
|
[
"MIT"
] |
permissive
|
zrzka/blackmamba
|
4e70262fbe3702553bf5d285a81b33eb6b3025ea
|
b298bc5d59e5aea9d494282910faf522c08ebba9
|
refs/heads/master
| 2021-01-01T18:43:19.490953 | 2020-01-20T08:26:33 | 2020-01-20T08:26:33 | 98,410,391 | 72 | 12 |
MIT
| 2020-01-20T08:26:35 | 2017-07-26T10:21:15 |
Python
|
UTF-8
|
Python
| false | false | 3,255 |
py
|
"""Aggregation function for CLI specified options and config file options.
This holds the logic that uses the collected and merged config files and
applies the user-specified command-line configuration on top of it.
"""
import logging
from flake8.options import config
LOG = logging.getLogger(__name__)
def aggregate_options(manager, config_finder, arglist=None, values=None):
"""Aggregate and merge CLI and config file options.
:param flake8.options.manager.OptionManager manager:
The instance of the OptionManager that we're presently using.
:param flake8.options.config.ConfigFileFinder config_finder:
The config file finder to use.
:param list arglist:
The list of arguments to pass to ``manager.parse_args``. In most cases
this will be None so ``parse_args`` uses ``sys.argv``. This is mostly
available to make testing easier.
:param optparse.Values values:
Previously parsed set of parsed options.
:returns:
Tuple of the parsed options and extra arguments returned by
``manager.parse_args``.
:rtype:
tuple(optparse.Values, list)
"""
# Get defaults from the option parser
default_values, _ = manager.parse_args([], values=values)
# Get original CLI values so we can find additional config file paths and
# see if --config was specified.
original_values, _ = manager.parse_args(arglist)
# Make our new configuration file mergerator
config_parser = config.MergedConfigParser(
option_manager=manager,
config_finder=config_finder,
)
# Get the parsed config
parsed_config = config_parser.parse(original_values.config,
original_values.isolated)
# Extend the default ignore value with the extended default ignore list,
# registered by plugins.
extended_default_ignore = manager.extended_default_ignore.copy()
LOG.debug('Extended default ignore list: %s',
list(extended_default_ignore))
extended_default_ignore.update(default_values.ignore)
default_values.ignore = list(extended_default_ignore)
LOG.debug('Merged default ignore list: %s', default_values.ignore)
extended_default_select = manager.extended_default_select.copy()
LOG.debug('Extended default select list: %s',
list(extended_default_select))
default_values.extended_default_select = extended_default_select
# Merge values parsed from config onto the default values returned
for config_name, value in parsed_config.items():
dest_name = config_name
# If the config name is somehow different from the destination name,
# fetch the destination name from our Option
if not hasattr(default_values, config_name):
dest_name = config_parser.config_options[config_name].dest
LOG.debug('Overriding default value of (%s) for "%s" with (%s)',
getattr(default_values, dest_name, None),
dest_name,
value)
# Override the default values with the config values
setattr(default_values, dest_name, value)
# Finally parse the command-line options
return manager.parse_args(arglist, default_values)
|
[
"[email protected]"
] | |
6d41bc6c5b7e28373bc88fa9ad52239f056dbc2c
|
36821b9fcdbefe88a60f584e7d39695ca5fe6177
|
/codeforces/1453/A.py
|
a93ebbcf9a29319282fbae77a215b4aaceb35e41
|
[] |
no_license
|
shubham409/CodeSubmits
|
231fc40a64ad97323e558ba2fa252c62f34c7809
|
5da4d9cc87d4ac8f54175723c2acf77fc5784f21
|
refs/heads/master
| 2023-06-26T06:17:30.255973 | 2021-06-03T18:24:00 | 2021-07-29T20:35:01 | 329,399,083 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 304 |
py
|
def fun(ls, lt):
st = set(ls)
count = 0
for i in lt:
if(i in st):
count += 1
print(count)
T = int(input())
for i in range(T):
n, k = list(map(int, input().split()))
ls = list(map(int, input().split()))
lt = list(map(int, input().split()))
fun(ls, lt)
|
[
"[email protected]"
] | |
58789926c4ec41d87ecb91c85728560a035ea6c8
|
2c3e2d7da1e62bd75229fad0c8e18431a420b8a1
|
/tidy_headers/_parse_array.py
|
ac1f4780b57b533b14d09b060e35fe2b661aa445
|
[
"MIT"
] |
permissive
|
ksunden/tidy_headers
|
9526c3b522257f9dec4729fcdbcc09e7db68b6b3
|
060942204b5bb87a8b209e81e1b64fd3cbb0691f
|
refs/heads/master
| 2020-03-13T02:55:24.394455 | 2017-11-13T03:08:06 | 2017-11-13T03:08:06 | 130,934,077 | 0 | 0 | null | 2018-04-25T01:34:24 | 2018-04-25T01:34:24 | null |
UTF-8
|
Python
| false | false | 2,092 |
py
|
"""Parse array."""
# --- import -------------------------------------------------------------------------------------
import re
import numpy as np
from ._utilities import flatten_list
# --- parse --------------------------------------------------------------------------------------
def array2string(array, sep='\t'):
"""Generate a string from an array with useful formatting.
Great for writing arrays into single lines in files.
See Also
--------
string2array
"""
np.set_printoptions(threshold=array.size)
string = np.array2string(array, separator=sep)
string = string.replace('\n', sep)
string = re.sub(r'({})(?=\1)'.format(sep), '', string)
return string
def string2array(string, sep='\t'):
"""Generate an array from a string created using array2string.
See Also
--------
array2string
"""
# discover size
size = string.count('\t') + 1
# discover dimensionality
dimensionality = 0
while string[dimensionality] == '[':
dimensionality += 1
# discover shape
shape = []
for i in range(1, dimensionality + 1)[::-1]:
to_match = '[' * (i - 1)
count_positive = string.count(to_match + ' ')
count_negative = string.count(to_match + '-')
shape.append(count_positive + count_negative)
shape[-1] = size / shape[-2]
for i in range(1, dimensionality - 1)[::-1]:
shape[i] = shape[i] / shape[i - 1]
shape = tuple([int(s) for s in shape])
# import list of floats
lis = string.split(' ')
    # annoyingly, series of negative values get past the previous filters
lis = flatten_list([i.split('-') for i in lis])
for i, item in enumerate(lis):
bad_chars = ['[', ']', '\t', '\n']
for bad_char in bad_chars:
item = item.replace(bad_char, '')
lis[i] = item
for i in range(len(lis))[::-1]:
try:
lis[i] = float(lis[i])
except ValueError:
lis.pop(i)
# create and reshape array
arr = np.array(lis)
arr.shape = shape
# finish
return arr
|
[
"[email protected]"
] | |
d8bb8e646968f06a0614abc39cd6ba7e62e1df63
|
ddd35c693194aefb9c009fe6b88c52de7fa7c444
|
/Live 10.1.18/VCM600/VCM600.py
|
d7805481023c57c96160c4fb4feb1534cdf912e5
|
[] |
no_license
|
notelba/midi-remote-scripts
|
819372d9c22573877c7912091bd8359fdd42585d
|
e3ec6846470eed7da8a4d4f78562ed49dc00727b
|
refs/heads/main
| 2022-07-30T00:18:33.296376 | 2020-10-04T00:00:12 | 2020-10-04T00:00:12 | 301,003,961 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,780 |
py
|
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: c:\Jenkins\live\output\Live\win_64_static\Release\python-bundle\MIDI Remote Scripts\VCM600\VCM600.py
# Compiled at: 2020-07-14 15:33:46
from __future__ import absolute_import, print_function, unicode_literals
import Live
from _Framework.ControlSurface import ControlSurface
from _Framework.InputControlElement import *
from _Framework.SliderElement import SliderElement
from _Framework.ButtonElement import ButtonElement
from _Framework.EncoderElement import EncoderElement
from _Framework.ChannelStripComponent import ChannelStripComponent
from _Framework.DeviceComponent import DeviceComponent
from _Framework.ClipSlotComponent import ClipSlotComponent
from _Framework.SceneComponent import SceneComponent
from _Framework.SessionComponent import SessionComponent
from _Framework.ChannelTranslationSelector import ChannelTranslationSelector
from .ViewTogglerComponent import ViewTogglerComponent
from .MixerComponent import MixerComponent
from .TransportComponent import TransportComponent
NUM_TRACKS = 12
class VCM600(ControlSurface):
""" Script for Vestax's VCM600 Controller """
def __init__(self, c_instance):
ControlSurface.__init__(self, c_instance)
with self.component_guard():
self._setup_session_control()
self._setup_mixer_control()
self._setup_device_control()
self._setup_transport_control()
self._setup_view_control()
def _setup_session_control(self):
is_momentary = True
down_button = ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 89)
up_button = ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 90)
session = SessionComponent(NUM_TRACKS, 0)
session.set_select_buttons(down_button, up_button)
session.selected_scene().set_launch_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 87))
track_stop_buttons = [ ButtonElement(is_momentary, MIDI_NOTE_TYPE, index, 68) for index in range(NUM_TRACKS)
]
session.set_stop_track_clip_buttons(tuple(track_stop_buttons))
for index in range(NUM_TRACKS):
session.selected_scene().clip_slot(index).set_launch_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, index, 69))
def _setup_mixer_control(self):
is_momentary = True
mixer = MixerComponent(NUM_TRACKS, 2)
for track in range(NUM_TRACKS):
strip = mixer.channel_strip(track)
strip.set_volume_control(SliderElement(MIDI_CC_TYPE, track, 23))
strip.set_pan_control(EncoderElement(MIDI_CC_TYPE, track, 10, Live.MidiMap.MapMode.absolute))
strip.set_send_controls((
EncoderElement(MIDI_CC_TYPE, track, 19, Live.MidiMap.MapMode.absolute),
EncoderElement(MIDI_CC_TYPE, track, 20, Live.MidiMap.MapMode.absolute)))
strip.set_solo_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 64))
strip.set_mute_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 63))
strip.set_crossfade_toggle(ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 65))
eq = mixer.track_eq(track)
eq.set_gain_controls(tuple([ EncoderElement(MIDI_CC_TYPE, track, 18 - index, Live.MidiMap.MapMode.absolute) for index in range(3)
]))
eq.set_cut_buttons(tuple([ ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 62 - index) for index in range(3)
]))
filter = mixer.track_filter(track)
filter.set_filter_controls(EncoderElement(MIDI_CC_TYPE, track, 22, Live.MidiMap.MapMode.absolute), EncoderElement(MIDI_CC_TYPE, track, 21, Live.MidiMap.MapMode.absolute))
for ret_track in range(2):
strip = mixer.return_strip(ret_track)
strip.set_volume_control(SliderElement(MIDI_CC_TYPE, 12, 22 + ret_track))
strip.set_pan_control(EncoderElement(MIDI_CC_TYPE, 12, 20 + ret_track, Live.MidiMap.MapMode.absolute))
strip.set_mute_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 78 + ret_track))
mixer.set_crossfader_control(SliderElement(MIDI_CC_TYPE, 12, 8))
mixer.set_prehear_volume_control(EncoderElement(MIDI_CC_TYPE, 12, 24, Live.MidiMap.MapMode.absolute))
mixer.master_strip().set_volume_control(SliderElement(MIDI_CC_TYPE, 12, 7))
mixer.master_strip().set_pan_control(EncoderElement(MIDI_CC_TYPE, 12, 10, Live.MidiMap.MapMode.absolute))
return mixer
def _setup_device_control(self):
is_momentary = True
device_bank_buttons = []
device_param_controls = []
for index in range(8):
device_bank_buttons.append(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 70 + index))
device_param_controls.append(EncoderElement(MIDI_CC_TYPE, 12, 12 + index, Live.MidiMap.MapMode.absolute))
device = DeviceComponent()
device.set_bank_buttons(tuple(device_bank_buttons))
device.set_parameter_controls(tuple(device_param_controls))
device_translation_selector = ChannelTranslationSelector()
device_translation_selector.set_controls_to_translate(tuple(device_param_controls))
device_translation_selector.set_mode_buttons(tuple(device_bank_buttons))
self.set_device_component(device)
def _setup_transport_control(self):
is_momentary = True
transport = TransportComponent()
transport.set_play_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 80))
transport.set_record_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 81))
transport.set_nudge_buttons(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 86), ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 85))
transport.set_loop_button(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 84))
transport.set_punch_buttons(ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 82), ButtonElement(is_momentary, MIDI_NOTE_TYPE, 12, 83))
transport.set_tempo_control(SliderElement(MIDI_CC_TYPE, 12, 26), SliderElement(MIDI_CC_TYPE, 12, 25))
def _setup_view_control(self):
is_momentary = True
view = ViewTogglerComponent(NUM_TRACKS)
view.set_buttons(tuple([ ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 67) for track in range(NUM_TRACKS)
]), tuple([ ButtonElement(is_momentary, MIDI_NOTE_TYPE, track, 66) for track in range(NUM_TRACKS)
]))
# okay decompiling /home/deniz/data/projects/midiremote/Live 10.1.18/VCM600/VCM600.pyc
|
[
"[email protected]"
] | |
c63fca29896bfff9b615895fc46e9674f2c87b44
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_292/ch79_2020_04_07_15_56_18_513329.py
|
d012de9b08ecbc09c2525d1628c5a9a84c203598
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 131 |
py
|
def monta_dicionario(l1,l2):
dicionario = {}
for i in range(len(l1)):
dicionario[l1[i]]=l2[i]
return dicionario
|
[
"[email protected]"
] | |
08959099af5bd095a8dc537ede88a16da5dbe231
|
797e83cd492c22c8b7e456b76ae9efb45e102e30
|
/chapter1_A_Sneak_Preview/Step2/dump_db_pickle.py
|
2efbafd3db26064e7f0fb1eacb1af56595a51304
|
[] |
no_license
|
skyaiolos/ProgrammingPython4th
|
013e2c831a6e7836826369d55aa9435fe91c2026
|
a6a98077440f5818fb0bd430a8f9a5d8bf0ce6d7
|
refs/heads/master
| 2021-01-23T11:20:38.292728 | 2017-07-20T03:22:59 | 2017-07-20T03:22:59 | 93,130,254 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 449 |
py
|
import pickle
# dbfile = open('people-pickle', 'rb') # use binary mode files in 3.X
with open('people-pickle', 'rb') as f:
db = pickle.load(f)
for key in db:
print(key, '=>\n ', db[key])
print(db['sue']['name'])
# bob =>
# {'name': 'Bob Smith', 'age': 42, 'pay': 30000, 'job': 'dev'}
# sue =>
# {'name': 'Sue Jones', 'age': 45, 'pay': 40000, 'job': 'hdw'}
# tom =>
# {'name': 'Tom', 'age': 50, 'pay': 0, 'job': None}
# Sue Jones
|
[
"[email protected]"
] | |
00c0734af882609c9d0bb4bb27ff77f501034d52
|
cdbaec17aa8411a1455b42520154cc9f30da3550
|
/Leetcode 5/Pacific Atlantic Water Flow 2.py
|
5ad9ed017d084ffd7100e845cc4497821387d475
|
[] |
no_license
|
PiyushChaturvedii/My-Leetcode-Solutions-Python-
|
bad986978a7e72a3fda59b652cda79802377ab2f
|
86138195f6f343f0acc97da286f4f4811a0d0e48
|
refs/heads/master
| 2021-10-09T20:19:11.186191 | 2019-01-03T05:15:33 | 2019-01-03T05:15:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,307 |
py
|
class Solution(object):
def pacificAtlantic(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
m = len(matrix)
n = len(matrix[0]) if m else 0
if m * n == 0: return []
topEdge = [(0, y) for y in range(n)]
leftEdge = [(x, 0) for x in range(m)]
pacific = set(topEdge + leftEdge)
bottomEdge = [(m - 1, y) for y in range(n)]
rightEdge = [(x, n - 1) for x in range(m)]
atlantic = set(bottomEdge + rightEdge)
def bfs(vset):
dz = zip((1, 0, -1, 0), (0, 1, 0, -1))
queue = list(vset)
while queue:
hx, hy = queue.pop(0)
for dx, dy in dz:
nx, ny = hx + dx, hy + dy
if 0 <= nx < m and 0 <= ny < n:
if matrix[nx][ny] >= matrix[hx][hy]:
if (nx, ny) not in vset:
queue.append((nx, ny))
vset.add((nx, ny))
bfs(pacific)
bfs(atlantic)
result = pacific & atlantic
return map(list, result)
matrix=[[1,2,2,3,5],[3,2,3,4,4],[2,4,5,3,1],[6,7,1,4,5],[5,1,1,2,4]]
c=Solution().pacificAtlantic(matrix)
|
[
"[email protected]"
] | |
1fbe035cdeff7017e360ea5dbf43f22876d2e3a9
|
af7df9d77a2545b54d8cd03e7f4633dce6125f4a
|
/ch08/viewer-pil.py
|
a954ddc59ca3ec169dfbc48de4909a0fb22381eb
|
[] |
no_license
|
socrates77-sh/PP4E
|
71e6522ea2e7cfd0c68c1e06ceb4d0716cc0f0bd
|
c92e69aea50262bfd63e95467ae4baf7cdc2f22f
|
refs/heads/master
| 2020-05-29T08:46:47.380002 | 2018-11-16T10:38:44 | 2018-11-16T10:38:44 | 69,466,298 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 725 |
py
|
"""
show one image with PIL photo replacement object
handles many more image types; install PIL first: placed in Lib\site-packages
"""
import os
import sys
from tkinter import *
from PIL.ImageTk import PhotoImage # <== use PIL replacement class
# rest of code unchanged
imgdir = 'E:\\workspace\\PP4E-Examples-1.2\\Examples\\PP4E\\Gui\\PIL\\images'
imgfile = 'florida-2009-1.jpg' # does gif, jpg, png, tiff, etc.
if len(sys.argv) > 1:
imgfile = sys.argv[1]
imgpath = os.path.join(imgdir, imgfile)
win = Tk()
win.title(imgfile)
imgobj = PhotoImage(file=imgpath) # now JPEGs work!
Label(win, image=imgobj).pack()
win.mainloop()
print(imgobj.width(), imgobj.height()) # show size in pixels on exit
|
[
"[email protected]"
] | |
4b1f81a7f96f17aceb49489dc87ce9196f26aebb
|
8f24e443e42315a81028b648e753c50967c51c78
|
/rllib/models/jax/jax_action_dist.py
|
864cd065cee6c7efd858dbb23cc9e1fbc01c5e88
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
simon-mo/ray
|
d07efdada8d05c6e10417f96e8dfc35f9ad33397
|
1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8
|
refs/heads/master
| 2023-03-06T00:09:35.758834 | 2022-12-23T18:46:48 | 2022-12-23T18:46:48 | 122,156,396 | 4 | 2 |
Apache-2.0
| 2023-03-04T08:56:56 | 2018-02-20T04:47:06 |
Python
|
UTF-8
|
Python
| false | false | 2,423 |
py
|
import time
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_jax, try_import_tfp
from ray.rllib.utils.typing import TensorType, List
jax, flax = try_import_jax()
tfp = try_import_tfp()
class JAXDistribution(ActionDistribution):
"""Wrapper class for JAX distributions."""
@override(ActionDistribution)
def __init__(self, inputs: List[TensorType], model: ModelV2):
super().__init__(inputs, model)
# Store the last sample here.
self.last_sample = None
# Use current time as pseudo-random number generator's seed.
self.prng_key = jax.random.PRNGKey(seed=int(time.time()))
@override(ActionDistribution)
def logp(self, actions: TensorType) -> TensorType:
return self.dist.log_prob(actions)
@override(ActionDistribution)
def entropy(self) -> TensorType:
return self.dist.entropy()
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
return self.dist.kl_divergence(other.dist)
@override(ActionDistribution)
def sample(self) -> TensorType:
# Update the state of our PRNG.
_, self.prng_key = jax.random.split(self.prng_key)
self.last_sample = jax.random.categorical(self.prng_key, self.inputs)
return self.last_sample
@override(ActionDistribution)
def sampled_action_logp(self) -> TensorType:
assert self.last_sample is not None
return self.logp(self.last_sample)
class JAXCategorical(JAXDistribution):
"""Wrapper class for a JAX Categorical distribution."""
@override(ActionDistribution)
def __init__(self, inputs, model=None, temperature=1.0):
if temperature != 1.0:
assert temperature > 0.0, "Categorical `temperature` must be > 0.0!"
inputs /= temperature
super().__init__(inputs, model)
self.dist = tfp.experimental.substrates.jax.distributions.Categorical(
logits=self.inputs
)
@override(ActionDistribution)
def deterministic_sample(self):
self.last_sample = self.inputs.argmax(axis=1)
return self.last_sample
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return action_space.n
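# Hedged usage sketch (editor's addition): build a JAXCategorical from a toy
# logits array and draw one action. The `toy_logits` name is hypothetical and
# the block is guarded so it only runs when the optional jax/tfp dependencies
# resolved above are actually available.
if __name__ == "__main__" and jax is not None and tfp is not None:
    import jax.numpy as jnp

    toy_logits = jnp.array([[0.1, 0.2, 0.7]])
    dist = JAXCategorical(toy_logits)
    print(dist.deterministic_sample())               # argmax action, i.e. [2]
    print(dist.sample(), dist.sampled_action_logp())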
|
[
"[email protected]"
] | |
1578d7b129691847b352ad44b707bf582bf35fbd
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.TOP/Sun-ExtA_12/pdf_to_json_test_Latn.TOP_Sun-ExtA_12.py
|
e426ed44c203532496b95fe8493f518f68f56d58
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 311 |
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TOP/Sun-ExtA_12/udhr_Latn.TOP_Sun-ExtA_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"[email protected]"
] | |
4745b8d827dfe75a35c8fa2d314cc3b102d77917
|
8edd8241d25612081ec6ae0b83064c25e372f09a
|
/backend/test_r1_dev_9459/settings.py
|
540afcd60e77936a6747c534bc24915f4302c98d
|
[] |
no_license
|
crowdbotics-apps/test-r1-dev-9459
|
89e7c07601c3b99b22bb3af69b05adeee5fd7eb1
|
d68ea32a223823a1f7822ad65b63490e09c13bac
|
refs/heads/master
| 2022-12-14T20:55:42.958720 | 2020-08-27T13:35:57 | 2020-08-27T13:35:57 | 290,593,518 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,836 |
py
|
"""
Django settings for test_r1_dev_9459 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "test_r1_dev_9459.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "test_r1_dev_9459.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"[email protected]"
] | |
9e8c98d739ff55d5f3cdf1c3ed99ca911570979e
|
7b750c5c9df2fb05e92b16a43767c444404de7ae
|
/src/leetcode/python3/leetcode5.py
|
857e394520e476e114ce1ce401e547ae69f40be6
|
[] |
no_license
|
renaissance-codes/leetcode
|
a68c0203fe4f006fa250122614079adfe6582d78
|
de6db120a1e709809d26e3e317c66612e681fb70
|
refs/heads/master
| 2022-08-18T15:05:19.622014 | 2022-08-05T03:34:01 | 2022-08-05T03:34:01 | 200,180,049 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,032 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Find the longest palindromic substring.
"""
# A fairly naive approach: use extra space to record whether each shorter substring is a palindrome. Runtime about 5968 ms, so not very efficient.
class Solution:
def longestPalindrome(self, s: str) -> str:
if len(s) < 2:
return s
s_metric = [[1 if i == j else 0 for j in range(len(s))] for i in range(len(s))]
longest_s = s[0]
longest_len = 1
while len(s) - longest_len:
for i in range(len(s) - longest_len):
if longest_len == 1:
if s[i] == s[i + longest_len]:
s_metric[i][i + longest_len] = 1
longest_s = s[i:i + longest_len + 1]
else:
if s_metric[i + 1][i + longest_len - 1] and s[i] == s[i + longest_len]:
s_metric[i][i + longest_len] = 1
longest_s = s[i:i + longest_len + 1]
longest_len += 1
return longest_s
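# Hedged usage example (editor's addition): quick check of the approach
# described in the comment above; "cbbd" has "bb" as its longest palindromic
# substring.
if __name__ == "__main__":
    print(Solution().longestPalindrome("cbbd"))  # expected: "bb"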
|
[
"[email protected]"
] | |
99925636614be0b001f3dc702e26454b535d8fdd
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/nWtgKSNGQ3sB52rQ8_14.py
|
20a0ee77246aa2d2795342055f6e8c705f8ad112
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 120 |
py
|
def evenly_divisible(a, b, c):
sum = 0
for i in range(a,b+1):
if i % c == 0:
sum = sum + i
return sum
|
[
"[email protected]"
] | |
b7b2f98d94eecd10c62d5ce1a1476589118c05b1
|
c36679186f669c6e3bd1c106c96d4a17be1f5ab1
|
/Practice_Anisul/290.py
|
c2e4ee4e68883a91ecbbd0a0b95aa42bad8a765f
|
[] |
no_license
|
touhiduzzaman-tuhin/python-code-university-life
|
60a3d671b200a6f5222c6d176c13c5f20f013509
|
6d2e3d90d430faa5c83fe79e7fb1ebe516994762
|
refs/heads/master
| 2023-03-22T15:18:10.636203 | 2021-03-06T18:52:04 | 2021-03-06T18:52:04 | 332,467,190 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 100 |
py
|
import re
p = r"a{1,3}$"
if re.match(p, "aaa"):
print("Match")
else:
print("Not Match")
|
[
"[email protected]"
] | |
5f518d5a2d3485884b423ab1d9f6a7b2e6acd87b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03200/s568843924.py
|
40d7ef36da7f147dcae37fe4fd5f54e43d59e969
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 261 |
py
|
s = list(input())
len_s = len(s)
len_b = s.count('B')
b_index_list = []
for i in range(len_s):
if s[i] == 'B':
b_index_list.append(i)
b_index_list.reverse()
l0 = len_s - 1
cnt = 0
for b in b_index_list:
cnt += l0 - b
l0 -= 1
print(cnt)
|
[
"[email protected]"
] | |
7dd7415ee5474dea20b15224b3b981dc2bb0b6cc
|
33f32d78087491e989289c46e5d2df5400e23946
|
/leetcode/Unsorted_Algorthm_Problems/Split_a_String_in_Balanced_Strings.py
|
f69cac1b2923e15b2c29e2164c5e53af3d96043f
|
[] |
no_license
|
xulleon/algorithm
|
1b421989423640a44339e6edb21c054b6eb47a30
|
b1f93854006a9b1e1afa4aadf80006551d492f8a
|
refs/heads/master
| 2022-10-08T19:54:18.123628 | 2022-09-29T05:05:23 | 2022-09-29T05:05:23 | 146,042,161 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 410 |
py
|
# https://leetcode.com/problems/split-a-string-in-balanced-strings/
class Solution:
def balancedStringSplit(self, s: str) -> int:
        # Running counts of 'R' and 'L'; a balanced prefix ends whenever the counts are equal (e.g. for RLLLLRRRLR).
        count, l, r = 0, 0, 0
for char in s:
if char == 'R':
r += 1
else:
l += 1
if r == l:
count += 1
return count
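# Hedged usage example (editor's addition): the LeetCode sample input
# "RLRRLLRLRL" splits into 4 balanced substrings (RL, RRLL, RL, RL).
if __name__ == "__main__":
    print(Solution().balancedStringSplit("RLRRLLRLRL"))  # expected: 4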
|
[
"[email protected]"
] | |
e17020abef3c21e15e8849965d0e461d1633248a
|
ffcd795f30483a19d2717f08b1aaf59a7fd4fd7e
|
/Math Quiz.py
|
6f82a81c20f6e0ab747e6c4a7bd8755010a2179d
|
[] |
no_license
|
Botany-Downs-Secondary-College/mathsquiz-simonbargh
|
5791b4810790128878e7cd28678c3d4af3beb07d
|
d5aba85e9d522248827301130976fe5d5a45e11a
|
refs/heads/main
| 2023-03-13T18:05:25.291569 | 2021-02-22T21:28:58 | 2021-02-22T21:28:58 | 337,539,960 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,402 |
py
|
from tkinter import *
from tkinter import ttk
import random
import time
class Welcome:
def __init__(self, parent):
def Questions():
# Checking whether the user's entry details meet the requirements
'''def show_questions():
try:
if self.NameEntry.get() == "":
self.EntryErrorLabel.configure(text = "Enter your name.")
self.NameEntry.focus()
except ValueError:
self.EntryErrorLabel.configure(text = "Enter your age as a number.")
self.AgeEntry.delete(0, END)
self.AgeEntry.focus()'''
if len(self.NameEntry.get()) >= 1:
if len(self.AgeEntry.get()) >= 1: # and self.AgeEntry.get() is not int:
if clicked.get() == "Easy" or clicked.get() == "Medium" or clicked.get() == "Hard":
frames = Quiz(root)
self.Welcome.grid_forget()
else:
self.EntryErrorLabel.configure(text = "Choose a difficulty level.")
else:
self.EntryErrorLabel.configure(text = "Enter your age.")
else:
self.EntryErrorLabel.configure(text = "Enter your name.")
# Welcome Frame
self.Welcome = Frame(parent)
self.Welcome.grid(row = 0, column = 0)
self.TitleLabel = Label(self.Welcome, text = "Welcome to Maths Quiz!", bg = "lightblue", fg = "blue", width = 24, padx = 30, pady = 10, font = ("Time", "12", "italic", "bold"))
self.TitleLabel.grid(columnspan = 2)
self.NextButton = ttk.Button(self.Welcome, text = "Next", command = Questions)
self.NextButton.grid(row = 5, column = 1, pady = 10)
# Name Label
self.NameLabel = Label(self.Welcome, text = "Name", anchor = W, fg = "black", width = 10, padx = 30, pady = 10, font = ("Time", "12", "bold"))
self.NameLabel.grid(row = 2, column = 0)
# Age Label
self.AgeLabel = Label(self.Welcome, text = "Age", anchor = W, fg = "black", width = 10, padx = 30, pady = 10, font = ("Time", "12", "bold"))
self.AgeLabel.grid(row = 3, column = 0)
# Name Entry
self.NameEntry = ttk.Entry(self.Welcome, width = 20)
self.NameEntry.grid(row = 2, column = 1, columnspan = 2)
# Age Entry
self.AgeEntry = ttk.Entry(self.Welcome, width = 20)
self.AgeEntry.grid(row = 3, column = 1, columnspan = 2)
# Difficulty Level
self.DifficultyLabel = Label(self.Welcome, text = "Difficulty Level", anchor = W, fg = "black", width = 10, padx = 30, pady = 10, font = ("Time", "12", "bold"))
self.DifficultyLabel.grid(row = 4, column = 0)
# Difficulty Options
options = ["Easy", "Medium", "Hard"]
clicked = StringVar()
clicked.set("Select an Option")
diff_level = OptionMenu(self.Welcome, clicked, *options)
diff_level.grid(row = 4, column = 1)
# Warning Error Label
self.EntryErrorLabel = Label(self.Welcome, text = "", fg = "red", width = 10, padx = 50, pady = 10)
self.EntryErrorLabel.grid(row = 6, column = 0, columnspan = 2)
class Quiz:
def __init__(self, parent):
def Welcome_Page():
frames = Welcome(root)
self.Quiz.grid_forget()
# Quiz Frame
self.Quiz = Frame(parent)
self.Quiz.grid(row = 0, column = 0)
self.TitleLabel = Label(self.Quiz, text = "Questions", bg = "lightblue", fg = "black", width = 20, padx = 30, pady = 10)
self.TitleLabel.grid(columnspan = 2)
self.BackButton = ttk.Button(self.Quiz, text = "Back", command = Welcome_Page)
self.BackButton.grid(row = 8, column = 1, pady = 10)
if __name__ == "__main__":
root = Tk()
frames = Welcome(root)
root.title("Quiz")
root.mainloop()
|
[
"[email protected]"
] | |
f81c28130549573707b4356a568985697bca6482
|
98df3e98230d74d036cb86b6a7fa7c1b83444f67
|
/vertical_cell_decomposition.py
|
697678b64e95a091e3ae3b7bc5d5cd21cb10961a
|
[] |
no_license
|
lilyhoanghg/path-planning
|
c4e63182a108a817ef88ca257e62eb1431b1f464
|
191634124cde8f2f44db9bcbd0b73257562255dc
|
refs/heads/master
| 2021-08-18T21:53:28.421037 | 2017-11-24T01:53:54 | 2017-11-24T01:53:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,833 |
py
|
# Find a path avoiding obstacles using Vertical Cell Decomposition
# Author -- Shikhar Dev Gupta
import sys
from helpers.graph import *
from helpers.geometry import *;
import matplotlib.pyplot as plt
# Check for empty lines
file_handler = open("input_file","r");
raw_data = file_handler.read();
raw_data = raw_data.split("\n");
if(len(raw_data) <2):
print("Incorrect format of the input file");
	sys.exit();
def parse_input_line(line):
temp2 = [];
line = [i.strip() for i in line.split(",")];
vertex = [];
for index,i in enumerate(line):
if(i[0] == "("):
i = i[1:];
if(i[len(i)-1] == ")"):
i= i[:-1];
vertex.append(int(i));
if(index%2 != 0):
temp2.append(vertex);
vertex = [];
return temp2;
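# Editor's note (hedged): the expected "input_file" layout, inferred from
# parse_input_line and the extraction code below, is one line of
# comma-separated "(x,y)" vertices per entity -- the boundary first, then one
# line per obstacle, and a final line holding the source and destination
# points. A hypothetical example:
#
#   (0,0),(100,0),(100,100),(0,100)
#   (20,20),(40,20),(40,40),(20,40)
#   (5,5),(90,90)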
# Draw the obstacles and point the source and the destination----------------------------------------------
def draw_problem():
bnd_x = [i.x for i in boundary];
bnd_x.append(boundary[0].x);
bnd_y = [i.y for i in boundary];
bnd_y.append(boundary[0].y);
poly_x = [];
poly_y = []
# Draw the boundary
plt.plot(bnd_x, bnd_y);
for index, i in enumerate(obstacles):
poly_x.append([p[0] for p in i]);
poly_y.append([p[1] for p in i]);
plt.fill( poly_x[index], poly_y[index], color="#512DA8");
plt.plot(source.x, source.y, marker="o");
plt.plot(dest.x, dest.y, marker="o");
plt.annotate('Source', xy=(source.x, source.y), xytext=(source.x+5, source.y-6) );
plt.annotate('Destination', xy=(dest.x, dest.y), xytext=(dest.x-4, dest.y-10) );
# Extract vertices----------------------------------------------
temp = parse_input_line(raw_data[0]);
boundary = [point(i[0], i[1]) for i in temp];
# Extract source and dest
temp = parse_input_line(raw_data[len(raw_data)-1]);
source = point(temp[0][0], temp[0][1]);
dest = point(temp[1][0], temp[1][1]);
# Extract obstacles
obstacles = [];
for i in raw_data[1:len(raw_data)-1]:
obstacles.append(parse_input_line(i) );
#sort by x-values
sorted_vertices = [];
for index,i in enumerate(obstacles):
for j in i:
j.append(index);
sorted_vertices.append(j);
sorted_vertices.sort(key=lambda x: x[0]);
# Draw the problem
draw_problem();
new_sorted_vertices = [];
for i in sorted_vertices:
temp = point(i[0], i[1], i[2]);
new_sorted_vertices.append(temp);
new_obstacles = [];
for index, i in enumerate(obstacles):
temp_obs = [];
for j in i:
temp = point(j[0], j[1], index);
temp_obs.append(temp);
new_obstacles.append(temp_obs);
#-----------------------------------------------------------
# Find vertical lines
open_line_segments = [];
y_limit_lower = boundary[0].y;
y_limit_upper = boundary[2].y;
for pt in new_sorted_vertices:
curr_line_segment = [ point(pt.x, y_limit_lower), point(pt.x, y_limit_upper) ];
lower_obs_pt = curr_line_segment[0];
upper_obs_pt = curr_line_segment[1];
upper_gone = False;
lower_gone = False;
break_now = False;
	# Find intersection points with the proposed vertical lines. The intersection function returns false if the segments are the same, so there is no need to handle same-segment checking here.
for index,obs in enumerate(new_obstacles):
# Add the first point again for the last line segment of a polygon.
obs.append( obs[0] );
for vertex_index in range(len(obs)-1 ):
res = segment_intersection( curr_line_segment[0], curr_line_segment[1], obs[vertex_index], obs[vertex_index+1]);
if (res!=-1):
if ( index == pt.obstacle ):
if pt.equals( res ) == False:
if ( res.y > pt.y ):
upper_gone = True;
elif ( res.y < pt.y ):
lower_gone = True;
else:
if pt.equals( res ) == False:
if ( upper_gone is False ):
if ( (res.y > pt.y) and res.y < (upper_obs_pt.y) ):
upper_obs_pt = res;
if ( lower_gone is False ):
if ( (res.y < pt.y) and (res.y > lower_obs_pt.y) ):
lower_obs_pt = res;
if( upper_gone is True and lower_gone is True ):
break_now = True;
				# No need to check the current point any further; it is completely blocked
if(break_now is True):
break;
# Draw the vertical cell lines
if(lower_gone is False):
plt.plot( [lower_obs_pt.x, pt.x], [lower_obs_pt.y, pt.y] );
if(upper_gone is False):
plt.plot( [pt.x, upper_obs_pt.x], [pt.y, upper_obs_pt.y] );
# Add to the global segment list
if (lower_gone and upper_gone):
open_line_segments.append([None, None]);
elif (lower_gone):
open_line_segments.append([None, upper_obs_pt]);
elif (upper_gone):
open_line_segments.append([lower_obs_pt, None]);
else:
open_line_segments.append([lower_obs_pt, upper_obs_pt]);
#------------------------------------------------------
# Find Polygon cells naively. Will improve next.
cells = [];
for index1 in range(len(open_line_segments) ):
curr_segment = open_line_segments[index1];
curr_vertex = new_sorted_vertices[index1];
break_now = False;
done = [False, False, True];
if( curr_segment[0] is None ):
done[0] = True;
if( curr_segment[1] is None ):
done[1] = True;
if( curr_segment[1] is None and open_line_segments[index1][0] is None):
done[2] = False;
for index2 in range(index1+1, len(open_line_segments) ):
next_segment = open_line_segments[index2];
next_vertex = new_sorted_vertices[index2];
double_index1 = -2;
double_index2 = -2;
lines_to_check = [];
trapezoids = [];
double_check = False;
if ( next_segment[0] is not None and next_segment[1] is not None ):
double_check = True;
if( done[0] is False ):
if( double_check ):
double_index1 = len(lines_to_check);
lines_to_check.append( [centroid([curr_segment[0], curr_vertex]), centroid([next_segment[0], next_vertex]), 0]);
lines_to_check.append( [centroid([curr_segment[0], curr_vertex]), centroid([next_segment[1], next_vertex]), 0]);
trapezoids.append([ curr_segment[0], next_segment[0], next_vertex, curr_vertex ]);
trapezoids.append([ curr_segment[0], next_vertex, next_segment[1], curr_vertex ]);
elif ( next_segment[0] is not None ):
lines_to_check.append( [centroid([curr_segment[0], curr_vertex]), centroid([next_segment[0], next_vertex]), 0]);
trapezoids.append([ curr_segment[0], next_segment[0], next_vertex, curr_vertex ]);
elif( next_segment[1] is not None ):
lines_to_check.append( [centroid([curr_segment[0], curr_vertex]), centroid([next_segment[1], next_vertex]), 0]);
trapezoids.append([ curr_segment[0], next_vertex, next_segment[1], curr_vertex ]);
else:
lines_to_check.append( [centroid([curr_segment[0], curr_vertex]), next_vertex, 0]);
trapezoids.append([ curr_segment[0], next_vertex, curr_vertex ]);
if( done[1] is False ):
if( double_check ):
double_index2 = len(lines_to_check);
lines_to_check.append( [centroid([curr_segment[1], curr_vertex]), centroid([next_segment[0], next_vertex]), 1]);
lines_to_check.append( [centroid([curr_segment[1], curr_vertex]), centroid([next_segment[1], next_vertex]), 1]);
trapezoids.append([ curr_vertex, next_segment[0], next_vertex , point(curr_segment[1].x, curr_segment[1].y,curr_segment[1].obstacle, 34)]);
trapezoids.append([ curr_vertex, next_vertex, next_segment[1], curr_segment[1] ]);
elif ( next_segment[1] is not None ):
lines_to_check.append( [centroid([curr_segment[1], curr_vertex]), centroid([next_segment[1], next_vertex]), 1]);
trapezoids.append([ curr_vertex, next_vertex, next_segment[1], curr_segment[1] ]);
elif( next_segment[0] is not None ):
lines_to_check.append( [centroid([curr_segment[1], curr_vertex]), centroid([next_segment[0], next_vertex]), 1]);
trapezoids.append([ curr_vertex, next_segment[0], next_vertex , curr_segment[1] ]);
else:
lines_to_check.append( [centroid([curr_segment[1], curr_vertex]), next_vertex, 1]);
trapezoids.append([ curr_vertex, next_vertex, curr_segment[1] ]);
if( done[2] is False ):
if(double_check):
double_index = len(lines_to_check);
lines_to_check.append( [curr_vertex, centroid([next_segment[0], next_vertex]), 2]);
trapezoids.append([ curr_vertex,next_segment[0], next_vertex ]);
lines_to_check.append( [curr_vertex, centroid([next_segment[1], next_vertex]), 2]);
trapezoids.append([ curr_vertex, next_vertex, next_segment[1] ]);
elif ( next_segment[0] is not None ):
lines_to_check.append( [curr_vertex, centroid([next_segment[0], next_vertex]), 2]);
trapezoids.append([ curr_vertex,next_segment[0], next_vertex ]);
elif( next_segment[1] is not None ):
lines_to_check.append( [curr_vertex, centroid([next_segment[1], next_vertex]), 2]);
trapezoids.append([ curr_vertex, next_vertex, next_segment[1] ]);
# Will this ever occur though??
else:
lines_to_check.append( [curr_vertex, next_vertex, 2]);
trapezoids.append([curr_vertex, next_vertex]);
temp_to_remove = [];
for index5,q in enumerate(lines_to_check):
ok = [True, True, True];
for index3,obs in enumerate(new_obstacles):
# Add the last line to make closed polygon
obs.append( obs[0] );
for index4 in range(len(obs)-1):
if (segment_intersection( q[0], q[1], obs[index4], obs[index4+1]) != -1):
ok[q[2]] = False;
if(index5 not in temp_to_remove):
temp_to_remove.append(index5);
if ( ok[q[2]] is True ):
done[q[2]] = True;
for i in range(len(lines_to_check)):
if i not in temp_to_remove:
cells.append(trapezoids[i]);
if( done[0] == True and done[1] == True and done[2] == True ):
break;
to_draw =[];
for i in cells:
i.append(i[0]);
to_draw.append(i);
#-------------------------------------------------------
# Merge overlapping Polygons
quad_cells = [i for i in cells if len(i)>3];
tri_cells = [i for i in cells if len(i)==3];
others = [i for i in cells if len(i)<3];
quads_to_remove = [];
quads_to_add = [];
quads_to_remove = [];
quads_to_add = [];
for index_cell in range(len(quad_cells)):
for index_cell2,cell in enumerate(quad_cells):
if(index_cell != index_cell2):
if(quad_cells[index_cell][0].x == cell[0].x and quad_cells[index_cell][1].x == cell[1].x):
temp1 = list(quad_cells[index_cell]);
temp1.append(temp1[0]);
temp2 = list(cell);
temp2.append(temp2[0]);
area1 = polygon_area(temp1,4); area2 = polygon_area(temp2,4);
new_quad=[];
new_quad.append( point(temp1[0].x, min(temp1[0].y, temp2[0].y)) );
new_quad.append( point(temp1[1].x, min(temp1[1].y, temp2[1].y)) );
new_quad.append( point(temp1[1].x, max(temp1[2].y, temp2[2].y)) );
new_quad.append( point(temp1[0].x, max(temp1[3].y, temp2[3].y)) );
new_quad.append( point(temp1[0].x, min(temp1[0].y, temp2[0].y)) );
area3 = polygon_area(new_quad, 4);
if( area1 + area2 >= area3):
#merge
quads_to_remove.append(index_cell);
quads_to_remove.append(index_cell2);
quads_to_add.append(new_quad);
quads_to_remove = list(set(quads_to_remove));
for index in sorted(quads_to_remove, reverse=True):
del quad_cells[index];
for i in quads_to_add:
quad_cells.append(i);
# Remove duplicates
to_remove = [];
for index1 in range(len(quad_cells)):
for index2 in range(index1+1, len(quad_cells)):
duplicate = True;
for k,m in zip(quad_cells[index1], quad_cells[index2]):
if k.equals(m) is False:
duplicate = False;
break;
if(duplicate is True):
if index2 not in to_remove:
to_remove.append(index2);
for index in sorted(to_remove, reverse=True):
del quad_cells[index];
# One more pass to remove extra quads generated because of cross-segments
quads_to_remove = [];
for index1 in range(len(quad_cells)):
for index2 in range(len(quad_cells)):
if(index1 != index2 and quad_cells[index1][0].x == quad_cells[index2][0].x and quad_cells[index1][1].x == quad_cells[index2][1].x):
if( (quad_cells[index1][0].y<= quad_cells[index2][0].y) and (quad_cells[index1][1].y<= quad_cells[index2][1].y)
and (quad_cells[index1][2].y>= quad_cells[index2][2].y) and (quad_cells[index1][3].y >= quad_cells[index2][3].y)):
quads_to_remove.append(index2);
quads_to_remove = list(set(quads_to_remove) );
for index in sorted(quads_to_remove, reverse=True):
del quad_cells[index];
#------------------------------------------------------
# Add boundary lines
if( boundary[0].x != new_sorted_vertices[0].x):
quad_cells.append([boundary[0], point(new_sorted_vertices[0].x, y_limit_lower), point(new_sorted_vertices[0].x, y_limit_upper), boundary[3]]);
if( boundary[1].x != new_sorted_vertices[len(new_sorted_vertices)-1].x):
quad_cells.append([point(new_sorted_vertices[len(new_sorted_vertices)-1].x ,y_limit_lower), boundary[1], boundary[2], point(new_sorted_vertices[len(new_sorted_vertices)-1].x, y_limit_upper) ]);
#-------------------------------------------------------
# Plot final cells
to_draw = quad_cells+tri_cells+others;
for i in to_draw:
x = [j.x for j in i];
y = [j.y for j in i];
plt.plot(x, y);
#----------------------------------------------------------------------
# Get the graph
graph_vertices = [];
graph_edges = [];
for index1 in range(len(quad_cells)):
same_boundary = [];
for index2 in range(len(quad_cells)):
if(index1 != index2):
if( (quad_cells[index1][1].x == quad_cells[index2][0].x ) and ((quad_cells[index1][2].y in [quad_cells[index2][0].y, quad_cells[index2][3].y]) or (quad_cells[index1][1].y in [quad_cells[index2][0].y, quad_cells[index2][3].y]) ) ):
same_boundary.append(index2);
temp = quad_cells[index1][0:4];
centroid_vertex = centroid(temp);
place = centroid_vertex.find_point(graph_vertices)
if( place == -1):
graph_vertices.append(centroid_vertex);
if(len(same_boundary)==1):
temp_edge_middle = centroid([quad_cells[index1][1], quad_cells[index1][2]]);
graph_vertices.append(temp_edge_middle);
n = len(graph_vertices)-1;
if(place != -1):
graph_edges.append([place, n]);
else:
graph_edges.append([n-1, n]);
temp = quad_cells[same_boundary[0]][0:4];
curr_centroid_vertex = centroid(temp);
place2 = curr_centroid_vertex.find_point(graph_vertices);
if( place2 == -1 ):
graph_vertices.append(curr_centroid_vertex);
graph_edges.append([n, n+1]);
else:
graph_edges.append([n, place2]);
elif(len(same_boundary)>1):
n = len(graph_vertices)-1;
if(place != -1):
use = place;
else:
use = n;
for index, i in enumerate(same_boundary):
temp = quad_cells[i][0:4];
curr_centroid_vertex = centroid(temp);
temp_edge_middle = centroid([quad_cells[i][0], quad_cells[i][3]]);
graph_vertices.append(temp_edge_middle);
pl1 =len(graph_vertices)-1;
hmmm= curr_centroid_vertex.find_point(graph_vertices);
if (hmmm == -1):
graph_vertices.append(curr_centroid_vertex);
pl2 =len(graph_vertices)-1;
else:
pl2 = hmmm;
graph_edges.append([use, pl1]);
graph_edges.append([pl1, pl2]);
# Add source and dest to graph
# Find the smallest distance vertex on graph and see if its clear to traverse
# Source------------------------------
min_ind = -1; min = 9999999;
for index, i in enumerate(graph_vertices):
if( check_obstruction(new_obstacles, [source, i]) is True ):
dist = find_dist(i, source);
if( dist < min):
min = dist;
min_ind = index;
graph_vertices.append(source);
m = len(graph_vertices)-1;
graph_edges.append([min_ind, m]);
# Destination------------------------------------
min_ind = -1; min = 9999999;
for index, i in enumerate(graph_vertices):
if( check_obstruction(new_obstacles, [dest, i]) is True ):
dist = find_dist(i, dest);
if( dist < min):
min = dist;
min_ind = index;
graph_vertices.append(dest);
m = len(graph_vertices)-1;
graph_edges.append([min_ind, m]);
# Convert graph in adjacency list format
graph = [];
for j in range(len(graph_vertices)):
graph.append([]);
for i in graph_edges:
if(i[0]==j):
graph[j].append(i[1]);
elif(i[1]==j):
graph[j].append(i[0]);
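# Illustrative example (not part of the original script): with three graph
# vertices and graph_edges = [[0, 1], [1, 2]], the loop above builds the
# undirected adjacency list graph = [[1], [0, 2], [1]], which is what bfs() consumes.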
path = bfs(graph, len(graph_vertices)-2, len(graph_vertices)-1);
if(path is None):
print "No path found. Sorry";
sys.exit();
else:
print "Path found." ;
# Draw everything--------------
for index,i in enumerate(graph_vertices):
plt.annotate(str(index), xy=(i.x, i.y), xytext=(i.x+2, i.y-2) );
# plt.plot(i.x,i.y, marker="x");
for i in graph_edges:
temp_x = [graph_vertices[i[0]].x, graph_vertices[i[1]].x];
temp_y = [graph_vertices[i[0]].y, graph_vertices[i[1]].y];
plt.plot(temp_x,temp_y);
# draw path
temp_x = [graph_vertices[i].x for i in path];
temp_y = [graph_vertices[i].y for i in path];
plt.plot(temp_x,temp_y, color="#0F0F0F", linewidth=2);
#----------------------------------------------------
# output into a file
file_output = open("vertical_cell_output", "w" );
str_to_write = "";
for index in range(len(graph_vertices)):
str_to_write = str_to_write + ", "+str(index)+":"+"("+ str(int(graph_vertices[index].x) )+ ", "+ str(int(graph_vertices[index].y) ) + ")";
str_to_write = str_to_write[1:];
total_write = str_to_write+"\n";
str_to_write="";
for i in graph:
if (i == []):
continue;
str_to_write = str_to_write + ",(";
for j in i:
str_to_write = str_to_write + str(j) + ",";
str_to_write = str_to_write[:-1];
str_to_write = str_to_write + ")";
str_to_write = str_to_write[1:];
total_write = total_write+ str_to_write + "\n";
str_to_write = "";
str_to_write =','.join(str(x) for x in path);
total_write = total_write + str_to_write;
file_output.write(total_write);
print "Output written to file.. Drawing the result";
plt.show();
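# Hedged illustration of the "vertical_cell_output" format written above
# (coordinates and indices are made up): line 1 holds "index:(x, y)" per graph
# vertex, the next line holds one "(neighbour,...)" group per non-empty
# adjacency entry, and the last line is the comma-separated path, e.g.
#  0:(10, 40), 1:(35, 40), 2:(60, 25)
# (1),(0,2),(1)
# 0,1,2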
|
[
"="
] |
=
|
8221804a8b71f27558952a6fff2ea180d901387e
|
0e1a0329e1b96405d3ba8426fd4f935aa4d8b04b
|
/scraper/merge.py
|
15c94f3e038d5c181e3f5898d9c5efcb34e92473
|
[] |
no_license
|
ugik/Blitz
|
6e3623a4a03309e33dcc0b312800e8cadc26d28c
|
740f65ecaab86567df31d6a0055867be193afc3d
|
refs/heads/master
| 2021-05-03T20:15:20.516014 | 2015-03-11T12:33:34 | 2015-03-11T12:33:34 | 25,015,963 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,418 |
py
|
import xlrd, xlwt
import glob, os.path
def merge_xls (in_dir="./", out_file="merged_output.xls"):
if in_dir[-1:] != "/": in_dir = in_dir + "/"  # normalize the directory before globbing so "dir" and "dir/" both work
xls_files = glob.glob(in_dir + "*.xls")
sheet_names = [os.path.basename(v)[:-4] for v in xls_files]
sheet_excl = [os.path.basename(v)[:-4] for v in xls_files if
len(os.path.basename(v)[:-4]) > 29]
merged_book = xlwt.Workbook()
xls_files.sort()
if xls_files:
for k, xls_file in enumerate(xls_files):
print "---> Processing file %s" % (xls_file)
if len (sheet_names[k]) <= 29:
book = xlrd.open_workbook(xls_file)
if book.nsheets == 1:
ws = merged_book.add_sheet(sheet_names[k])
sheet = book.sheet_by_index(0)
for rx in range(sheet.nrows):
for cx in range(sheet.ncols):
ws.write(rx, cx, sheet.cell_value(rx, cx))
elif book.nsheets in range(2, 100):
for sheetx in range(book.nsheets):
sheet0n = sheet_names[k]+str(sheetx+1).zfill(2)
ws = merged_book.add_sheet(sheet0n)
sheet = book.sheet_by_index(sheetx)
for rx in range(sheet.nrows):
for cx in range(sheet.ncols):
ws.write(rx, cx, sheet.cell_value(rx, cx))
else:
print "ERROR *** File %s has %s sheets (maximum is 99)"
% (xls_file, book.nsheets)
raise
else:
print "WARNING *** File name too long: <%s.xls> (maximum is
29 chars) " % (sheet_names[k])
print "WARNING *** File <%s.xls> was skipped." %
(sheet_names[k])
merged_book.save(out_file)
print
print "---> Merged xls file written to %s using the following source
files: " % (out_file)
for k, v in enumerate(sheet_names):
if len(v) <= 29:
print "\t", str(k+1).zfill(3), "%s.xls" % (v)
print
if sheet_excl:
print "--> The following files were skipped because the file
name exceeds 29 characters: "
for k, v in enumerate(sheet_excl):
print "\t", str(k+1).zfill(3), v
else:
print "NOTE *** No xls files in %s. Nothing to do." % (in_dir)
|
[
"[email protected]"
] | |
65c4b8431162da40aeb8bb0e06f47b86611eb1cd
|
4855b0f5ccab56ca0bd6bd47c1b4147403263c5d
|
/musicautobot/multitask_transformer/learner.py
|
c17347153ad3f95764d05e6ae6dff7aca1e0eca3
|
[] |
no_license
|
David-D-Chen/musicautobot
|
6c88ff6d52ea7b0d777b71a59dff88fc8a21bfa7
|
fd8145a20a070ec3aa20f8cc74fc38c0b6896a53
|
refs/heads/master
| 2020-07-13T01:58:50.092878 | 2019-08-28T17:29:59 | 2019-08-28T17:29:59 | 204,963,031 | 0 | 0 | null | 2019-08-28T16:22:11 | 2019-08-28T15:22:11 | null |
UTF-8
|
Python
| false | false | 13,689 |
py
|
from fastai.basics import *
from ..vocab import *
from ..utils.top_k_top_p import top_k_top_p
from ..utils.midifile import is_empty_midi
from ..music_transformer.transform import *
from ..music_transformer.learner import filter_invalid_indexes
from .model import get_multitask_model
from .dataloader import *
def multitask_model_learner(data:DataBunch, config:dict=None, drop_mult:float=1.,
pretrained_path:PathOrStr=None, **learn_kwargs) -> 'LanguageLearner':
"Create a `Learner` with a language model from `data` and `arch`."
vocab = data.vocab
vocab_size = len(vocab)
model = get_multitask_model(vocab_size, config=config, drop_mult=drop_mult, pad_idx=vocab.pad_idx)
metrics = [AverageMultiMetric(partial(m, pad_idx=vocab.pad_idx)) for m in [mask_acc, lm_acc, c2m_acc, m2c_acc]]
loss_func = MultiLoss(ignore_index=data.vocab.pad_idx)
learn = MultitaskLearner(data, model, loss_func=loss_func, metrics=metrics, **learn_kwargs)
if pretrained_path:
state = torch.load(pretrained_path, map_location='cpu')
get_model(model).load_state_dict(state['model'], strict=False)
return learn
class MultitaskLearner(Learner):
def predict_nw(self, item:MusicItem, n_words:int=128,
temperatures:float=(1.0,1.0), min_bars=4,
top_k=30, top_p=0.6):
"Return the `n_words` that come after `text`."
self.model.reset()
new_idx = []
vocab = self.data.vocab
x, pos = item.to_tensor(), item.get_pos_tensor()
last_pos = pos[-1] if len(pos) else 0
y = torch.tensor([0])
start_pos = last_pos
sep_count = 0
bar_len = SAMPLE_FREQ * 4 # assuming 4/4 time
vocab = self.data.vocab
repeat_count = 0
for i in progress_bar(range(n_words), leave=True):
batch = { 'lm': { 'x': x[None], 'pos': pos[None] } }, y
logits = self.pred_batch(batch=batch)['lm'][-1][-1]
prev_idx = new_idx[-1] if len(new_idx) else vocab.pad_idx
# Temperature
# Use first temperatures value if last prediction was duration
temperature = temperatures[0] if vocab.is_duration_or_pad(prev_idx) else temperatures[1]
repeat_penalty = max(0, np.log(repeat_count/4)/5) * temperature
temperature += repeat_penalty
if temperature != 1.: logits = logits / temperature
# Filter
# bar = 16 beats
filter_value = -float('Inf')
if ((last_pos - start_pos) // 16) <= min_bars: logits[vocab.bos_idx] = filter_value
logits = filter_invalid_indexes(logits, prev_idx, vocab, filter_value=filter_value)
logits = top_k_top_p(logits, top_k=top_k, top_p=top_p, filter_value=filter_value)
# Sample
probs = F.softmax(logits, dim=-1)
idx = torch.multinomial(probs, 1).item()
# Update repeat count
num_choices = len(probs.nonzero().view(-1))
if num_choices <= 2: repeat_count += 1
else: repeat_count = 0
if prev_idx==vocab.sep_idx:
duration = idx - vocab.dur_range[0]
last_pos = last_pos + duration
bars_pred = (last_pos - start_pos) // 16
abs_bar = last_pos // 16
# if (bars % 8 == 0) and (bars_pred > min_bars): break
if (i / n_words > 0.80) and (abs_bar % 4 == 0): break
if idx==vocab.bos_idx:
print('Predicted BOS token. Returning prediction...')
break
new_idx.append(idx)
x = x.new_tensor([idx])
pos = pos.new_tensor([last_pos])
pred = vocab.to_music_item(np.array(new_idx))
full = item.append(pred)
return pred, full
def predict_mask(self, masked_item:MusicItem,
temperatures:float=(1.0,1.0),
top_k=20, top_p=0.8):
x = masked_item.to_tensor()
pos = masked_item.get_pos_tensor()
y = torch.tensor([0])
vocab = self.data.vocab
self.model.reset()
mask_idxs = (x == vocab.mask_idx).nonzero().view(-1)
repeat_count = 0
for midx in progress_bar(mask_idxs, leave=True):
prev_idx = x[midx-1]
# Using original positions, otherwise model gets too off track
# pos = torch.tensor(-position_enc(xb[0].cpu().numpy()), device=xb.device)[None]
# Next Word
logits = self.pred_batch(batch=({ 'msk': { 'x': x[None], 'pos': pos[None] } }, y) )['msk'][0][midx]
# Temperature
# Use first temperatures value if last prediction was duration
temperature = temperatures[0] if vocab.is_duration_or_pad(prev_idx) else temperatures[1]
repeat_penalty = max(0, np.log(repeat_count/4)/5) * temperature
temperature += repeat_penalty
if temperature != 1.: logits = logits / temperature
# Filter
filter_value = -float('Inf')
special_idxs = [vocab.bos_idx, vocab.sep_idx, vocab.stoi[EOS]]
logits[special_idxs] = filter_value # Don't allow any special tokens (as we are only removing notes and durations)
logits = filter_invalid_indexes(logits, prev_idx, vocab, filter_value=filter_value)
logits = top_k_top_p(logits, top_k=top_k, top_p=top_p, filter_value=filter_value)
# Sampling
probs = F.softmax(logits, dim=-1)
idx = torch.multinomial(probs, 1).item()
# Update repeat count
num_choices = len(probs.nonzero().view(-1))
if num_choices <= 2: repeat_count += 1
else: repeat_count = 0
x[midx] = idx
return vocab.to_music_item(x.cpu().numpy())
def predict_s2s(self, input_item:MusicItem, target_item:MusicItem, n_words:int=256,
temperatures:float=(1.0,1.0), top_k=30, top_p=0.8,
use_memory=True):
vocab = self.data.vocab
# Input doesn't change. We can reuse the encoder output on each prediction
with torch.no_grad():
inp, inp_pos = input_item.to_tensor(), input_item.get_pos_tensor()
x_enc = self.model.encoder(inp[None], inp_pos[None])
# target
targ = target_item.data.tolist()
targ_pos = target_item.position.tolist()
last_pos = targ_pos[-1]
self.model.reset()
repeat_count = 0
max_pos = input_item.position[-1] + SAMPLE_FREQ * 4 # Only predict until both tracks/parts have the same length
x, pos = inp.new_tensor(targ), inp_pos.new_tensor(targ_pos)
for i in progress_bar(range(n_words), leave=True):
# Predict
with torch.no_grad():
dec = self.model.decoder(x[None], pos[None], x_enc)
logits = self.model.head(dec)[-1, -1]
# Temperature
# Use first temperatures value if last prediction was duration
prev_idx = targ[-1] if len(targ) else vocab.pad_idx
temperature = temperatures[0] if vocab.is_duration_or_pad(prev_idx) else temperatures[1]
repeat_penalty = max(0, np.log(repeat_count/4)/5) * temperature
temperature += repeat_penalty
if temperature != 1.: logits = logits / temperature
# Filter
filter_value = -float('Inf')
logits = filter_invalid_indexes(logits, prev_idx, vocab, filter_value=filter_value)
logits = top_k_top_p(logits, top_k=top_k, top_p=top_p, filter_value=filter_value)
# Sample
probs = F.softmax(logits, dim=-1)
idx = torch.multinomial(probs, 1).item()
# Update repeat count
num_choices = len(probs.nonzero().view(-1))
if num_choices <= 2: repeat_count += 1
else: repeat_count = 0
if idx == vocab.bos_idx or idx == vocab.stoi[EOS]:
print('Predicting BOS/EOS')
break
if prev_idx == vocab.sep_idx:
duration = idx - vocab.dur_range[0]
last_pos = last_pos + duration
if last_pos > max_pos:
print('Predicted past counter-part length. Returning early')
break
targ_pos.append(last_pos)
targ.append(idx)
if use_memory:
# Relying on memory for kv. Only need last prediction index
x, pos = inp.new_tensor([targ[-1]]), inp_pos.new_tensor([targ_pos[-1]])
else:
# Reset memory after each prediction, since we're feeding the whole sequence every time
self.model.reset()
x, pos = inp.new_tensor(targ), inp_pos.new_tensor(targ_pos)
return vocab.to_music_item(np.array(targ))
# High level prediction functions from midi file
def nw_predict_from_midi(learn, midi=None, n_words=400,
temperatures=(1.0,1.0), top_k=30, top_p=0.6, seed_len=None, **kwargs):
vocab = learn.data.vocab
seed = MusicItem.from_file(midi, vocab) if not is_empty_midi(midi) else MusicItem.empty(vocab)
if seed_len is not None: seed = seed.trim_to_beat(seed_len)
pred, full = learn.predict_nw(seed, n_words=n_words, temperatures=temperatures, top_k=top_k, top_p=top_p, **kwargs)
return full
def s2s_predict_from_midi(learn, midi=None, n_words=200,
temperatures=(1.0,1.0), top_k=24, top_p=0.7, seed_len=None, pred_melody=True, **kwargs):
multitrack_item = MultitrackItem.from_file(midi, learn.data.vocab)
melody, chords = multitrack_item.melody, multitrack_item.chords
inp, targ = (chords, melody) if pred_melody else (melody, chords)
# if seed_len is passed, cutoff sequence so we can predict the rest
if seed_len is not None: targ = targ.trim_to_beat(seed_len)
pred = learn.predict_s2s(inp, targ, n_words=n_words, temperatures=temperatures, top_k=top_k, top_p=top_p, **kwargs)
part_order = (pred, inp) if pred_melody else (inp, pred)
return MultitrackItem(*part_order)
def mask_predict_from_midi(learn, midi=None, predict_notes=True,
temperatures=(1.0,1.0), top_k=30, top_p=0.7, section=None, **kwargs):
item = MusicItem.from_file(midi, learn.data.vocab)
masked_item = item.mask_pitch(section) if predict_notes else item.mask_duration(section)
pred = learn.predict_mask(masked_item, temperatures=temperatures, top_k=top_k, top_p=top_p, **kwargs)
return pred
# LOSS AND METRICS
class MultiLoss():
def __init__(self, ignore_index=None):
"Loss mult - Mask, NextWord, Seq2Seq"
self.loss = CrossEntropyFlat(ignore_index=ignore_index)
def __call__(self, inputs:Dict[str,Tensor], targets:Dict[str,Tensor])->Rank0Tensor:
losses = [self.loss(inputs[key], target) for key,target in targets.items()]
return sum(losses)
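# Illustrative note (hedged; the task keys are inferred from the metric helpers
# below): `targets` is a dict such as {'msk': ..., 'lm': ..., 'c2m': ..., 'm2c': ...}
# and the total loss is the sum of CrossEntropyFlat over whichever keys are present.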
def acc_ignore_pad(input:Tensor, targ:Tensor, pad_idx)->Rank0Tensor:
if input is None or targ is None: return None
n = targ.shape[0]
input = input.argmax(dim=-1).view(n,-1)
targ = targ.view(n,-1)
mask = targ != pad_idx
return (input[mask]==targ[mask]).float().mean()
def acc_index(inputs, targets, key, pad_idx):
return acc_ignore_pad(inputs.get(key), targets.get(key), pad_idx)
def mask_acc(inputs, targets, pad_idx): return acc_index(inputs, targets, 'msk', pad_idx)
def lm_acc(inputs, targets, pad_idx): return acc_index(inputs, targets, 'lm', pad_idx)
def c2m_acc(inputs, targets, pad_idx): return acc_index(inputs, targets, 'c2m', pad_idx)
def m2c_acc(inputs, targets, pad_idx): return acc_index(inputs, targets, 'm2c', pad_idx)
class AverageMultiMetric(AverageMetric):
"Updated fastai.AverageMetric to support multi task metrics."
def on_batch_end(self, last_output, last_target, **kwargs):
"Update metric computation with `last_output` and `last_target`."
if not is_listy(last_target): last_target=[last_target]
val = self.func(last_output, *last_target)
if val is None: return
self.count += first_el(last_target).size(0)
if self.world:
val = val.clone()
dist.all_reduce(val, op=dist.ReduceOp.SUM)
val /= self.world
self.val += first_el(last_target).size(0) * val.detach().cpu()
def on_epoch_end(self, last_metrics, **kwargs):
"Set the final result in `last_metrics`."
if self.count == 0: return add_metrics(last_metrics, 0)
return add_metrics(last_metrics, self.val/self.count)
# MODEL LOADING
class MTTrainer(LearnerCallback):
"`Callback` that regroups lr adjustment to seq_len, AR and TAR."
def __init__(self, learn:Learner, dataloaders=None, starting_mask_window=1):
super().__init__(learn)
self.count = 1
self.mw_start = starting_mask_window
self.dataloaders = dataloaders
def on_epoch_begin(self, **kwargs):
"Reset the hidden state of the model."
model = get_model(self.learn.model)
model.reset()
model.encoder.mask_size = max(self.count+self.mw_start, 100)
def on_epoch_end(self, last_metrics, **kwargs):
"Finish the computation and sends the result to the Recorder."
if self.dataloaders is not None:
self.learn.data = self.dataloaders[self.count % len(self.dataloaders)]
self.count += 1
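# Hedged usage sketch (file paths and the `data` DataBunch are assumed, not
# defined in this module):
#
#   learn = multitask_model_learner(data, pretrained_path='models/multitask.pth')
#   full = nw_predict_from_midi(learn, midi='seed.mid', n_words=400, seed_len=16)
#   duet = s2s_predict_from_midi(learn, midi='melody_and_chords.mid', pred_melody=False)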
|
[
"[email protected]"
] | |
518c06d15803865852c043709aede6d2df28e37c
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/contrib/image/python/ops/sparse_image_warp.py
|
c4801ca68f029d70be33f3ba6af51d4429f5fdd9
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 |
Apache-2.0
| 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null |
UTF-8
|
Python
| false | false | 8,719 |
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image warping using sparse flow defined at control points."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.image.python.ops import dense_image_warp
from tensorflow.contrib.image.python.ops import interpolate_spline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
def _get_grid_locations(image_height, image_width):
"""Wrapper for np.meshgrid."""
y_range = np.linspace(0, image_height - 1, image_height)
x_range = np.linspace(0, image_width - 1, image_width)
y_grid, x_grid = np.meshgrid(y_range, x_range, indexing='ij')
return np.stack((y_grid, x_grid), -1)
def _expand_to_minibatch(np_array, batch_size):
"""Tile arbitrarily-sized np_array to include new batch dimension."""
tiles = [batch_size] + [1] * np_array.ndim
return np.tile(np.expand_dims(np_array, 0), tiles)
def _get_boundary_locations(image_height, image_width, num_points_per_edge):
"""Compute evenly-spaced indices along edge of image."""
y_range = np.linspace(0, image_height - 1, num_points_per_edge + 2)
x_range = np.linspace(0, image_width - 1, num_points_per_edge + 2)
ys, xs = np.meshgrid(y_range, x_range, indexing='ij')
is_boundary = np.logical_or(
np.logical_or(xs == 0, xs == image_width - 1),
np.logical_or(ys == 0, ys == image_height - 1))
return np.stack([ys[is_boundary], xs[is_boundary]], axis=-1)
def _add_zero_flow_controls_at_boundary(control_point_locations,
control_point_flows, image_height,
image_width, boundary_points_per_edge):
"""Add control points for zero-flow boundary conditions.
Augment the set of control points with extra points on the
boundary of the image that have zero flow.
Args:
control_point_locations: input control points
control_point_flows: their flows
image_height: image height
image_width: image width
boundary_points_per_edge: number of points to add in the middle of each
edge (not including the corners).
The total number of points added is
4 + 4*(boundary_points_per_edge).
Returns:
merged_control_point_locations: augmented set of control point locations
merged_control_point_flows: augmented set of control point flows
"""
batch_size = tensor_shape.dimension_value(control_point_locations.shape[0])
boundary_point_locations = _get_boundary_locations(image_height, image_width,
boundary_points_per_edge)
boundary_point_flows = np.zeros([boundary_point_locations.shape[0], 2])
type_to_use = control_point_locations.dtype
boundary_point_locations = constant_op.constant(
_expand_to_minibatch(boundary_point_locations, batch_size),
dtype=type_to_use)
boundary_point_flows = constant_op.constant(
_expand_to_minibatch(boundary_point_flows, batch_size), dtype=type_to_use)
merged_control_point_locations = array_ops.concat(
[control_point_locations, boundary_point_locations], 1)
merged_control_point_flows = array_ops.concat(
[control_point_flows, boundary_point_flows], 1)
return merged_control_point_locations, merged_control_point_flows
def sparse_image_warp(image,
source_control_point_locations,
dest_control_point_locations,
interpolation_order=2,
regularization_weight=0.0,
num_boundary_points=0,
name='sparse_image_warp'):
"""Image warping using correspondences between sparse control points.
Apply a non-linear warp to the image, where the warp is specified by
the source and destination locations of a (potentially small) number of
control points. First, we use a polyharmonic spline
(`tf.contrib.image.interpolate_spline`) to interpolate the displacements
between the corresponding control points to a dense flow field.
Then, we warp the image using this dense flow field
(`tf.contrib.image.dense_image_warp`).
Let t index our control points. For regularization_weight=0, we have:
warped_image[b, dest_control_point_locations[b, t, 0],
dest_control_point_locations[b, t, 1], :] =
image[b, source_control_point_locations[b, t, 0],
source_control_point_locations[b, t, 1], :].
For regularization_weight > 0, this condition is met approximately, since
regularized interpolation trades off smoothness of the interpolant vs.
reconstruction of the interpolant at the control points.
See `tf.contrib.image.interpolate_spline` for further documentation of the
interpolation_order and regularization_weight arguments.
Args:
image: `[batch, height, width, channels]` float `Tensor`
source_control_point_locations: `[batch, num_control_points, 2]` float
`Tensor`
dest_control_point_locations: `[batch, num_control_points, 2]` float
`Tensor`
interpolation_order: polynomial order used by the spline interpolation
regularization_weight: weight on smoothness regularizer in interpolation
num_boundary_points: How many zero-flow boundary points to include at
each image edge. Usage:
num_boundary_points=0: don't add zero-flow points
num_boundary_points=1: 4 corners of the image
num_boundary_points=2: 4 corners and one in the middle of each edge
(8 points total)
num_boundary_points=n: 4 corners and n-1 along each edge
name: A name for the operation (optional).
Note that image and offsets can be of type tf.half, tf.float32, or
tf.float64, and do not necessarily have to be the same type.
Returns:
warped_image: `[batch, height, width, channels]` float `Tensor` with same
type as input image.
flow_field: `[batch, height, width, 2]` float `Tensor` containing the dense
flow field produced by the interpolation.
"""
image = ops.convert_to_tensor(image)
source_control_point_locations = ops.convert_to_tensor(
source_control_point_locations)
dest_control_point_locations = ops.convert_to_tensor(
dest_control_point_locations)
control_point_flows = (
dest_control_point_locations - source_control_point_locations)
clamp_boundaries = num_boundary_points > 0
boundary_points_per_edge = num_boundary_points - 1
with ops.name_scope(name):
batch_size, image_height, image_width, _ = image.get_shape().as_list()
# This generates the dense locations where the interpolant
# will be evaluated.
grid_locations = _get_grid_locations(image_height, image_width)
flattened_grid_locations = np.reshape(grid_locations,
[image_height * image_width, 2])
flattened_grid_locations = constant_op.constant(
_expand_to_minibatch(flattened_grid_locations, batch_size), image.dtype)
if clamp_boundaries:
(dest_control_point_locations,
control_point_flows) = _add_zero_flow_controls_at_boundary(
dest_control_point_locations, control_point_flows, image_height,
image_width, boundary_points_per_edge)
flattened_flows = interpolate_spline.interpolate_spline(
dest_control_point_locations, control_point_flows,
flattened_grid_locations, interpolation_order, regularization_weight)
dense_flows = array_ops.reshape(flattened_flows,
[batch_size, image_height, image_width, 2])
warped_image = dense_image_warp.dense_image_warp(image, dense_flows)
return warped_image, dense_flows
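# Hedged usage sketch (shapes and control-point values are illustrative, not
# part of this module):
#
#   import tensorflow as tf
#   image = tf.zeros([1, 64, 64, 3], tf.float32)
#   src = tf.constant([[[10., 10.], [20., 30.]]])  # [batch, num_points, 2]
#   dst = tf.constant([[[12., 10.], [18., 32.]]])
#   warped, flow = sparse_image_warp(image, src, dst, num_boundary_points=1)
#   # warped: [1, 64, 64, 3]; flow: [1, 64, 64, 2]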
|
[
"[email protected]"
] |