Dataset columns:

| Column | Dtype | Values |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 to 1 |
| author_id | string | length 1 to 132 |
6df182f0f896addcf9738b9a20ab16600265939b | b9ed14f23d7d48ce88a93a808556cab9a0abc682 | /tensorflow_model_optimization/python/core/quantization/keras/graph_transformations/model_transformer_test.py | 0398bcdfa5e161b6894a4dd2111cac7008fd2e69 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | akarmi/model-optimization | 2a53655e92cabe5b180a0319bc64c339494b97bb | 2d3faaa361ecb3639f4a29da56e0e6ed52336318 | refs/heads/master | 2020-08-16T17:20:55.836218 | 2019-10-07T17:49:50 | 2019-10-07T17:50:12 | 215,530,733 | 0 | 0 | Apache-2.0 | 2019-10-16T11:23:40 | 2019-10-16T11:23:40 | null | UTF-8 | Python | false | false | 8,873 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Model Transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.platform import test
from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import model_transformer
from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import transforms
ModelTransformer = model_transformer.ModelTransformer
Transform = transforms.Transform
LayerPattern = transforms.LayerPattern
LayerNode = transforms.LayerNode
class ModelTransformerTest(test.TestCase):
@staticmethod
def _batch(dims, batch_size):
"""Adds provided batch_size to existing dims.
If dims is (None, 5, 2), returns (batch_size, 5, 2)
Args:
dims: Dimensions
batch_size: batch_size
Returns:
dims with batch_size added as first parameter of list.
"""
if dims[0] is None:
dims[0] = batch_size
return dims
def _create_model_inputs(self, model):
return np.random.randn(*self._batch(model.input.get_shape().as_list(), 1))
def _simple_dense_model(self):
inp = keras.layers.Input((3,))
x = keras.layers.Dense(2)(inp)
out = keras.layers.ReLU(6.0)(x)
return keras.Model(inp, out)
def _assert_config(self, expected_config, actual_config, exclude_keys=None):
"""Asserts that the two config dictionaries are equal.
This method is used to compare keras Model and Layer configs. It provides
the ability to exclude the keys we don't want compared.
Args:
expected_config: Config which we expect.
actual_config: Actual received config.
exclude_keys: List of keys to not check against.
"""
expected_config = expected_config.copy()
actual_config = actual_config.copy()
def _remove_keys(config):
"""Removes all exclude_keys (including nested) from the dict."""
for key in exclude_keys:
if key in config:
del config[key]
for _, v in config.items():
if isinstance(v, dict):
_remove_keys(v)
if isinstance(v, list):
for item in v:
if isinstance(item, dict):
_remove_keys(item)
if exclude_keys:
_remove_keys(expected_config)
_remove_keys(actual_config)
self.assertDictEqual(expected_config, actual_config)
def _assert_model_results_equal(self, model, transformed_model):
inputs = self._create_model_inputs(model)
self.assertAllClose(
model.predict(inputs), transformed_model.predict(inputs))
# Transform classes for testing.
class ReplaceDenseLayer(transforms.Transform):
"""Replaces `Dense` layers with `MyDense`, a simple inherited layer.
This `Transform` class replaces `Dense` layers with a class `MyDense`
which is simply an empty inheritance of `Dense`. This makes it easy to test
the transformation code.
"""
class MyDense(keras.layers.Dense):
pass
def pattern(self):
return LayerPattern('Dense')
def replacement(self, match_layer):
match_layer_config = match_layer.layer['config']
my_dense_layer = self.MyDense(**match_layer_config)
replace_layer = keras.layers.serialize(my_dense_layer)
replace_layer['name'] = replace_layer['config']['name']
return LayerNode(replace_layer, match_layer.weights, [])
def custom_objects(self):
return {'MyDense': self.MyDense}
def testReplaceSingleLayerWithSingleLayer_OneOccurrence(self):
model = self._simple_dense_model()
transformed_model = ModelTransformer(
model, [self.ReplaceDenseLayer()]).transform()
self._assert_config(model.get_config(), transformed_model.get_config(),
['class_name'])
self.assertEqual('MyDense', transformed_model.layers[1].__class__.__name__)
self._assert_model_results_equal(model, transformed_model)
def testReplaceSingleLayerWithSingleLayer_MultipleOccurrences(self):
inp = keras.layers.Input((3,))
x1 = keras.layers.Dense(2)(inp)
x2 = keras.layers.Dense(2)(inp)
out1 = keras.layers.ReLU(6.0)(x1)
out2 = keras.layers.ReLU(6.0)(x2)
model = keras.Model(inp, [out1, out2])
transformed_model = ModelTransformer(
model, [self.ReplaceDenseLayer()]).transform()
self._assert_config(model.get_config(), transformed_model.get_config(),
['class_name'])
self.assertEqual('MyDense', transformed_model.layers[1].__class__.__name__)
self.assertEqual('MyDense', transformed_model.layers[2].__class__.__name__)
self._assert_model_results_equal(model, transformed_model)
def testReplaceSingleLayerWithSingleLayer_MatchParameters(self):
class RemoveBiasInDense(transforms.Transform):
"""Replaces Dense layers with matching layers with `use_bias=False`."""
def pattern(self):
return LayerPattern('Dense', {'use_bias': True})
def replacement(self, match_layer):
match_layer_config = match_layer.layer['config']
# Remove bias
match_layer_weights = match_layer.weights
match_layer_weights.popitem()
match_layer_config['use_bias'] = False
new_dense_layer = keras.layers.Dense(**match_layer_config)
replace_layer = keras.layers.serialize(new_dense_layer)
replace_layer['name'] = replace_layer['config']['name']
return LayerNode(replace_layer, match_layer_weights, [])
model = self._simple_dense_model()
transformed_model = ModelTransformer(
model, [RemoveBiasInDense()]).transform()
self._assert_config(model.get_config(), transformed_model.get_config(),
['use_bias'])
self.assertFalse(transformed_model.layers[1].use_bias)
# Should match since bias is initialized with zeros.
self._assert_model_results_equal(model, transformed_model)
def testReplaceSingleLayer_WithMultipleLayers(self):
# TODO(pulkitb): Implement
pass
def testReplaceChainOfLayers_WithSingleLayer(self):
class FuseReLUIntoDense(transforms.Transform):
"""Fuse ReLU into Dense layers."""
def pattern(self):
return LayerPattern('ReLU', inputs=[LayerPattern('Dense')])
def replacement(self, match_layer):
dense_layer_config = match_layer.input_layers[0].layer['config']
dense_layer_weights = match_layer.input_layers[0].weights
dense_layer_config['activation'] = 'relu'
new_dense_layer = keras.layers.Dense(**dense_layer_config)
replace_layer = keras.layers.serialize(new_dense_layer)
replace_layer['name'] = replace_layer['config']['name']
return LayerNode(replace_layer, dense_layer_weights, [])
inp = keras.layers.Input((3,))
out = keras.layers.Dense(2, activation='relu')(inp)
model_fused = keras.Model(inp, out)
inp = keras.layers.Input((3,))
x = keras.layers.Dense(2)(inp)
out = keras.layers.ReLU()(x)
model = keras.Model(inp, out)
model.set_weights(model_fused.get_weights())
transformed_model = ModelTransformer(
model, [FuseReLUIntoDense()]).transform()
self._assert_config(
model_fused.get_config(), transformed_model.get_config(),
# Layers have different names in the models, but same config.
# Consider verifying the names loosely.
['input_layers', 'output_layers', 'name', 'inbound_nodes'])
self._assert_model_results_equal(model, transformed_model)
self._assert_model_results_equal(model_fused, transformed_model)
def testReplaceChainOfLayers_WithChainOfLayers(self):
# TODO(pulkitb): Implement
pass
def testReplaceTreeOfLayers_WithSingleLayer(self):
# TODO(pulkitb): Implement
pass
def testReplaceTreeOfLayers_WithTreeOfLayers(self):
# TODO(pulkitb): Implement
pass
# Negative Tests
# TODO(pulkitb): Add negative tests
# 1. Does not replace if any layer in the pattern has multiple nodes/consumers
# 2. Adding a single layer clone will lead to infinite loop. Fix and test.
# 3. Handles layer being part of multiple models.
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
44074f6a7dc371ac0f50ed51f5d05b5c89a93a7e | 981fbc25f4a8ef0695830d54c36e0e7c2087575c | /input_template.py | 3ebeae5ee3a6f7dbad4f1574bf6d0f216b007231 | [] | no_license | Sandy4321/CS_algorithm_scripts | 1b0984c25aab362c18767094f6c6252afd8b9f6b | 6eef6ac07ff07362ddaec850a47d7ad7053993b2 | refs/heads/master | 2021-01-15T10:07:18.940108 | 2015-06-08T23:27:25 | 2015-06-08T23:27:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | def solveMeSecond(a,b):
return a+b
n = int(raw_input()) #faster than n = input() , since input() executes the line as python command
for i in range(0,n):
a, b = raw_input().split()
a,b = int(a),int(b)
res = solveMeSecond(a,b)
print res
'''
Alternate code
n = int(raw_input())
for _ in range(n):
a,b = map(int,raw_input().split())
res = solveMeSecond(a,b)
print res
'''
| [
"[email protected]"
] | |
3f6ae3557fb840b712ba31d66869c0d17cc0a93b | d5214b1331c9dae59d95ba5b3aa3e9f449ad6695 | /qPloneSkinDump/tags/0.7.3/skin_template/Extensions/utils.py | 725f5e878f168f0a5e16a43a46eab2ca68f14068 | [] | no_license | kroman0/products | 1661ee25a224c4b5f172f98110944f56136c77cf | f359bb64db22f468db5d1e411638790e94d535a2 | refs/heads/master | 2021-01-10T07:58:04.579234 | 2014-06-11T12:05:56 | 2014-06-11T12:05:56 | 52,677,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,739 | py | import os, sys, re, string
from StringIO import StringIO
from time import gmtime, strftime
from zLOG import LOG, INFO
from zExceptions import BadRequest
from App.config import getConfiguration
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.DirectoryView import addDirectoryViews
from Products.%(SKIN_PRODUCT_NAME)s.config import *
######################################################################
## IMPORTING UTILS ##
######################################################################
osp = os.path
ALLOWED_IMPORT_POLICY = ["only_new", "backup", "overwrite"]
INTRO_TO_INSTANCE = "< Started copying object files from Product import directory to Instance one."
SUMMARY_TO_INSTANCE = "> Finished copying."
INTRO_TO_ROOT = "< Started import %%s file[s] with '%%s' policy."
SUMMARY_TO_ROOT = "> Finished importing."
INTRO_CLEAN = "< Started cleaning Instance import directory."
SUMMARY_CLEAN = "> Finished cleaning."
CREXP_INVALID_ID = re.compile('^The id \"(.*?)\" is invalid - it is already in use.$', re.DOTALL|re.IGNORECASE|re.MULTILINE)
CSS_BASE_IDS_QPSD053 = ['id','expression','enabled','cookable','media','rel','title','rendering'] # supporting qPSD-0.5.3 version
################ CHECK IMPORTING ################
def checkIfImport():
""" Return if perform importing, based on checking
*zexp files in <SkinProduct>/import directory.
"""
instance_ipath, product_ipath = getImportedPathes()
product_ilist = [i for i in os.listdir(product_ipath) \
if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]
if product_ilist:
return 1
return 0
################ IMPORTING TO PLONE'S IMPORT DIR ################
def getImportedPathes():
""" Return Plone instance and Skin product import pathes."""
# Based on instance path, construct import pathes
cfg = getConfiguration()
instance_ipath = osp.join(cfg.instancehome, "import")
product_ipath = osp.join(cfg.instancehome, 'Products', PRODUCT_NAME, "import")
# Check presence of Product import directory
if not osp.isdir(product_ipath):
raise BadRequest, "Skin Product's import directory '%%s' - does not exist or is'nt direcory" %% product_ipath
# Check presence of Instance import directory
if not osp.isdir(instance_ipath):
raise BadRequest, "Instance import directory '%%s' - does not exist or isn't direcory" %% instance_ipath
return [instance_ipath, product_ipath]
def copyFile(src_dir, dst_dir, f_name):
""" Copy file from src_dir to dst_dir under original name."""
try:
src_file = open(osp.join(src_dir, f_name),"rb")
dst_file = open(osp.join(dst_dir, f_name),"wb")
dst_file.write(src_file.read())
dst_file.close()
src_file.close()
except Exception, e:
msg = "!!! In copying files from <%%s> dir to <%%s> dir exception occur. Details: %%s." %% (src_dir,dst_dir, str(e))
print >> import_out, msg
LOG('performImportToPortal',INFO,'copyFile', msg)
def moveToTemp(same_instance_files, instance_ipath, temp_dir_path):
""" Move samenamed files from Instanse's dir to temp dir."""
os.mkdir(temp_dir_path) # Create temp back_[date] dir
try:
[copyFile(instance_ipath, temp_dir_path, f_name) for f_name in same_instance_files]
[os.remove(osp.join(instance_ipath, f_name)) for f_name in same_instance_files]
except Exception, e:
msg = "!!! Exception occur during moving files from Instance's dir to temp dir. Detaile:%%s." %% str(e)
print >> import_out, msg
LOG('performImportToPortal',INFO,'moveToTemp', msg)
def copyToInstanceImport():
""" Perform copying imported files from <SkinProduct>/import dir
to Plone's instance import dir.
"""
print >> import_out, INTRO_TO_INSTANCE
instance_ipath, product_ipath = getImportedPathes()
# Compose temp dir back_[date] dir path in Instance import directory
temp_dir_id = "back_%%s" %% strftime("%%Y%%m%%d%%H%%M%%S", gmtime())
temp_dir_path = osp.join(instance_ipath, temp_dir_id)
# Get *.zexp files from Skin Product's import dir and Plone's instance import dir files
product_ilist = [i for i in os.listdir(product_ipath) \
if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]
instance_ilist = [i for i in os.listdir(instance_ipath) \
if osp.isfile(osp.join(instance_ipath,i)) and i.endswith('.zexp')]
# Check for presence samenamed files in Instance and Product import directories.
same_instance_files = [f_name for f_name in instance_ilist if f_name in product_ilist]
if same_instance_files:
moveToTemp(same_instance_files, instance_ipath, temp_dir_path)
# Copy all *zexp files from Product's import dir to Instance's import dir
[copyFile(product_ipath, instance_ipath, f_name) for f_name in product_ilist]
print >> import_out, SUMMARY_TO_INSTANCE
return [instance_ipath, product_ipath, temp_dir_path, product_ilist]
################ IMPORTING TO PORTAL ################
def importObject(portal, file_name):
""" Work around old Zope bug in importing."""
try:
portal.manage_importObject(file_name)
except:
portal._p_jar = portal.Destination()._p_jar
portal.manage_importObject(file_name)
def makeBackUp(portal, portal_objects, temp_dir_path, obj_id):
""" Perfom backup same named portal objects in temp folder."""
# Get id of temp folder-object
durty_path,temp_id = osp.split(temp_dir_path)
if not temp_id:
durty_path,temp_id = osp.split(durty_path)
# Get temp folder-object
if temp_id not in portal_objects:
portal.invokeFactory('Folder', id=temp_id)
print >> import_out, "! Created '%%s' backup directory with same-ids " \
"objects from portal root." %% temp_id
temp_dir = getattr(portal, temp_id)
# Move object with same id to temp folder-object
get_transaction().commit(1)
obj = portal.manage_cutObjects(ids=[obj_id])
temp_dir.manage_pasteObjects(obj)
print >> import_out, "! '%%s' Object moved from portal root to '%%s' backup directory." %% (obj_id, temp_id)
def performImport(portal, temp_dir_path, file_name):
""" Importing an object to portal."""
portal_objects = portal.objectIds()
try:
portal.manage_importObject(file_name)
except Exception, e:
msg = str(e)
is_invalid_id = CREXP_INVALID_ID.match(msg)
if is_invalid_id:
obj_id = is_invalid_id.group(1)
if IMPORT_POLICY == "only_new":
msg = "! Object with '%%s' id was not importing because it's already exist " \
"in portal root." %% obj_id
print >> import_out, msg
elif IMPORT_POLICY == "backup":
makeBackUp(portal, portal_objects, temp_dir_path, obj_id)
importObject(portal, file_name)
elif IMPORT_POLICY == "overwrite":
portal.manage_delObjects(ids=[obj_id])
importObject(portal, file_name)
else:
# work around old Zope bug in importing
portal._p_jar = portal.Destination()._p_jar
portal.manage_importObject(file_name)
def importToPortalRoot(portal, product_file_names, temp_dir_path):
""" Import all objects from *zexp files to portal root (based on IMPORT_POLICY)."""
if not IMPORT_POLICY in ALLOWED_IMPORT_POLICY:
raise Exception("%%s - wrong import policy in '%%s/config.py' file. Must be one of the %%s" \
%% (IMPORT_POLICY, PRODUCT_NAME, ALLOWED_IMPORT_POLICY) )
print >> import_out, INTRO_TO_ROOT %% (product_file_names, IMPORT_POLICY)
for file_name in product_file_names:
try:
performImport(portal, temp_dir_path, file_name)
except Exception, error:
msg = '!!! Under "%%s" policy importing exception occur: %%s.' %% (IMPORT_POLICY, str(error))
print >> import_out, msg
LOG('performImportToPortal',INFO,'importToPortalRoot', msg)
print >> import_out, SUMMARY_TO_ROOT
################ CLEANING PLONE'S IMPORT DIR ################
def cleanInstanceImport(instance_ipath, product_file_names, temp_dir_path):
""" Cleaning Plone's import dir."""
print >> import_out, INTRO_CLEAN
# Erase all copied *zexp files from Instance's import dir
for f_name in product_file_names:
f_path = osp.join(instance_ipath, f_name)
if osp.exists(f_path) and osp.isfile(f_path):
os.remove(f_path)
else:
msg = '! "%%s" file was not deleted from "%%s" import directory.' %%\
(f_name, osp.join(instance_ipath))
print >> import_out, msg
LOG('performImportToPortal',INFO,'cleanInstanceImport', msg)
# Move all files from temp back_[date] dir to Instance's import dir
if osp.exists(temp_dir_path) and osp.isdir(temp_dir_path):
f_names = os.listdir(temp_dir_path)
try:
[copyFile(temp_dir_path, instance_ipath, f_name) for f_name in f_names]
[os.remove(osp.join(temp_dir_path, f_name)) for f_name in f_names]
# Erase temp back_[date] dir
os.rmdir(temp_dir_path)
except Exception, e:
msg = "!!! In moving files from temp dir to Instance's import dir exception occur."
print >> import_out, msg
LOG('performImportToPortal',INFO,'moveFromTempToImport', msg)
print >> import_out, SUMMARY_CLEAN
################ MAIN ################
def performImportToPortal(portal):
""" Import objects from Skin Product to Portal root."""
globals()['import_out'] = StringIO()
instance_ipath, product_ipath, temp_dir_path, product_file_names = copyToInstanceImport()
if product_file_names:
importToPortalRoot(portal, product_file_names, temp_dir_path)
cleanInstanceImport(instance_ipath, product_file_names, temp_dir_path)
else:
print >> import_out, "!!! Failure importing: there is no file for importing to be found."
result = import_out
del globals()['import_out']
return result.getvalue()
######################################################################
## INSTALLATION/UNINSTALLATION UTILS ##
######################################################################
CSS_REG_PROPS = ['id', 'expression', 'enabled', 'cookable', 'cacheable' \
,'media', 'rel', 'title', 'rendering', 'compression']
JS_REG_PROPS = ['id', 'expression', 'enabled', 'cookable', 'cacheable' \
,'inline', 'compression']
def installSkin(portal, pp_up, out):
    # Check for the presence of SKIN_NAME in the portal_skins directory view or among the skin names
skinsTool = getToolByName(portal, 'portal_skins')
# Get unique product_skin_name and remember it in case of differ from SKIN_NAME.
product_skin_name = SKIN_NAME
skin_names = skinsTool.getSkinSelections()
if product_skin_name in skin_names:
idx = 0
while product_skin_name in skin_names:
product_skin_name = SKIN_NAME + str(idx)
idx += 1
addProperty(pp_up, 'q_actual_skin_name', product_skin_name, 'string', out)
# Add directory views
layer_skin_name = string.lower(SKIN_NAME)
addDirectoryViews(skinsTool, 'skins', GLOBALS)
print >> out, "- added '%%s' directory views to portal_skins." %% layer_skin_name
    # Get the default skin and remember it so it can be restored on uninstall
default_skin = skinsTool.getDefaultSkin()
addProperty(pp_up, 'q_default_skin', default_skin, 'string', out)
# Building list of layers for NEW SKIN
base_path = skinsTool.getSkinPath(BASE_SKIN_NAME)
new_path = map( string.strip, string.split(base_path,',') )
if layer_skin_name in new_path :
print >> out, "- %%s layer already present in '%%s' skin." %% (layer_skin_name, BASE_SKIN_NAME)
# Remove layer_skin_name from current position.
del new_path[new_path.index(layer_skin_name)]
# Add layer_skin_name just after 'custom' position
try:
new_path.insert(new_path.index('custom')+1, layer_skin_name)
except ValueError:
new_path.append(layer_skin_name)
new_path = string.join(new_path, ', ')
# Add NEW Skin and set it as dafault
skinsTool.addSkinSelection(product_skin_name, new_path, make_default=1)
print >> out, "Added %%s skin, bassed on %%s and set as default." %% (product_skin_name, BASE_SKIN_NAME)
def uninstallSkin(skinsTool, actual_skin_name, initial_skin):
# Get 'portal_skins' object and list available skin names
    # and remove SKIN_NAME from the available skins, if it is present
skin_names = skinsTool.getSkinSelections()
if actual_skin_name in skin_names :
skinsTool.manage_skinLayers(chosen=(actual_skin_name,), del_skin=1, REQUEST=None)
skin_names.remove(actual_skin_name)
# Remove product skin directory from skins tool
# AND Remove skin-product layer from available skins
skin_layer = SKIN_NAME.lower()
if skin_layer in skinsTool.objectIds():
skinsTool.manage_delObjects(skin_layer)
for skin_name in skin_names:
path = skinsTool.getSkinPath(skin_name)
path = [i.strip() for i in path.split(',')]
if skin_layer in path:
path.remove(skin_layer)
path = ','.join(path)
skinsTool.addSkinSelection(skin_name, path)
    # If the current default skin == actual_skin_name,
    # set the default skin to the initial one (if the initial skin still exists)
    # or to the first entry of the available skin names list.
current_default_skin = skinsTool.getDefaultSkin()
if current_default_skin == actual_skin_name:
if initial_skin in skin_names :
skinsTool.manage_properties(default_skin=initial_skin, REQUEST=None)
elif len(skin_names)>0 :
skinsTool.manage_properties(default_skin=skin_names[0], REQUEST=None)
def addProperty(p_sheet, p_id, p_value, p_type, out):
if p_sheet.hasProperty(p_id):
p_sheet._delProperty(p_id)
p_sheet._setProperty(p_id, p_value, p_type)
print >> out, "... added %%s PropertySheet to %%s." %% (p_id, p_sheet.getId())
def getResourceProperties(obj, prop_list, dflt=''):
""" Return list of 2 items list-[property name, property value]."""
properties=[]
for prop in prop_list:
accessor = getattr(obj, 'get%%s' %% prop.capitalize(), None)
if accessor:
properties.append([prop, accessor() or dflt])
return properties
def registerResource(pp_up, portal_res, resRegisterFunction, out \
,RESOURCE_SKIN_LIST, SKIN_RES_REGDATA, UP_PROPERTY, RES_REG_PROPS):
""" Register resources in portal's registry, remember existant settings."""
# Get original registered resources
portal_res_srings = []
for r in portal_res.getResources():
portal_res_srings.append(";".join(['%%s::%%s'%%(r[0],str(r[1])) \
for r in getResourceProperties(r, RES_REG_PROPS)]))
addProperty(pp_up, UP_PROPERTY, portal_res_srings, 'lines', out)
# Tune Resource registry according to new skin needs
unexistent = [] # list of default resources,
                    # which are present in the Skin product BUT absent in the portal
portal_res_ids = portal_res.getResourceIds()
for res_dict in SKIN_RES_REGDATA:
if res_dict['id'] not in portal_res_ids:
            # Interestingly, the Resource Registry allows adding a nonexistent resource - use this
resRegisterFunction(**res_dict)
if res_dict['id'] not in RESOURCE_SKIN_LIST:
unexistent.append(res_dict['id'])
else:
pos = portal_res.getResourcePosition(res_dict['id'])
portal_res.unregisterResource(res_dict['id'])
resRegisterFunction(**res_dict)
portal_res.moveResource(res_dict['id'], pos)
if unexistent:
print >> out, "!!! - BAD: your Resource Regestry have'nt %%s resource(s), which may lead to some problems." %% unexistent
def getVersion(res_list):
"""Check version of skin product generator."""
return (res_list and not '::' in res_list[0] and '0.5') or '0.7'
def uninstallResource(portal_res, original_res_list, RESOURCE_SKIN_LIST, resRegisterFunction):
# Prepare Resource Registry data for backup to original state
original_res_regestry = {}
genVersion = getVersion(original_res_list)
for rec in original_res_list:
resource = {}
if genVersion == '0.7':
[resource.update({prop.split('::')[0]:prop.split('::')[1]}) for prop in rec.split(";")]
elif genVersion == '0.5':
props = rec.split(";")
[resource.update({CSS_BASE_IDS_QPSD053[i]:props[i]}) for i in range(len(CSS_BASE_IDS_QPSD053))]
original_res_regestry[resource.pop('id')] = resource
# Work up actual Resource Registry
res_dict = portal_res.getResourcesDict()
for res_id in res_dict.keys():
# Remove from Resource Registry Skin product's resources
if res_id in RESOURCE_SKIN_LIST \
and res_id not in original_res_regestry.keys():
portal_res.unregisterResource(res_id)
continue
        # Restore the 'enabled' property of the Registry's resources to its original state
if original_res_regestry.has_key(res_id):
act_Enabled_state = res_dict[res_id].getEnabled()
orig_Enabled_state = original_res_regestry[res_id]['enabled']
if act_Enabled_state != orig_Enabled_state:
pos = portal_res.getResourcePosition(res_id)
resource = res_dict[res_id]
res = original_res_regestry[res_id]
portal_res.unregisterResource(res_id)
resRegisterFunction(res_id, **res)
portal_res.moveResource(res_id, pos)
def customizeSlots(portal, pp_up, out):
# Get original Site's column lists
orig_left_slots = left_column = list(portal.left_slots)
orig_right_slots = right_column = list(portal.right_slots)
# Save original Site's LEFT and RIGHT slots
addProperty(pp_up, 'q_left_slots', orig_left_slots, 'lines', out)
addProperty(pp_up, 'q_right_slots', orig_right_slots, 'lines', out)
    # blend_with_site - add to the portal's slots only the new ones coming from the skin product
    # blend_with_skin - the portal's slots are formed as follows:
    #     first the skin product's slots are added, then the new ones from the portal
    # replace - the portal's slots are formed only from the skin product's slot list
if SLOT_FORMING == "blend_with_skin":
left_column, right_column = formSlotsColumn(LEFT_SLOTS, RIGHT_SLOTS,
orig_left_slots, orig_right_slots, MAIN_COLUMN)
elif SLOT_FORMING == "blend_with_site":
left_column, right_column = formSlotsColumn(orig_left_slots, orig_right_slots,
LEFT_SLOTS, RIGHT_SLOTS, MAIN_COLUMN )
elif SLOT_FORMING == "replace":
left_column, right_column = formSlotsColumn(LEFT_SLOTS, RIGHT_SLOTS, [], [], MAIN_COLUMN)
# REPLACE SITE's column slots
portal.left_slots = tuple(left_column)
portal.right_slots = tuple(right_column)
print >> out, "Complited portal slots customization ..."
# main_column ("left" / "right" / "both") tells which of the MAIN columns is favoured;
# see the illustrative example after the function below.
def formSlotsColumn(main_left, main_right, slave_left=[], slave_right=[], main_column="both"):
result_left = main_left
result_right = main_right
if main_column == "left":
# 1) APPEND to MAIN_LEFT list *new for main_left column* slots from slave_left list
# 2) APPEND to MAIN_RIGHT list *new for both main columns* slots from slave_right
# 3) REMOVE slots from MAIN_RIGHT list, which are *doubled* in MAIN_LEFT
[result_left.append(slot) for slot in slave_left if slot not in result_left]
[result_right.append(slot) for slot in slave_right \
if slot not in result_right and slot not in result_left]
[result_right.remove(slot) for slot in result_left if slot in result_right]
elif main_column == "right":
# 1) APPEND to MAIN_LEFT list *new for main_right column* slots from slave_left list
# 2) APPEND to MAIN_RIGHT list *new for both main columns* slots from slave_right
# 3) REMOVE slots from MAIN_LEFT list, which are *doubled* in MAIN_RIGHT
[result_right.append(slot) for slot in slave_right if slot not in result_right]
[result_left.append(slot) for slot in slave_left \
if slot not in result_left and slot not in result_right]
[result_left.remove(slot) for slot in result_right if slot in result_left]
elif main_column == "both":
# 1) APPEND to MAIN_LEFT list *new for both main columns* slots from slave_left list
# 2) APPEND to MAIN_RIGHT list *new for both main columns* slots from slave_right
[result_left.append(slot) for slot in slave_left \
if slot not in result_left and slot not in result_right]
[result_right.append(slot) for slot in slave_right \
if slot not in result_right and slot not in result_left]
return [result_left, result_right]
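# A quick illustrative sketch of the merge behaviour described above (the slot
# names below are made up, not taken from a real site):
#   formSlotsColumn(['news', 'login'], ['calendar'],
#                   ['login', 'search'], ['events', 'news'],
#                   main_column='left')
# returns [['news', 'login', 'search'], ['calendar', 'events']]: 'search' is
# appended to the left column, 'events' to the right column, and 'news' is not
# added to the right column because the favoured left column already holds it.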
def getProperty(pp, ps, id, default=[]):
""" Get property from portal_properties/[property_sheet]"""
res = default
if ps in pp.objectIds() and pp[ps].hasProperty(id):
res = pp[ps].getProperty(id, default)
return res
| [
"mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946"
] | mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946 |
e310d84ef134fa90d02ddbcb43eb4159e92125c2 | 7d4597b6f9b631dd1f91059a4d904d2847e29a9c | /offerSpider/spiders/saveon.py | b9e4eb0faa58041584990acba2c7d8d25a7d856e | [] | no_license | lychlov/offerSpider | 6efc1b47e235902252ad0534f916d7f0baa49d00 | 8559ae3c65538d365aa11598d1070a4eadc82a1f | refs/heads/master | 2020-03-23T14:42:41.796002 | 2019-01-24T03:20:51 | 2019-01-24T03:20:51 | 141,694,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,760 | py | # # -*- coding: utf-8 -*-
# import re
#
# import requests
# import scrapy
# from bs4 import BeautifulSoup
#
# from offerSpider.util import get_header
# from offerSpider.items import CouponItem
#
#
# class SaveonSpider(scrapy.Spider):
# name = 'saveon'
# allowed_domains = ['saveoncannabis.com']
# start_urls = ['https://www.saveoncannabis.com/stores']
# page_url = 'https://www.saveoncannabis.com/stores/%s/'
#
# def parse(self, response):
# html = response.body
# soup = BeautifulSoup(html, 'lxml')
# if not re.findall(r'/stores/(.+?)/', response.url):
# max_page = int(soup.find('ul', class_='page-numbers').find('a').text)
# for i in range(2, max_page + 1):
# yield scrapy.Request(url=self.page_url % i, callback=self.parse)
# stores = soup.find_all('div', class_='store-logo')
# for store in stores:
# link = store.find('a').get('href')
# yield scrapy.Request(url=link, callback=self.store_parse)
# pass
#
# def store_parse(self, response):
# html = response.body
# soup = BeautifulSoup(html, 'lxml')
# main_coupon_info = soup.find('div', class_='store-offer-featured')
# if main_coupon_info:
# main_coupon = CouponItem()
# main_coupon['type'] = 'coupon'
# main_coupon['name'] = main_coupon_info.find('h2').text.strip()
# main_coupon['site'] = 'saveoncannabis.com'
# main_coupon['description'] = ''
# main_coupon['verify'] = True
# main_coupon['link'] = ''
# main_coupon['expire_at'] = main_coupon_info.find('div',class_='deal-countdown-info').text.strip().replace('Expires in: ','')
#
# main_coupon['coupon_type'] = 'CODE'
#
# main_coupon['code'] = ''
# main_coupon['final_website'] = ''
# main_coupon['store'] = ''
# main_coupon['store_url_name'] = ''
# main_coupon['store_description'] = ''
# main_coupon['store_category'] = ''
# main_coupon['store_website'] = ''
# main_coupon['store_country'] = ''
# main_coupon['store_picture'] = ''
# main_coupon['created_at'] = ''
# main_coupon['status'] = ''
# main_coupon['depth'] = ''
# main_coupon['download_timeout'] = ''
# main_coupon['download_slot'] = ''
# main_coupon['download_latency'] = ''
# yield main_coupon
#
# coupon_infos = soup.find('div', class_='coupons-other').find_all('div', class_='white-block')
# if coupon_infos:
# for coupon_info in coupon_infos:
# coupon = CouponItem()
# coupon['type'] = 'coupon'
# coupon['name'] = ''
# coupon['site'] = ''
# coupon['description'] = ''
# coupon['verify'] = ''
# coupon['link'] = ''
# coupon['expire_at'] = ''
# coupon['coupon_type'] = ''
# coupon['code'] = ''
# coupon['final_website'] = ''
# coupon['store'] = ''
# coupon['store_url_name'] = ''
# coupon['store_description'] = ''
# coupon['store_category'] = ''
# coupon['store_website'] = ''
# coupon['store_country'] = ''
# coupon['store_picture'] = ''
# coupon['created_at'] = ''
# coupon['status'] = ''
# coupon['depth'] = ''
# coupon['download_timeout'] = ''
# coupon['download_slot'] = ''
# coupon['download_latency'] = ''
# yield coupon
# pass
#
#
# def get_domain_url(long_url):
# domain = re.findall(r'^(http[s]?://.+?)[/?]', long_url + '/')
# return domain[0] if domain else None
#
#
# def get_real_url(url, try_count=1):
# if try_count > 3:
# return url
# try:
# rs = requests.get(url, headers=get_header(), timeout=10, verify=False)
# if rs.status_code > 400 and get_domain_url(rs.url) == 'www.offers.com':
# return get_real_url(url, try_count + 1)
# if get_domain_url(rs.url) == get_domain_url(url):
# target_url = re.findall(r'replace\(\'(.+?)\'', rs.content.decode())
# if target_url:
# return target_url[0].replace('\\', '') if re.match(r'http', target_url[0]) else rs.url
# else:
# return rs.url
# else:
# return get_real_url(rs.url)
# except Exception as e:
# print(e)
# return get_real_url(url, try_count + 1)
| [
"[email protected]"
] | |
3655a1d7009c58072673e92b9dcc169dbed6d245 | bcbcd360967d9f79ef542ead5b30de42ec61b2d3 | /code_v1_recovered/Unigrams/top100LinksPerCom.py | 4a2b7812a4374ffdf8f5fa87ecf736bcdf22e711 | [] | no_license | Roja-B/EvolvingComs | d00b30576e6b8977ce1be0c6317155bfeb711806 | b58fa29972d9aad095ed0f364b1e0ec876b9b6c5 | refs/heads/master | 2020-04-14T18:30:48.657243 | 2013-02-11T05:54:16 | 2013-02-11T05:54:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | import operator
import sys
from noLow import *
# this program produces the list of top 100 links per community based on the Chi-squared table for each time window
#PATH = raw_input('Enter data path: ')
#M = int(raw_input('Enter the number of communities: '))
#tablefilename = raw_input("Enter file name: ")
pathfile = open("PATHSplusCOMS","r")
tablefilename = "Chi2.txt"
for line in pathfile:
line = line.strip()
L = line.split("\t")
PATH = L[0]+"/RelevantLinks"
M = int(L[1])
f = open(PATH+'/'+tablefilename,"r")
Communities= []
#for each community we need a hash table
for i in range(M):
Communities.append(dict())
for line in f:
link = line.split('\t')[0]
for i in range(0,M):
count = float(line.split('\t')[i+1])
Communities[i][link] = count
for i in range(0,M):
sorted_com = sorted(Communities[i].iteritems(), key=operator.itemgetter(1),reverse=True)
t = open(PATH+"/NoLowtop50Links"+str(i),"w")
length = len(sorted_com)
count = 0
	for j in range(length):
if linkvotes[sorted_com[j][0]] < 10 : continue
t.write("link "+sorted_com[j][0]+' '+str(sorted_com[j][1])+'\n')
count +=1
if count == 50: break
t.close()
f.close()
pathfile.close()
| [
"[email protected]"
] | |
61eface07e2a27ae86d3c33097cb278cffe65e4f | a6d45b7b0caccc92dd7b0d2cc352498a32f5a181 | /uploader/migrations/0001_initial.py | 52eaec7d149d4ac8deb876b1956156002064a661 | [] | no_license | suhailvs/djangofileupload | e149e27b085f18f69c61074039e08a9c74283ca2 | 40b73cdf5c50bd44a4956ec70cf52d4c358f58c2 | refs/heads/master | 2023-03-23T17:34:53.077721 | 2020-04-20T16:09:29 | 2020-04-20T16:09:29 | 20,531,971 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # Generated by Django 3.0.5 on 2020-04-20 15:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Upload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('upload_file', models.FileField(upload_to='')),
('upload_date', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"[email protected]"
] | |
7e2974f9de7a5d5e34105cf131643c825f8338db | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02991/s030157837.py | 6e3b67de9db4e8ee071c1c288612c95cbf324ab6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | import sys
input = sys.stdin.buffer.readline
from collections import deque
def main():
N,M = map(int,input().split())
edge =[[] for _ in range(N)]
for _ in range(M):
u,v = map(int,input().split())
edge[u-1].append(v-1)
S,T = map(int,input().split())
q = deque()
go = [[False for _ in range(3)] for _ in range(N)]
q.append((S-1,0,1))
while q:
now,step,d = q.popleft()
if step == 3:
if now == T-1:
print(d)
exit()
step = 0
d += 1
if go[now][step]:
continue
go[now][step] = True
for fol in edge[now]:
q.append((fol,step+1,d))
print(-1)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
996339b2d5f97720cb4f6779affdae2d70fef420 | d8cbc94a4207337d709a64447acb9c8fe501c75a | /subset_selection/code/cli.py | 54738e4db5034a5f1e4316b6792e9c41b4e53b4e | [
"MIT"
] | permissive | sripathisridhar/acav100m | 6f672384fa723a637d94accbbe11a9a962f5f87f | 13b438b6ce46d09ba6f79aebb84ad31dfa3a8e6f | refs/heads/master | 2023-09-06T01:05:21.188822 | 2021-11-18T08:08:08 | 2021-11-18T08:08:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | import time
import datetime
from pathlib import Path
import fire
from args import get_args
from run import run_single
from run_contrastive import run_single_contrastive
from chunk import run_chunks, reduce_all_pkls
from chunk_contrastive import run_chunks_contrastive
from save import merge_all_csvs
from merge_contrastive import merge_contrastive
from tests import compare_measures
class Cli:
def prepare(self, **kwargs):
args = get_args(**kwargs)
if 'out_path' in kwargs:
args.data.output.path = Path(kwargs['out_path'])
opath = args.data.output.path
if opath.stem == opath.name:
# potential dir
opath = opath / 'output.csv'
opath.parent.mkdir(parents=True, exist_ok=True)
args.data.output.path = opath
if 'shards_path' in kwargs:
args.data.path = Path(kwargs['shards_path'])
if 'meta_path' in kwargs:
args.data.meta.path = Path(kwargs['meta_path'])
mpath = args.data.meta.path
if mpath is None:
# use shard directory
mpath = args.data.path.parent
if not mpath.is_dir() and mpath.parent.is_dir():
mpath = mpath.parent
args.data.meta.path = mpath
return args
def run(self, **kwargs):
start = time.time()
args = self.prepare(**kwargs)
run(args)
elasped = time.time() - start
elasped = str(datetime.timedelta(seconds=elasped))
print('done. total time elasped: {}'.format(elasped))
def reduce_csvs(self, **kwargs):
start = time.time()
args = self.prepare(**kwargs)
merge_all_csvs(args)
elasped = time.time() - start
elasped = str(datetime.timedelta(seconds=elasped))
print('done. total time elasped: {}'.format(elasped))
def reduce_pkls(self, **kwargs):
start = time.time()
args = self.prepare(**kwargs)
reduce_all_pkls(args)
elasped = time.time() - start
elasped = str(datetime.timedelta(seconds=elasped))
print('done. total time elasped: {}'.format(elasped))
def reduce(self, **kwargs):
start = time.time()
args = self.prepare(**kwargs)
if args.save_cache_as_csvs:
merge_all_csvs(args)
else:
reduce_all_pkls(args)
elasped = time.time() - start
elasped = str(datetime.timedelta(seconds=elasped))
print('done. total time elasped: {}'.format(elasped))
def compare_measures(self, **kwargs):
args = self.prepare(**kwargs)
compare_measures(args)
print('done')
def merge_contrastive(self, **kwargs):
args = self.prepare(**kwargs)
merge_contrastive(args)
def run(args):
if args.measure_name == 'contrastive':
if args.chunk_size is None:
run_single_contrastive(args)
else:
run_chunks_contrastive(args)
else:
if args.chunk_size is None:
run_single(args)
else:
run_chunks(args)
if __name__ == '__main__':
fire.Fire(Cli)
| [
"[email protected]"
] | |
9775bc6bd071f66fbb05d218a99381b23510f116 | be73248aa4f1171e81b65cf955c4bd6110d56095 | /request_test.py | 353ec800d3b9bd9c0e3797743ad8a33355ced72f | [] | no_license | rogerhoward/lambot | 781c158e58bd71e2f3eb480aab31f181aee55e62 | d5588041fc92b779ba88479d8657f9b8a4916692 | refs/heads/development | 2022-02-18T05:03:23.911978 | 2017-06-22T03:22:11 | 2017-06-22T03:22:11 | 86,493,856 | 1 | 1 | null | 2022-02-04T15:04:55 | 2017-03-28T18:30:43 | Python | UTF-8 | Python | false | false | 2,137 | py | #!/usr/bin/env python
import os
import requests
from pprint import pprint
import click
@click.command()
@click.option('--token', default='gIkuvaNzQIHg97ATvDxqgjtO', help='Slack API token.')
@click.option('--team_id', default='T0001', help='The unique Slack team ID')
@click.option('--team_domain', default='example', help='The unique Slack domain')
@click.option('--channel_id', default='C2147483705', help='The unique ID of the channel where this command originated')
@click.option('--channel_name', default='bot', help='The name of the channel where this command originated')
@click.option('--user_id', default='U2147483697', help='The unique ID of the user who sent this command')
@click.option('--user_name', default='rogerhoward', help='The username of the user who sent this command.')
@click.option('--command', default='/lambot', help='The slash command name')
@click.option('--text', default='calendar', help='All text that followed the slash command - generally options and modifiers')
@click.option('--response_url', default='http://0.0.0.0:5000/test/response', help='The URL where to POST the response(s) - up to five responses may be POSTed to this Webhook')
@click.option('--url', default='http://0.0.0.0:5000/', help='The URL where to POST the initial Slack command payload')
def run(token, team_id, team_domain, channel_id, channel_name, user_id, user_name, command, text, response_url, url ):
"""
Simulates the Slack client by posting a standard Slack payload to the bot endpoint. The URL of the endpoint as well as all values in the payload can be overriden using command line options. The payload format is documented at https://api.slack.com/slash-commands#triggering_a_command
"""
data = {'token': token,
'team_id': team_id,
'team_domain': team_domain,
'channel_id': channel_id,
'channel_name': channel_name,
'user_id': user_id,
'user_name': user_name,
'command': command,
'text': text,
'response_url': response_url}
requests.post(url, data=data)
if __name__ == '__main__':
run()
| [
"[email protected]"
] | |
7fd4b8acc7c9c38677a8256d3556db119b6fe7c8 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/create_tests/create_tst_class.expected_pytest_2k.py | 327ec499b4f492f406f449db0fb397764f8ae8eb | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 111 | py | class Spam(object):
def eggs(self):
assert False
def eggs_and_ham(self):
assert False
| [
"[email protected]"
] | |
c2cd0da87716a6c9fe21cade4cc83fb2007f479d | ebc7607785e8bcd6825df9e8daccd38adc26ba7b | /python/baekjoon/2.algorithm/brute_force/백준_감소하는_수.py | b4b9f0e4b6dd253325d331cce5183803d908e65f | [] | no_license | galid1/Algorithm | 18d1b72b0d5225f99b193e8892d8b513a853d53a | 5bd69e73332f4dd61656ccdecd59c40a2fedb4b2 | refs/heads/master | 2022-02-12T07:38:14.032073 | 2022-02-05T08:34:46 | 2022-02-05T08:34:46 | 179,923,655 | 3 | 0 | null | 2019-06-14T07:18:14 | 2019-04-07T05:49:06 | Python | UTF-8 | Python | false | false | 571 | py | import sys
def dfs(cur_num, limit):
global answer, idx, n, answers
    # End of recursion
if len(cur_num) == limit:
idx += 1
answers.append(cur_num)
        # The answer exists
if idx == n:
print(cur_num)
sys.exit()
return
if not cur_num:
for i in range(10):
dfs(str(i), limit)
else:
for j in range(int(cur_num[-1])):
dfs(cur_num + str(j), limit)
answer, idx = 0, -1
answers = []
n = int(sys.stdin.readline())
for i in range(1, 11):
dfs('', i)
print(-1) | [
"[email protected]"
] | |
2f06ed76fa47c4244dbaeecb75147c3f68f79bde | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/29/usersdata/67/9081/submittedfiles/atividade.py | a1a123328be3fb4dd0cf7fd77b88a631cf61ee74 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
n=int(input("Digite o valor de n:"))
contador=0
i=1
while (i<=n):
    if n//10!=0:
contador=contador+1
i=i+1
print(contador) | [
"[email protected]"
] | |
0f2f7ee10782ae1ea20dac49abf367a2909b2920 | 7578f8752ea9693c9b2bcca1b4f4bddb74ea4c4b | /projector/projections.py | bb0223ddd257c754cf518486cd794b58e3a14024 | [
"MIT"
] | permissive | SixiemeEtage/projector | 5ade66f8932c5905619518b6df4cf6fc460bd040 | 6d6b2488322556b1cd71eafc7d784787aca331bd | refs/heads/master | 2021-01-19T08:48:41.375749 | 2019-03-17T13:52:06 | 2019-03-17T14:06:54 | 81,648,850 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | import libprojector
PROJECTION_EQUIRECTANGULAR = 'equirectangular'
PROJECTION_CUBEMAP = 'cubemap'
class BaseProj(object):
def __init__(self, image_width, options):
self.image_width = image_width
self.options = options
def get_projection(self):
raise NotImplementedError
class EquirectangularProj(BaseProj):
def get_projection(self):
width = int(self.image_width)
height = int(self.image_width / 2)
return libprojector.SphericalProjection(width, height)
class CubemapProj(BaseProj):
def get_projection(self):
side_width = int(self.image_width / 6)
border_padding = self.options.get('border_padding', 0)
return libprojector.CubemapProjection(side_width, border_padding)
PROJECTION_CLASSES = dict((
(PROJECTION_EQUIRECTANGULAR, EquirectangularProj),
(PROJECTION_CUBEMAP, CubemapProj),
))
| [
"[email protected]"
] | |
2993ce92666d43ec9e6a520bf4027609ca676413 | 221e3afe0ef457c088d9c7725b5a1cc70d77b16e | /base/migrations/0002_remove_category_content.py | 418f3ac3266d248bb9952513d02122a2b10c217b | [] | no_license | Rockstreet/titov_base | 6615087518b33635da6fec4d73716670c0b25d5a | 612d842c423ffc3754e90a463029e9415aacb318 | refs/heads/master | 2021-01-19T05:22:06.940949 | 2017-04-12T16:09:06 | 2017-04-12T16:09:06 | 87,428,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-07 09:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='content',
),
]
| [
"[email protected]"
] | |
2d32855077a8fd0594875c11d0c248fa27e1c3d9 | df24807455a5bc4db794d79cc88e6bde93d3d404 | /HH_glycopeptide - KK testing v2/sequencespace.py | e7d7bfc3a84a3b32c1db46ef3e02d0eb112fb0cd | [] | no_license | GlycReSoft2/glycopeptide-testing | 075b594025c95a9c9cfb79fcf802bd326459238f | 574bc5b44ef8a562e2676aca24062b04f4bfeb17 | refs/heads/master | 2021-01-23T11:49:35.306116 | 2014-05-22T17:33:19 | 2014-05-22T17:33:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,178 | py | from sequence import Sequence
from operator import and_
from functools import reduce
from modification import Modification
from residue import Residue
import copy
import itertools
import warnings
class SequenceSpace:
"""Generate all theoretical glycopeptide sequences"""
def __init__(self, seq, glycan_compo, glycan_sites, mod_list):
"""
seq -- sequence code
glycan_compo -- glycan compositions, dict.
glycan_sites -- sets of candidate sites for glycosylation
mod_list -- list of modifications.
"""
# Filter the glycan composition. Get the max number of HexNAc
self.seq = Sequence(seq) # Sequence object
self.glycan_composition = glycan_compo
self.candidate_sites = glycan_sites
self.modifications = mod_list
def getTheoreticalSequence(self, num_sites):
"""
        Get theoretical sequence tailored for fragmentation
        max_sites -- the maximum number of glycosylation sites.
-1 means unlimited.
"""
#raw_seq = self.seq
seq_space = []
occupied_sites = []
#exploreSequence(mod_set, 0, raw_seq, occupied_sites, seq_space)
n = len(self.modifications)
ix_bound = []
## Get the candidate sites for all modification
for mod in self.modifications:
if mod.position != -1: # The position specified.
ix_bound.append((mod.position,)) # One element tuple
elif mod.target!= '': # The target specified.
ix_list = [ix for ix in range(self.seq.length) if self.seq.at(ix)[0].name == mod.target]
## temp_list has format like [(1,2,3), (2,3,4)]
temp_list = [ix for ix in itertools.combinations(ix_list, mod.number)]
ix_bound.append(temp_list)
else:
raise Exception('Unqualified modification!')
## Initialize the choice index for each modification type.
indices = [0] * n
while True:
if n != 0:
for i in reversed(range(n)):
                    ## If we have not reached the last choice of the current index
if indices[i] != len(ix_bound[i]): # Within boundary, just out of the loop
break
else: # Out of boundary, reset the index.
indices[i] = 0
if i > 0:
indices[i-1] += 1
else:
return seq_space
            ## Check if the current indices are qualified.
ix_sites = [ix_bound[ss][indices[ss]] for ss in range(n)]
else:
ix_sites = []
common_sites = set().union(*ix_sites)
glyco_sites = set(self.candidate_sites).difference(common_sites)
#glyco_num = glyco_compo['HexNAc']
if len(common_sites) != sum(map(len,ix_sites)) | (num_sites > len(glyco_sites)): # Invalid config.
indices[i] += 1
continue
raw_seq = copy.deepcopy(self.seq)
for x in range(n):
for mod_site in ix_bound[x][indices[x]]:
raw_seq.addModification(mod_site, self.modifications[x].name)
## Get available glycosylation sites.
#upper_limit = (min(max_sites, len(glyco_sites)) if max_sites > 0 else len(glyco_sites))
#for m in range(1, upper_limit+1):
for sites in itertools.combinations(glyco_sites, num_sites):
temp_seq = copy.deepcopy(raw_seq)
# Append HexNAc to the corresponding sites.
for site in sites:
gly_mod = Modification("HexNAc", site, 1, Residue("HexNAc").mass, 'Asn')
temp_seq.appendModification(gly_mod)
seq_space.append(temp_seq)
if n == 0:
return seq_space
# Only increase the last index.
indices[-1] += 1
| [
"[email protected]"
] | |
57b2cd00a87e389e7a38f77e87aeadee7dc8413d | a0a0932b6ab6ec47c2757d8929216790f5bc6535 | /import_productitem.py | 7c614f08aadb009ebc8072d22b30f9530d115aa9 | [] | no_license | lianglunzhong/latte-erp | b4e6e3b13c4bce17911ff166fecc36172e0bea5b | b58936c8d9917f3efdcb3585c54bfd3aba4723c2 | refs/heads/master | 2022-11-27T03:08:23.780124 | 2017-04-28T02:51:43 | 2017-04-28T02:51:43 | 89,660,834 | 0 | 0 | null | 2022-11-22T01:04:12 | 2017-04-28T02:48:50 | Python | UTF-8 | Python | false | false | 3,751 | py | # -*- coding: utf-8 -*-
import datetime
from django.utils import timezone
import sys, os
reload(sys)
sys.setdefaultencoding('utf-8')
import csv
sys.path.append(os.getcwd())
os.environ['DJANGO_SETTINGS_MODULE'] = 'project.settings'
import django
django.setup()
from product.models import *
from order.models import *
# Generate attribute products (items) from products and product attributes
products = Product.objects.all().order_by('id')
# products = Product.objects.filter(id=5393)
for p in products:
# print 'cate',p.category_id,p.description
category = Category.objects.get(pk=p.category_id)
    # Update the product SKU code
# p.sku = str(category.code)+str(p.id)
# p.sku = u"%s%06d" % (category.code, p.id)
# p.save()
# for attribute in category.attributes.all().exclude(id=11):
# # print 'attr_id',attribute.id
# product_attribute, is_created = ProductAttribute.objects.get_or_create(attribute_id=attribute.id,product_id=p.id)
product_attributes = ProductAttribute.objects.filter(product_id=p.id).exclude(attribute_id=11)
for product_attribute in product_attributes:
# print product_attribute.attribute_id
options = p.description.split('#')
for opx in options:
op = opx.replace('SIZE:', '').replace(' ', '').strip().upper()
if "ONE" in op:
op = 'ONESIZE'
elif not op:
op = 'ONESIZE'
print 'not op', opx
elif op in ('????', "均码",'???','error'):
op = 'ONESIZE'
print 'is ?', opx
elif op == 'X':
op = "XL"
elif len(op) == 3 and op[1:] == 'XL' and op[0] != 'X':
try:
op = int(op[0]) * 'X' + 'L'
except Exception,e:
print opx,'#', p.id,'#', p.sku,'#', p.choies_sku
# print 'op',op
try:
option = Option.objects.get(name=op,attribute_id=product_attribute.attribute_id)
product_attribute.options.add(option)
# # item_str = str(p.id) +'-0-'+str(option.id)
# item_str = str(p.id) +'-'+str(option.id)
# # item_sku = u"%s-0-%s"% (p.sku,option.name)
# item_sku = u"%s%s"% (p.sku,option.code)
# item, is_created = Item.objects.get_or_create(product_id=p.id, key=item_str,sku=item_sku)
# # print 'item_str',item_str
            # # Generate an alias for the choies channel from the SKU in the ws system
# sku_str = str(p.choies_sku)+'-'+str(option.name)
# # print 'sku_str',sku_str,'item_id',item.id
# Alias.objects.get_or_create(sku=sku_str,channel_id=1,item_id=item.id)
except Exception,e:
print opx,'#', p.id,'#', p.sku,'#', p.choies_sku,'# save no',e
exit()
# Get all existing categories and their attribute options from the product table
products = Product.objects.filter(id__gte=306).values('category_id','description').distinct()
temp = {}
i=0
for p in products:
# print p
i= i+1
# print p.category_id,p.description
if temp.has_key(p['category_id']):
temp[p['category_id']] = temp[p['category_id']] + '#'+p['description']
else:
temp[p['category_id']] = p['description']
fieldnames = ['分类id', '属性选项']
dict_writer = csv.writer(open('category_data.csv','wb'))
dict_writer.writerow(fieldnames)
for key,value in temp.iteritems():
temp[key] = value.split('#')
temp[key] = list(set(temp[key]))
cate = Category.objects.filter(id=key,id__gte=354).values('name')
print cate[0]['name']
temp2 = [key, cate[0]['name'], '#'.join(str(e) for e in temp[key])]
dict_writer.writerow(temp2)
print temp
exit()
| [
"[email protected]"
] | |
b2221a99054c2bd032ff2e756d2c70e772bb434b | 233b2958c853dc57dfa5d54caddbc1520dcc35c8 | /ava/runtime/config.py | 4e76f2a43ffde0aeb8268ac973bff3b13fc8e9f6 | [] | no_license | eavatar/ava.node | 6295ac6ed5059ebcb6ce58ef6e75adf1bfa24ed7 | 71e3304d038634ef13f44d245c3838d276a275e6 | refs/heads/master | 2021-01-19T06:13:01.127585 | 2015-06-03T03:10:59 | 2015-06-03T03:10:59 | 33,645,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | # -*- coding: utf-8 -*-
"""
Configuration file reading/writing.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import codecs
import logging
import logging.config
import os.path
from string import Template
from yaml import load, dump
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from ava.runtime import environ
AGENT_CONF = os.path.join(environ.conf_dir(), u'ava.yml')
# The default configuration file is located at the base directory.
settings = dict(base_dir=environ.base_dir(),
conf_dir=environ.conf_dir(),
data_dir=environ.data_dir(),
pkgs_dir=environ.pkgs_dir(),
logs_dir=environ.logs_dir(),
mods_dir=environ.mods_dir(),
)
def load_conf(conf_file):
if not os.path.exists(conf_file):
return {}
data = codecs.open(conf_file, 'rb', encoding='utf-8').read()
if len(data.strip()) == 0:
return {}
template = Template(data)
data = template.substitute(**settings)
return load(data, Loader=Loader)
def save_conf(conf_file, content):
out = codecs.open(conf_file, 'wb', encoding='utf-8')
out.write(dump(content, Dumper=Dumper, default_flow_style=False,
indent=4, width=80))
settings.update(load_conf(AGENT_CONF))
# configure logging
logging.config.dictConfig(settings['logging'])
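# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal round trip through save_conf/load_conf; the file name 'example.yml' and the
# 'greeting' key are hypothetical and used only for illustration.
if __name__ == '__main__':
    demo_conf = os.path.join(environ.conf_dir(), u'example.yml')
    save_conf(demo_conf, {'greeting': 'hello', 'home': '${base_dir}'})
    # '${base_dir}' is substituted from the settings dict when the file is read back.
    print(load_conf(demo_conf))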
| [
"[email protected]"
] | |
23ff794c191939821dfe1e0a1e6ee0c35f90e884 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/desktopvirtualization/v20201019preview/application_group.py | 2faebd8d2ef7474036d1b9203e874ce21b32a2a9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,527 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['ApplicationGroupArgs', 'ApplicationGroup']
@pulumi.input_type
class ApplicationGroupArgs:
def __init__(__self__, *,
application_group_type: pulumi.Input[Union[str, 'ApplicationGroupType']],
host_pool_arm_path: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
application_group_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
        The set of arguments for constructing an ApplicationGroup resource.
:param pulumi.Input[Union[str, 'ApplicationGroupType']] application_group_type: Resource Type of ApplicationGroup.
:param pulumi.Input[str] host_pool_arm_path: HostPool arm path of ApplicationGroup.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] application_group_name: The name of the application group
:param pulumi.Input[str] description: Description of ApplicationGroup.
:param pulumi.Input[str] friendly_name: Friendly name of ApplicationGroup.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "application_group_type", application_group_type)
pulumi.set(__self__, "host_pool_arm_path", host_pool_arm_path)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if application_group_name is not None:
pulumi.set(__self__, "application_group_name", application_group_name)
if description is not None:
pulumi.set(__self__, "description", description)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="applicationGroupType")
def application_group_type(self) -> pulumi.Input[Union[str, 'ApplicationGroupType']]:
"""
Resource Type of ApplicationGroup.
"""
return pulumi.get(self, "application_group_type")
@application_group_type.setter
def application_group_type(self, value: pulumi.Input[Union[str, 'ApplicationGroupType']]):
pulumi.set(self, "application_group_type", value)
@property
@pulumi.getter(name="hostPoolArmPath")
def host_pool_arm_path(self) -> pulumi.Input[str]:
"""
HostPool arm path of ApplicationGroup.
"""
return pulumi.get(self, "host_pool_arm_path")
@host_pool_arm_path.setter
def host_pool_arm_path(self, value: pulumi.Input[str]):
pulumi.set(self, "host_pool_arm_path", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="applicationGroupName")
def application_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the application group
"""
return pulumi.get(self, "application_group_name")
@application_group_name.setter
def application_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_group_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of ApplicationGroup.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of ApplicationGroup.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ApplicationGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_group_name: Optional[pulumi.Input[str]] = None,
application_group_type: Optional[pulumi.Input[Union[str, 'ApplicationGroupType']]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_pool_arm_path: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
        Represents an ApplicationGroup definition.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_group_name: The name of the application group
:param pulumi.Input[Union[str, 'ApplicationGroupType']] application_group_type: Resource Type of ApplicationGroup.
:param pulumi.Input[str] description: Description of ApplicationGroup.
:param pulumi.Input[str] friendly_name: Friendly name of ApplicationGroup.
:param pulumi.Input[str] host_pool_arm_path: HostPool arm path of ApplicationGroup.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ApplicationGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Represents an ApplicationGroup definition.
:param str resource_name: The name of the resource.
:param ApplicationGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ApplicationGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_group_name: Optional[pulumi.Input[str]] = None,
application_group_type: Optional[pulumi.Input[Union[str, 'ApplicationGroupType']]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
host_pool_arm_path: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ApplicationGroupArgs.__new__(ApplicationGroupArgs)
__props__.__dict__["application_group_name"] = application_group_name
if application_group_type is None and not opts.urn:
raise TypeError("Missing required property 'application_group_type'")
__props__.__dict__["application_group_type"] = application_group_type
__props__.__dict__["description"] = description
__props__.__dict__["friendly_name"] = friendly_name
if host_pool_arm_path is None and not opts.urn:
raise TypeError("Missing required property 'host_pool_arm_path'")
__props__.__dict__["host_pool_arm_path"] = host_pool_arm_path
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
__props__.__dict__["workspace_arm_path"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201019preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20190123preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190123preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20190924preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190924preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20191210preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20191210preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20200921preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20200921preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201102preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201102preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201110preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201110preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210114preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210114preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210201preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210201preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210309preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210309preview:ApplicationGroup"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210401preview:ApplicationGroup"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210401preview:ApplicationGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApplicationGroup, __self__).__init__(
'azure-native:desktopvirtualization/v20201019preview:ApplicationGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApplicationGroup':
"""
Get an existing ApplicationGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ApplicationGroupArgs.__new__(ApplicationGroupArgs)
__props__.__dict__["application_group_type"] = None
__props__.__dict__["description"] = None
__props__.__dict__["friendly_name"] = None
__props__.__dict__["host_pool_arm_path"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["workspace_arm_path"] = None
return ApplicationGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="applicationGroupType")
def application_group_type(self) -> pulumi.Output[str]:
"""
Resource Type of ApplicationGroup.
"""
return pulumi.get(self, "application_group_type")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of ApplicationGroup.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
Friendly name of ApplicationGroup.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="hostPoolArmPath")
def host_pool_arm_path(self) -> pulumi.Output[str]:
"""
HostPool arm path of ApplicationGroup.
"""
return pulumi.get(self, "host_pool_arm_path")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="workspaceArmPath")
def workspace_arm_path(self) -> pulumi.Output[str]:
"""
Workspace arm path of ApplicationGroup.
"""
return pulumi.get(self, "workspace_arm_path")
| [
"[email protected]"
] | |
cffdbf9595a022545dadfca42fab82415426fe39 | 3a186f09753b63e87c0502e88f33c992f561e403 | /luna.py | d4c01d34900662ee4390cb280d3b936b4890d6b7 | [] | no_license | qwergram/cio2016_server | 88d98e217d7f1cc1415b14a4804b9a4417d1143b | 071efd99bad8635031c74409dab949aae1a5d384 | refs/heads/master | 2021-01-10T04:50:34.105495 | 2016-03-06T09:44:49 | 2016-03-06T09:44:49 | 53,247,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,481 | py | import bottle
import os
import sqlite3
import json
class CRUD:
def __init__(self, location='/etc/luna/'):
self.location = location
self.reset()
def reset(self):
with open(self.location + 'active.sqlite3', 'w') as r:
r.write('')
self.conn = sqlite3.connect(self.location + 'active.sqlite3')
self.c = self.conn.cursor()
self.c.execute('CREATE TABLE users (first text, last text, status text)')
self.conn.commit()
def get(self, key=None):
self.c.execute('SELECT * FROM users WHERE status=? LIMIT 1', ('',))
line = self.c.fetchone()
if line and key:
self.c.execute('UPDATE users SET status = ? WHERE first = ? AND last = ? AND status = ?', (key, line[0], line[1], ''))
self.conn.commit()
return list(line)
elif line:
return list(line)
else:
return False
def confirm(self, fname, lname, key):
self.c.execute('SELECT * FROM users WHERE first = ? AND last = ? AND status = ?', (fname, lname, key))
line = self.c.fetchone()
if line:
self.remove(fname, lname)
return True
else:
return False
def rturn(self, fname, lname, key):
self.c.execute('SELECT * FROM users WHERE status=? LIMIT 1', (key,))
line = self.c.fetchone()
if line:
self.c.execute('UPDATE users SET status = ? WHERE first = ? AND last = ? AND status = ?', ('', line[0], line[1], key))
self.conn.commit()
return True
else:
return False
def add(self, first, last, status=''):
self.c.execute('INSERT INTO users VALUES (?,?,?)', (first, last, status))
self.conn.commit()
def remove(self, first, last):
self.c.execute('DELETE FROM users WHERE first = ? AND last = ?', (first, last))
self.conn.commit()
def inport(self):
with open(self.location + 'import.csv') as to_import:
to_import = to_import.readlines()
for line in to_import:
line = line.strip().split(',')
if line[0] == 'add':
self.add(line[1], line[2], '')
elif line[0] == 'remove':
self.remove(line[1], line[2])
def export(self):
self.c.execute('SELECT * FROM users')
exp = self.c.fetchall()
for i, line in enumerate(exp):
exp[i] = ','.join(line)
        with open(self.location + 'export.csv', 'w') as to_export:
            # Write every exported row on its own line.
            to_export.write('\n'.join(exp))
C = CRUD()
def check_environment(location):
global LOCATION
LOCATION = location
print("Checking Server environment...")
if os.path.exists(location):
print("Luna has been run before!")
return True
else:
os.makedirs(location)
print("Building Luna config files...")
os.system("sudo touch " + location + 'stats.json')
os.system("sudo touch " + location + 'config.json')
os.system("sudo touch " + location + 'import.csv')
os.system("sudo touch " + location + 'export.csv')
os.system("sudo touch " + location + 'active.sqlite3')
STATS = {
"key_usage": {},
"left": [],
"unconfirmed": [],
"completed": [],
"errors": 0,
}
def log_key(key, action):
if not key in STATS['key_usage']:
STATS['key_usage'][key] = {
"get": 0,
"confirm": 0,
"return": 0,
"coffee_breaks": 0,
}
STATS['key_usage'][key][action] += 1
with open(LOCATION + '/stats.json', 'w') as log:
log.write(json.dumps(STATS, indent=4))
@bottle.get('/<key>/about')
def about(key):
global ERRORS, STATS
bottle.response.content_type = 'application/json'
log_key(key, "coffee_breaks")
return json.dumps(STATS, indent=2)
@bottle.get('/<key>/get')
def get(key):
bottle.response.content_type = 'application/json'
db_response = C.get(key)
if not db_response:
log_key(key, "coffee_breaks")
return json.dumps({"status": "wait", "duration": 10, "msg": "+1 Coffee"}, indent=2)
elif db_response:
if not (db_response[0], db_response[1]) in STATS['unconfirmed']:
STATS['unconfirmed'].append([db_response[0], db_response[1]])
log_key(key, 'get')
return json.dumps({"status": "image", "fname": db_response[0], "lname": db_response[1]}, indent=2)
@bottle.get('/<key>/confirm/<fname>/<lname>')
def confirm(key, fname, lname):
bottle.response.content_type = 'application/json'
db_response = C.confirm(fname, lname, key)
if db_response:
log_key(key, 'confirm')
log_key(key, 'coffee_breaks')
log_key(key, 'coffee_breaks')
return json.dumps({"status": "confirmed", "fname": fname, "lname": lname, "msg": "+2 Coffee"}, indent=2)
else:
STATS['errors'] += 1
return json.dumps({"status": "error", "error": "LN_4"}, indent=2)
@bottle.get("/<key>/return/<fname>/<lname>")
def rturn(key, fname, lname):
bottle.response.content_type = 'application/json'
db_response = C.rturn(fname, lname, key)
if db_response:
log_key(key, 'return')
return json.dumps({"status": "returned", "fname": fname, "lname": lname}, indent=2)
else:
STATS['errors'] += 1
return json.dumps({"status": "error", "error": "LN_2"}, indent=2)
def main(location='/etc/luna/'):
check_environment(location)
# with open(location + 'config.json') as config:
# config = json.loads(config.read().strip())
print("[n] What would you like to do?")
print("[n] 1. Import a csv")
print("[n] 2. Export a csv")
print("[n] 3. Reset active server")
print("[n] 4. Launch the server")
while True:
option = input("[n] Type the order you want: (e.g. 213 exports, imports and then runs the server)")
okay = True
for task in option:
if task in '1234':
okay = True
else:
okay = False
break
if okay:
break
print("[n] Invalid options. ")
for task in option:
if task == '1':
C.inport()
elif task == '2':
C.export()
elif task == '3':
C.reset()
elif task == '4':
bottle.run(host='0.0.0.0', port=8000, debug=True)
if __name__ == "__main__":
print("Hello. Activating Luna build RS25B7!")
main()
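# --- Illustrative client flow (editor's addition, not part of the original server) ---
# A rough sketch of how a scanning station might drive the endpoints above, assuming the
# server listens on localhost:8000 and the client identifies itself with an arbitrary
# key such as "station1" (both values are made up here):
#   GET /station1/get                      -> claim the next free name, or be told to wait
#   GET /station1/confirm/<fname>/<lname>  -> mark the claimed name as completed
#   GET /station1/return/<fname>/<lname>   -> release a claimed name back to the pool
#   GET /station1/about                    -> dump the running statistics as JSON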
| [
"[email protected]"
] | |
ed25c19719c15e6a359c0cb01b3711f8f78c1661 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2734/59137/312747.py | 32ed5d4dbf4a1e4cb7db8a81634c5d8d187dd4ec | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | s = input()
if s == "5 3 5":
print(2)
print(0)
print(0)
print(1)
print(0)
elif s == "8 3 5":
s1 = input()
s2 = input()
s3 = input()
if s3 == "6 8":
print(1)
print(1)
print(2)
print(2)
print(1)
elif s3 == "1 8":
print(1)
print(2)
print(1)
print(0)
print(0)
else:
print(" ", s3)
elif s == "8 4 5":
print(3)
print(3)
print(3)
print(3)
print(3)
elif s == "5 3 3":
print(0)
print(1)
print(0)
else:
print(1)
print(1)
print(0) | [
"[email protected]"
] | |
7ce62fcf3e249909c34273756aebfac403c2b879 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/23/usersdata/134/12369/submittedfiles/av1_2.py | 4f5a24414af8bcff93f9204bbb739083ba7a9bd2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
n = int(input('Digite n:'))
x1 = int(input('Digite a coordenada em x para a figura 1:'))
y1 = int(input('Digite a coordenada em y para a figura 1:'))
x2 = int(input('Digite a coordenada em x para a figura 2:'))
y2 = int(input('Digite a coordenada em y para a figura 2:'))
for i in range (1,n+1,1):
if n%2==0:
if (x1<=(n/2) and x2>(n/2)) or (x2<=(n/2) and x1>(n/2)):
print ('S')
break
elif (y1<=(n/2) and y2>(n/2)) or (y2<=(n/2) and y1>(n/2)):
print ('S')
else:
print ('N')
| [
"[email protected]"
] | |
ec61edb372da268e0930cb58292ef8c914745487 | c77f1d4976d241574a9bf68ee035632a010cdc85 | /qualification/migrations/0003_auto_20190102_1150.py | a59750689f991a27692f605996293a2b3e986d03 | [] | no_license | alifarazz/csesa-django | e24847fb1a7a2dc0c0f56f396b66c28d63efc869 | 7d77686b95796b30d5c65957776b2bbe927445b5 | refs/heads/master | 2020-04-27T13:27:10.119436 | 2019-03-07T16:23:37 | 2019-03-07T16:23:37 | 174,370,553 | 0 | 0 | null | 2019-03-07T15:27:00 | 2019-03-07T15:26:58 | Python | UTF-8 | Python | false | false | 1,207 | py | # Generated by Django 2.0.9 on 2019-01-02 11:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('qualification', '0002_qualificationform'),
]
operations = [
migrations.CreateModel(
name='QuestionQualificationRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('place', models.IntegerField()),
],
),
migrations.RemoveField(
model_name='qualificationform',
name='questions',
),
migrations.AddField(
model_name='questionqualificationrelation',
name='form',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='qualification.QualificationForm'),
),
migrations.AddField(
model_name='questionqualificationrelation',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forms', to='qualification.Question'),
),
]
| [
"[email protected]"
] | |
d932577fc1d8b71405a05fa54c4ae2ec74119e08 | fe6f6d11dde2a3205ae9758c7d4eb1f824b84102 | /venv/lib/python2.7/site-packages/pylint/test/input/func___name___access.py | def867475829143945bd7552ef152ca874170278 | [
"MIT"
] | permissive | mutaihillary/mycalculator | ebf12a5ac90cb97c268b05606c675d64e7ccf8a6 | 55685dd7c968861f18ae0701129f5af2bc682d67 | refs/heads/master | 2023-01-10T14:56:11.780045 | 2016-09-20T12:30:21 | 2016-09-20T12:30:21 | 68,580,251 | 0 | 0 | MIT | 2022-12-26T20:15:21 | 2016-09-19T07:27:48 | Python | UTF-8 | Python | false | false | 515 | py | # pylint: disable=R0903,W0142
"""test access to __name__ gives undefined member on new/old class instances
but not on new/old class object
"""
__revision__ = 1
class Aaaa:
"""old class"""
def __init__(self):
print self.__name__
print self.__class__.__name__
class NewClass(object):
"""new class"""
def __new__(cls, *args, **kwargs):
print 'new', cls.__name__
return object.__new__(cls, *args, **kwargs)
def __init__(self):
print 'init', self.__name__
| [
"[email protected]"
] | |
2e2bdefe2b4e3ce8514dd285194ed6d9f43863bd | 74b6523512f17f4c18096b956e4c3c074b53cf4c | /myNews.py | 3170f0ec9c830c21762b973cc0dd598006213758 | [] | no_license | howie6879/getNews | f7fdbd310c0e48a8a2c74504aa27893d25354ba1 | ab5ad56c8520e60d5f568deed0081dfc127b7cd9 | refs/heads/master | 2020-05-21T23:49:40.805281 | 2017-04-02T03:51:33 | 2017-04-02T03:51:33 | 59,347,631 | 49 | 23 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | """myNews
Usage: myNews [-p] <port>
Options:
    -h,--help Show this help menu
    -p Port number
Example:
    myNews -p 8888 Set the port to 8888
"""
from docopt import docopt
from server import main
def cli():
kwargs = docopt(__doc__)
port = kwargs['<port>']
main(port)
if __name__ == "__main__":
cli()
| [
"[email protected]"
] | |
9fdb4d019b5ec120c7bd4c3cbe140bf7023e5911 | e32801b4debf07340b98255eb35e2c41ba2d2bb5 | /scripts/addons_extern/animation_nodes_master/nodes/spline/spline_info.py | 83687abbd74969916131dea3e58cb5731c0728d3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | JuhaW/blenderpython | 8c7130484690339c06f85b740c2f9e595b34a9dc | ee7b3a9f9d8cfbea32258e7ff05c3cb485a8879a | refs/heads/master | 2021-07-21T23:59:42.476215 | 2017-10-25T08:42:04 | 2017-10-25T08:42:04 | 108,861,751 | 1 | 0 | null | 2017-10-30T14:25:14 | 2017-10-30T14:25:14 | null | UTF-8 | Python | false | false | 505 | py | import bpy
from ... base_types.node import AnimationNode
class SplineInfoNode(bpy.types.Node, AnimationNode):
bl_idname = "an_SplineInfoNode"
bl_label = "Spline Info"
def create(self):
self.newInput("Spline", "Spline", "spline", defaultDrawType = "PROPERTY_ONLY")
self.newOutput("Vector List", "Points", "points")
self.newOutput("Boolean", "Cyclic", "cyclic")
def execute(self, spline):
spline.update()
return spline.getPoints(), spline.isCyclic
| [
"[email protected]"
] | |
61c6ccd66c69dcc38f504e14f4d66366d9bc51e6 | b8f4b32171bba9e60a101f5a605e084c9aa974fd | /BaseTools/Source/Python/Workspace/InfBuildData.py | 7675b0ea00ebd6a5fc3e823c965e32066f66f650 | [
"BSD-3-Clause",
"BSD-2-Clause-Patent"
] | permissive | jinjhuli/slimbootloader | 3137ab83073865b247f69b09a628f8b39b4c05ee | cfba21067cf4dce659b508833d8c886967081375 | refs/heads/master | 2023-07-11T12:59:51.336343 | 2020-09-11T00:16:48 | 2020-09-11T00:24:52 | 149,729,121 | 1 | 0 | BSD-2-Clause | 2018-09-21T07:49:42 | 2018-09-21T07:49:42 | null | UTF-8 | Python | false | false | 48,567 | py | ## @file
# This file is used to create a database used by build tool
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
from __future__ import absolute_import
from Common.DataType import *
from Common.Misc import *
from Common.caching import cached_property, cached_class_function
from types import *
from .MetaFileParser import *
from collections import OrderedDict
from Workspace.BuildClassObject import ModuleBuildClassObject, LibraryClassObject, PcdClassObject
## Get Protocol value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def _ProtocolValue(CName, PackageList, Inffile = None):
for P in PackageList:
ProtocolKeys = list(P.Protocols.keys())
if Inffile and P._PrivateProtocols:
if not Inffile.startswith(P.MetaFile.Dir):
ProtocolKeys = [x for x in P.Protocols if x not in P._PrivateProtocols]
if CName in ProtocolKeys:
return P.Protocols[CName]
return None
## Get PPI value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def _PpiValue(CName, PackageList, Inffile = None):
for P in PackageList:
PpiKeys = list(P.Ppis.keys())
if Inffile and P._PrivatePpis:
if not Inffile.startswith(P.MetaFile.Dir):
PpiKeys = [x for x in P.Ppis if x not in P._PrivatePpis]
if CName in PpiKeys:
return P.Ppis[CName]
return None
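# Illustrative note (editor's addition): given a module's package list, the two helpers
# above resolve a C name such as "gEfiSimpleTextOutProtocolGuid" (an arbitrary example)
# to its value by scanning each package's [Protocols]/[Ppis] declarations; names marked
# private are only visible to INF files located under the declaring package's directory.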
## Module build information from INF file
#
# This class is used to retrieve information stored in the database and convert it
# into ModuleBuildClassObject form for easier use by AutoGen.
#
class InfBuildData(ModuleBuildClassObject):
# dict used to convert PCD type in database to string used by build tool
_PCD_TYPE_STRING_ = {
MODEL_PCD_FIXED_AT_BUILD : TAB_PCDS_FIXED_AT_BUILD,
MODEL_PCD_PATCHABLE_IN_MODULE : TAB_PCDS_PATCHABLE_IN_MODULE,
MODEL_PCD_FEATURE_FLAG : TAB_PCDS_FEATURE_FLAG,
MODEL_PCD_DYNAMIC : TAB_PCDS_DYNAMIC,
MODEL_PCD_DYNAMIC_DEFAULT : TAB_PCDS_DYNAMIC,
MODEL_PCD_DYNAMIC_HII : TAB_PCDS_DYNAMIC_HII,
MODEL_PCD_DYNAMIC_VPD : TAB_PCDS_DYNAMIC_VPD,
MODEL_PCD_DYNAMIC_EX : TAB_PCDS_DYNAMIC_EX,
MODEL_PCD_DYNAMIC_EX_DEFAULT : TAB_PCDS_DYNAMIC_EX,
MODEL_PCD_DYNAMIC_EX_HII : TAB_PCDS_DYNAMIC_EX_HII,
MODEL_PCD_DYNAMIC_EX_VPD : TAB_PCDS_DYNAMIC_EX_VPD,
}
# dict used to convert part of [Defines] to members of InfBuildData directly
_PROPERTY_ = {
#
# Required Fields
#
TAB_INF_DEFINES_BASE_NAME : "_BaseName",
TAB_INF_DEFINES_FILE_GUID : "_Guid",
TAB_INF_DEFINES_MODULE_TYPE : "_ModuleType",
#
# Optional Fields
#
# TAB_INF_DEFINES_INF_VERSION : "_AutoGenVersion",
TAB_INF_DEFINES_COMPONENT_TYPE : "_ComponentType",
TAB_INF_DEFINES_MAKEFILE_NAME : "_MakefileName",
# TAB_INF_DEFINES_CUSTOM_MAKEFILE : "_CustomMakefile",
TAB_INF_DEFINES_DPX_SOURCE :"_DxsFile",
TAB_INF_DEFINES_VERSION_NUMBER : "_Version",
TAB_INF_DEFINES_VERSION_STRING : "_Version",
TAB_INF_DEFINES_VERSION : "_Version",
TAB_INF_DEFINES_PCD_IS_DRIVER : "_PcdIsDriver",
TAB_INF_DEFINES_SHADOW : "_Shadow"
}
# regular expression for converting XXX_FLAGS in [nmake] section to new type
_NMAKE_FLAG_PATTERN_ = re.compile("(?:EBC_)?([A-Z]+)_(?:STD_|PROJ_|ARCH_)?FLAGS(?:_DLL|_ASL|_EXE)?", re.UNICODE)
# dict used to convert old tool name used in [nmake] section to new ones
_TOOL_CODE_ = {
"C" : "CC",
BINARY_FILE_TYPE_LIB : "SLINK",
"LINK" : "DLINK",
}
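    # Illustrative note (editor's addition): the pattern and table above translate Edk.x
    # [nmake] flag macros into EDK II tool codes.  For example, hypothetical names such as
    # "EBC_C_FLAGS" or "C_PROJ_FLAGS" both capture "C", which _TOOL_CODE_ maps to "CC";
    # "LIB_STD_FLAGS" maps to "SLINK" and "LINK_FLAGS" to "DLINK".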
## Constructor of InfBuildData
#
# Initialize object of InfBuildData
#
    #  @param      FilePath        The path of the module description (INF) file
    #  @param      RawData         The raw data of the INF file
# @param BuildDataBase Database used to retrieve module/package information
# @param Arch The target architecture
# @param Platform The name of platform employing this module
# @param Macros Macros used for replacement in DSC file
#
def __init__(self, FilePath, RawData, BuildDatabase, Arch=TAB_ARCH_COMMON, Target=None, Toolchain=None):
self.MetaFile = FilePath
self._ModuleDir = FilePath.Dir
self._RawData = RawData
self._Bdb = BuildDatabase
self._Arch = Arch
self._Target = Target
self._Toolchain = Toolchain
self._Platform = TAB_COMMON
self._TailComments = None
self._BaseName = None
self._DxsFile = None
self._ModuleType = None
self._ComponentType = None
self._BuildType = None
self._Guid = None
self._Version = None
self._PcdIsDriver = None
self._BinaryModule = None
self._Shadow = None
self._MakefileName = None
self._CustomMakefile = None
self._Specification = None
self._LibraryClass = None
self._ModuleEntryPointList = None
self._ModuleUnloadImageList = None
self._ConstructorList = None
self._DestructorList = None
self._Defs = OrderedDict()
self._ProtocolComments = None
self._PpiComments = None
self._GuidsUsedByPcd = OrderedDict()
self._GuidComments = None
self._PcdComments = None
self._BuildOptions = None
self._DependencyFileList = None
self.LibInstances = []
self.ReferenceModules = set()
def SetReferenceModule(self,Module):
self.ReferenceModules.add(Module)
return self
## XXX[key] = value
def __setitem__(self, key, value):
self.__dict__[self._PROPERTY_[key]] = value
## value = XXX[key]
def __getitem__(self, key):
return self.__dict__[self._PROPERTY_[key]]
## "in" test support
def __contains__(self, key):
return key in self._PROPERTY_
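    # Illustrative note (editor's addition): the three special methods above let [Defines]
    # keys act as subscripts.  For a hypothetical instance `Module`, the statement
    # `Module[TAB_INF_DEFINES_BASE_NAME] = "MyDriver"` stores the value in self._BaseName,
    # and `TAB_INF_DEFINES_FILE_GUID in Module` only checks membership in _PROPERTY_.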
## Get current effective macros
@cached_property
def _Macros(self):
RetVal = {}
return RetVal
## Get architecture
@cached_property
def Arch(self):
return self._Arch
## Return the name of platform employing this module
@cached_property
def Platform(self):
return self._Platform
@cached_property
def HeaderComments(self):
return [a[0] for a in self._RawData[MODEL_META_DATA_HEADER_COMMENT]]
@cached_property
def TailComments(self):
return [a[0] for a in self._RawData[MODEL_META_DATA_TAIL_COMMENT]]
## Retrieve all information in [Defines] section
#
# (Retrieving all [Defines] information in one-shot is just to save time.)
#
@cached_class_function
def _GetHeaderInfo(self):
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
for Record in RecordList:
Name, Value = Record[1], ReplaceMacro(Record[2], self._Macros, False)
# items defined _PROPERTY_ don't need additional processing
if Name in self:
self[Name] = Value
self._Defs[Name] = Value
self._Macros[Name] = Value
# some special items in [Defines] section need special treatment
elif Name in ('EFI_SPECIFICATION_VERSION', 'UEFI_SPECIFICATION_VERSION', 'EDK_RELEASE_VERSION', 'PI_SPECIFICATION_VERSION'):
if Name in ('EFI_SPECIFICATION_VERSION', 'UEFI_SPECIFICATION_VERSION'):
Name = 'UEFI_SPECIFICATION_VERSION'
if self._Specification is None:
self._Specification = OrderedDict()
self._Specification[Name] = GetHexVerValue(Value)
if self._Specification[Name] is None:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
"'%s' format is not supported for %s" % (Value, Name),
File=self.MetaFile, Line=Record[-1])
elif Name == 'LIBRARY_CLASS':
if self._LibraryClass is None:
self._LibraryClass = []
ValueList = GetSplitValueList(Value)
LibraryClass = ValueList[0]
if len(ValueList) > 1:
SupModuleList = GetSplitValueList(ValueList[1], ' ')
else:
SupModuleList = SUP_MODULE_LIST
self._LibraryClass.append(LibraryClassObject(LibraryClass, SupModuleList))
elif Name == 'ENTRY_POINT':
if self._ModuleEntryPointList is None:
self._ModuleEntryPointList = []
self._ModuleEntryPointList.append(Value)
elif Name == 'UNLOAD_IMAGE':
if self._ModuleUnloadImageList is None:
self._ModuleUnloadImageList = []
if not Value:
continue
self._ModuleUnloadImageList.append(Value)
elif Name == 'CONSTRUCTOR':
if self._ConstructorList is None:
self._ConstructorList = []
if not Value:
continue
self._ConstructorList.append(Value)
elif Name == 'DESTRUCTOR':
if self._DestructorList is None:
self._DestructorList = []
if not Value:
continue
self._DestructorList.append(Value)
elif Name == TAB_INF_DEFINES_CUSTOM_MAKEFILE:
TokenList = GetSplitValueList(Value)
if self._CustomMakefile is None:
self._CustomMakefile = {}
if len(TokenList) < 2:
self._CustomMakefile[TAB_COMPILER_MSFT] = TokenList[0]
self._CustomMakefile['GCC'] = TokenList[0]
else:
if TokenList[0] not in [TAB_COMPILER_MSFT, 'GCC']:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
"No supported family [%s]" % TokenList[0],
File=self.MetaFile, Line=Record[-1])
self._CustomMakefile[TokenList[0]] = TokenList[1]
else:
self._Defs[Name] = Value
self._Macros[Name] = Value
#
# Retrieve information in sections specific to Edk.x modules
#
if not self._ModuleType:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE,
"MODULE_TYPE is not given", File=self.MetaFile)
if self._ModuleType not in SUP_MODULE_LIST:
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
for Record in RecordList:
Name = Record[1]
if Name == "MODULE_TYPE":
LineNo = Record[6]
break
EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
"MODULE_TYPE %s is not supported for EDK II, valid values are:\n %s" % (self._ModuleType, ' '.join(l for l in SUP_MODULE_LIST)),
File=self.MetaFile, Line=LineNo)
if (self._Specification is None) or (not 'PI_SPECIFICATION_VERSION' in self._Specification) or (int(self._Specification['PI_SPECIFICATION_VERSION'], 16) < 0x0001000A):
if self._ModuleType == SUP_MODULE_SMM_CORE:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "SMM_CORE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x0001000A", File=self.MetaFile)
if (self._Specification is None) or (not 'PI_SPECIFICATION_VERSION' in self._Specification) or (int(self._Specification['PI_SPECIFICATION_VERSION'], 16) < 0x00010032):
if self._ModuleType == SUP_MODULE_MM_CORE_STANDALONE:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "MM_CORE_STANDALONE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x00010032", File=self.MetaFile)
if self._ModuleType == SUP_MODULE_MM_STANDALONE:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "MM_STANDALONE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x00010032", File=self.MetaFile)
if 'PCI_DEVICE_ID' in self._Defs and 'PCI_VENDOR_ID' in self._Defs \
and 'PCI_CLASS_CODE' in self._Defs and 'PCI_REVISION' in self._Defs:
self._BuildType = 'UEFI_OPTIONROM'
if 'PCI_COMPRESS' in self._Defs:
if self._Defs['PCI_COMPRESS'] not in ('TRUE', 'FALSE'):
EdkLogger.error("build", FORMAT_INVALID, "Expected TRUE/FALSE for PCI_COMPRESS: %s" % self.MetaFile)
elif 'UEFI_HII_RESOURCE_SECTION' in self._Defs \
and self._Defs['UEFI_HII_RESOURCE_SECTION'] == 'TRUE':
self._BuildType = 'UEFI_HII'
else:
self._BuildType = self._ModuleType.upper()
if self._DxsFile:
File = PathClass(NormPath(self._DxsFile), self._ModuleDir, Arch=self._Arch)
# check the file validation
ErrorCode, ErrorInfo = File.Validate(".dxs", CaseSensitive=False)
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo,
File=self.MetaFile, Line=LineNo)
if not self._DependencyFileList:
self._DependencyFileList = []
self._DependencyFileList.append(File)
## Retrieve file version
@cached_property
def AutoGenVersion(self):
RetVal = 0x00010000
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
for Record in RecordList:
if Record[1] == TAB_INF_DEFINES_INF_VERSION:
if '.' in Record[2]:
ValueList = Record[2].split('.')
Major = '%04o' % int(ValueList[0], 0)
Minor = '%04o' % int(ValueList[1], 0)
RetVal = int('0x' + Major + Minor, 0)
else:
RetVal = int(Record[2], 0)
break
return RetVal
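    # Worked example (editor's addition): an INF_VERSION of "0x00010005" has no dot and is
    # read directly, giving 0x00010005.  A hypothetical dotted value "1.10" is split, each
    # part rendered with '%04o' ("0001" and "0012", as 10 decimal is 12 octal), and the
    # concatenation re-read as hex, yielding 0x00010012.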
## Retrieve BASE_NAME
@cached_property
def BaseName(self):
if self._BaseName is None:
self._GetHeaderInfo()
if self._BaseName is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No BASE_NAME name", File=self.MetaFile)
return self._BaseName
## Retrieve DxsFile
@cached_property
def DxsFile(self):
if self._DxsFile is None:
self._GetHeaderInfo()
if self._DxsFile is None:
self._DxsFile = ''
return self._DxsFile
## Retrieve MODULE_TYPE
@cached_property
def ModuleType(self):
if self._ModuleType is None:
self._GetHeaderInfo()
if self._ModuleType is None:
self._ModuleType = SUP_MODULE_BASE
if self._ModuleType not in SUP_MODULE_LIST:
self._ModuleType = SUP_MODULE_USER_DEFINED
return self._ModuleType
## Retrieve COMPONENT_TYPE
@cached_property
def ComponentType(self):
if self._ComponentType is None:
self._GetHeaderInfo()
if self._ComponentType is None:
self._ComponentType = SUP_MODULE_USER_DEFINED
return self._ComponentType
## Retrieve "BUILD_TYPE"
@cached_property
def BuildType(self):
if self._BuildType is None:
self._GetHeaderInfo()
if not self._BuildType:
self._BuildType = SUP_MODULE_BASE
return self._BuildType
## Retrieve file guid
@cached_property
def Guid(self):
if self._Guid is None:
self._GetHeaderInfo()
if self._Guid is None:
self._Guid = '00000000-0000-0000-0000-000000000000'
return self._Guid
## Retrieve module version
@cached_property
def Version(self):
if self._Version is None:
self._GetHeaderInfo()
if self._Version is None:
self._Version = '0.0'
return self._Version
## Retrieve PCD_IS_DRIVER
@cached_property
def PcdIsDriver(self):
if self._PcdIsDriver is None:
self._GetHeaderInfo()
if self._PcdIsDriver is None:
self._PcdIsDriver = ''
return self._PcdIsDriver
## Retrieve SHADOW
@cached_property
def Shadow(self):
if self._Shadow is None:
self._GetHeaderInfo()
if self._Shadow and self._Shadow.upper() == 'TRUE':
self._Shadow = True
else:
self._Shadow = False
return self._Shadow
## Retrieve CUSTOM_MAKEFILE
@cached_property
def CustomMakefile(self):
if self._CustomMakefile is None:
self._GetHeaderInfo()
if self._CustomMakefile is None:
self._CustomMakefile = {}
return self._CustomMakefile
## Retrieve EFI_SPECIFICATION_VERSION
@cached_property
def Specification(self):
if self._Specification is None:
self._GetHeaderInfo()
if self._Specification is None:
self._Specification = {}
return self._Specification
## Retrieve LIBRARY_CLASS
@cached_property
def LibraryClass(self):
if self._LibraryClass is None:
self._GetHeaderInfo()
if self._LibraryClass is None:
self._LibraryClass = []
return self._LibraryClass
## Retrieve ENTRY_POINT
@cached_property
def ModuleEntryPointList(self):
if self._ModuleEntryPointList is None:
self._GetHeaderInfo()
if self._ModuleEntryPointList is None:
self._ModuleEntryPointList = []
return self._ModuleEntryPointList
## Retrieve UNLOAD_IMAGE
@cached_property
def ModuleUnloadImageList(self):
if self._ModuleUnloadImageList is None:
self._GetHeaderInfo()
if self._ModuleUnloadImageList is None:
self._ModuleUnloadImageList = []
return self._ModuleUnloadImageList
## Retrieve CONSTRUCTOR
@cached_property
def ConstructorList(self):
if self._ConstructorList is None:
self._GetHeaderInfo()
if self._ConstructorList is None:
self._ConstructorList = []
return self._ConstructorList
## Retrieve DESTRUCTOR
@cached_property
def DestructorList(self):
if self._DestructorList is None:
self._GetHeaderInfo()
if self._DestructorList is None:
self._DestructorList = []
return self._DestructorList
    ## Retrieve defines other than the ones above
@cached_property
def Defines(self):
self._GetHeaderInfo()
return self._Defs
## Retrieve binary files
@cached_class_function
def _GetBinaries(self):
RetVal = []
RecordList = self._RawData[MODEL_EFI_BINARY_FILE, self._Arch, self._Platform]
Macros = self._Macros
Macros['PROCESSOR'] = self._Arch
for Record in RecordList:
FileType = Record[0]
LineNo = Record[-1]
Target = TAB_COMMON
FeatureFlag = []
if Record[2]:
TokenList = GetSplitValueList(Record[2], TAB_VALUE_SPLIT)
if TokenList:
Target = TokenList[0]
if len(TokenList) > 1:
FeatureFlag = Record[1:]
File = PathClass(NormPath(Record[1], Macros), self._ModuleDir, '', FileType, True, self._Arch, '', Target)
# check the file validation
ErrorCode, ErrorInfo = File.Validate()
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
RetVal.append(File)
return RetVal
## Retrieve binary files with error check.
@cached_property
def Binaries(self):
RetVal = self._GetBinaries()
if GlobalData.gIgnoreSource and not RetVal:
ErrorInfo = "The INF file does not contain any RetVal to use in creating the image\n"
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, ExtraData=ErrorInfo, File=self.MetaFile)
return RetVal
## Retrieve source files
@cached_property
def Sources(self):
self._GetHeaderInfo()
# Ignore all source files in a binary build mode
if GlobalData.gIgnoreSource:
return []
RetVal = []
RecordList = self._RawData[MODEL_EFI_SOURCE_FILE, self._Arch, self._Platform]
Macros = self._Macros
for Record in RecordList:
LineNo = Record[-1]
ToolChainFamily = Record[1]
TagName = Record[2]
ToolCode = Record[3]
File = PathClass(NormPath(Record[0], Macros), self._ModuleDir, '',
'', False, self._Arch, ToolChainFamily, '', TagName, ToolCode)
# check the file validation
ErrorCode, ErrorInfo = File.Validate()
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
RetVal.append(File)
# add any previously found dependency files to the source list
if self._DependencyFileList:
RetVal.extend(self._DependencyFileList)
return RetVal
## Retrieve library classes employed by this module
@cached_property
def LibraryClasses(self):
RetVal = OrderedDict()
RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, self._Platform]
for Record in RecordList:
Lib = Record[0]
Instance = Record[1]
if Instance:
Instance = NormPath(Instance, self._Macros)
RetVal[Lib] = Instance
else:
RetVal[Lib] = None
return RetVal
## Retrieve library names (for Edk.x style of modules)
@cached_property
def Libraries(self):
RetVal = []
RecordList = self._RawData[MODEL_EFI_LIBRARY_INSTANCE, self._Arch, self._Platform]
for Record in RecordList:
LibraryName = ReplaceMacro(Record[0], self._Macros, False)
# in case of name with '.lib' extension, which is unusual in Edk.x inf
LibraryName = os.path.splitext(LibraryName)[0]
if LibraryName not in RetVal:
RetVal.append(LibraryName)
return RetVal
@cached_property
def ProtocolComments(self):
self.Protocols
return self._ProtocolComments
## Retrieve protocols consumed/produced by this module
@cached_property
def Protocols(self):
RetVal = OrderedDict()
self._ProtocolComments = OrderedDict()
RecordList = self._RawData[MODEL_EFI_PROTOCOL, self._Arch, self._Platform]
for Record in RecordList:
CName = Record[0]
Value = _ProtocolValue(CName, self.Packages, self.MetaFile.Path)
if Value is None:
PackageList = "\n\t".join(str(P) for P in self.Packages)
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of Protocol [%s] is not found under [Protocols] section in" % CName,
ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
RetVal[CName] = Value
CommentRecords = self._RawData[MODEL_META_DATA_COMMENT, self._Arch, self._Platform, Record[5]]
self._ProtocolComments[CName] = [a[0] for a in CommentRecords]
return RetVal
@cached_property
def PpiComments(self):
self.Ppis
return self._PpiComments
## Retrieve PPIs consumed/produced by this module
@cached_property
def Ppis(self):
RetVal = OrderedDict()
self._PpiComments = OrderedDict()
RecordList = self._RawData[MODEL_EFI_PPI, self._Arch, self._Platform]
for Record in RecordList:
CName = Record[0]
Value = _PpiValue(CName, self.Packages, self.MetaFile.Path)
if Value is None:
PackageList = "\n\t".join(str(P) for P in self.Packages)
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of PPI [%s] is not found under [Ppis] section in " % CName,
ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
RetVal[CName] = Value
CommentRecords = self._RawData[MODEL_META_DATA_COMMENT, self._Arch, self._Platform, Record[5]]
self._PpiComments[CName] = [a[0] for a in CommentRecords]
return RetVal
@cached_property
def GuidComments(self):
self.Guids
return self._GuidComments
## Retrieve GUIDs consumed/produced by this module
@cached_property
def Guids(self):
RetVal = OrderedDict()
self._GuidComments = OrderedDict()
RecordList = self._RawData[MODEL_EFI_GUID, self._Arch, self._Platform]
for Record in RecordList:
CName = Record[0]
Value = GuidValue(CName, self.Packages, self.MetaFile.Path)
if Value is None:
PackageList = "\n\t".join(str(P) for P in self.Packages)
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of Guid [%s] is not found under [Guids] section in" % CName,
ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
RetVal[CName] = Value
CommentRecords = self._RawData[MODEL_META_DATA_COMMENT, self._Arch, self._Platform, Record[5]]
self._GuidComments[CName] = [a[0] for a in CommentRecords]
for Type in [MODEL_PCD_FIXED_AT_BUILD,MODEL_PCD_PATCHABLE_IN_MODULE,MODEL_PCD_FEATURE_FLAG,MODEL_PCD_DYNAMIC,MODEL_PCD_DYNAMIC_EX]:
RecordList = self._RawData[Type, self._Arch, self._Platform]
for TokenSpaceGuid, _, _, _, _, _, LineNo in RecordList:
# get the guid value
if TokenSpaceGuid not in RetVal:
Value = GuidValue(TokenSpaceGuid, self.Packages, self.MetaFile.Path)
if Value is None:
PackageList = "\n\t".join(str(P) for P in self.Packages)
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of Guid [%s] is not found under [Guids] section in" % TokenSpaceGuid,
ExtraData=PackageList, File=self.MetaFile, Line=LineNo)
RetVal[TokenSpaceGuid] = Value
self._GuidsUsedByPcd[TokenSpaceGuid] = Value
return RetVal
## Retrieve include paths necessary for this module (for Edk.x style of modules)
@cached_property
def Includes(self):
RetVal = []
Macros = self._Macros
Macros['PROCESSOR'] = GlobalData.gEdkGlobal.get('PROCESSOR', self._Arch)
RecordList = self._RawData[MODEL_EFI_INCLUDE, self._Arch, self._Platform]
for Record in RecordList:
File = NormPath(Record[0], Macros)
if File[0] == '.':
File = os.path.join(self._ModuleDir, File)
else:
File = mws.join(GlobalData.gWorkspace, File)
File = RealPath(os.path.normpath(File))
if File:
RetVal.append(File)
return RetVal
## Retrieve packages this module depends on
@cached_property
def Packages(self):
RetVal = []
RecordList = self._RawData[MODEL_META_DATA_PACKAGE, self._Arch, self._Platform]
Macros = self._Macros
for Record in RecordList:
File = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
# check the file validation
ErrorCode, ErrorInfo = File.Validate('.dec')
if ErrorCode != 0:
LineNo = Record[-1]
EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
# parse this package now. we need it to get protocol/ppi/guid value
RetVal.append(self._Bdb[File, self._Arch, self._Target, self._Toolchain])
return RetVal
## Retrieve PCD comments
@cached_property
def PcdComments(self):
self.Pcds
return self._PcdComments
## Retrieve PCDs used in this module
@cached_property
def Pcds(self):
self._PcdComments = OrderedDict()
RetVal = OrderedDict()
RetVal.update(self._GetPcd(MODEL_PCD_FIXED_AT_BUILD))
RetVal.update(self._GetPcd(MODEL_PCD_PATCHABLE_IN_MODULE))
RetVal.update(self._GetPcd(MODEL_PCD_FEATURE_FLAG))
RetVal.update(self._GetPcd(MODEL_PCD_DYNAMIC))
RetVal.update(self._GetPcd(MODEL_PCD_DYNAMIC_EX))
return RetVal
@cached_property
def ModulePcdList(self):
RetVal = self.Pcds
return RetVal
@cached_property
def LibraryPcdList(self):
if bool(self.LibraryClass):
return []
RetVal = {}
Pcds = set()
for Library in self.LibInstances:
PcdsInLibrary = OrderedDict()
for Key in Library.Pcds:
if Key in self.Pcds or Key in Pcds:
continue
Pcds.add(Key)
PcdsInLibrary[Key] = copy.copy(Library.Pcds[Key])
RetVal[Library] = PcdsInLibrary
return RetVal
@cached_property
def PcdsName(self):
PcdsName = set()
for Type in (MODEL_PCD_FIXED_AT_BUILD,MODEL_PCD_PATCHABLE_IN_MODULE,MODEL_PCD_FEATURE_FLAG,MODEL_PCD_DYNAMIC,MODEL_PCD_DYNAMIC_EX):
RecordList = self._RawData[Type, self._Arch, self._Platform]
for TokenSpaceGuid, PcdCName, _, _, _, _, _ in RecordList:
PcdsName.add((PcdCName, TokenSpaceGuid))
return PcdsName
## Retrieve build options specific to this module
@cached_property
def BuildOptions(self):
if self._BuildOptions is None:
self._BuildOptions = OrderedDict()
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, self._Platform]
for Record in RecordList:
ToolChainFamily = Record[0]
ToolChain = Record[1]
Option = Record[2]
if (ToolChainFamily, ToolChain) not in self._BuildOptions or Option.startswith('='):
self._BuildOptions[ToolChainFamily, ToolChain] = Option
else:
# concatenate the option string if they're for the same tool
OptionString = self._BuildOptions[ToolChainFamily, ToolChain]
self._BuildOptions[ToolChainFamily, ToolChain] = OptionString + " " + Option
return self._BuildOptions
## Retrieve dependency expression
@cached_property
def Depex(self):
RetVal = tdict(False, 2)
# If the module has only Binaries and no Sources, then ignore [Depex]
if not self.Sources and self.Binaries:
return RetVal
RecordList = self._RawData[MODEL_EFI_DEPEX, self._Arch]
# PEIM and DXE drivers must have a valid [Depex] section
if len(self.LibraryClass) == 0 and len(RecordList) == 0:
if self.ModuleType == SUP_MODULE_DXE_DRIVER or self.ModuleType == SUP_MODULE_PEIM or self.ModuleType == SUP_MODULE_DXE_SMM_DRIVER or \
self.ModuleType == SUP_MODULE_DXE_SAL_DRIVER or self.ModuleType == SUP_MODULE_DXE_RUNTIME_DRIVER:
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "No [Depex] section or no valid expression in [Depex] section for [%s] module" \
% self.ModuleType, File=self.MetaFile)
if len(RecordList) != 0 and (self.ModuleType == SUP_MODULE_USER_DEFINED or self.ModuleType == SUP_MODULE_HOST_APPLICATION):
for Record in RecordList:
if Record[4] not in [SUP_MODULE_PEIM, SUP_MODULE_DXE_DRIVER, SUP_MODULE_DXE_SMM_DRIVER]:
EdkLogger.error('build', FORMAT_INVALID,
"'%s' module must specify the type of [Depex] section" % self.ModuleType,
File=self.MetaFile)
TemporaryDictionary = OrderedDict()
for Record in RecordList:
DepexStr = ReplaceMacro(Record[0], self._Macros, False)
Arch = Record[3]
ModuleType = Record[4]
TokenList = DepexStr.split()
if (Arch, ModuleType) not in TemporaryDictionary:
TemporaryDictionary[Arch, ModuleType] = []
DepexList = TemporaryDictionary[Arch, ModuleType]
for Token in TokenList:
if Token in DEPEX_SUPPORTED_OPCODE_SET:
DepexList.append(Token)
elif Token.endswith(".inf"): # module file name
ModuleFile = os.path.normpath(Token)
Module = self.BuildDatabase[ModuleFile]
if Module is None:
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "Module is not found in active platform",
ExtraData=Token, File=self.MetaFile, Line=Record[-1])
DepexList.append(Module.Guid)
else:
# it use the Fixed PCD format
if '.' in Token:
if tuple(Token.split('.')[::-1]) not in self.Pcds:
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "PCD [{}] used in [Depex] section should be listed in module PCD section".format(Token), File=self.MetaFile, Line=Record[-1])
else:
if self.Pcds[tuple(Token.split('.')[::-1])].DatumType != TAB_VOID:
EdkLogger.error('build', FORMAT_INVALID, "PCD [{}] used in [Depex] section should be VOID* datum type".format(Token), File=self.MetaFile, Line=Record[-1])
Value = Token
else:
# get the GUID value now
Value = _ProtocolValue(Token, self.Packages, self.MetaFile.Path)
if Value is None:
Value = _PpiValue(Token, self.Packages, self.MetaFile.Path)
if Value is None:
Value = GuidValue(Token, self.Packages, self.MetaFile.Path)
if Value is None:
PackageList = "\n\t".join(str(P) for P in self.Packages)
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of [%s] is not found in" % Token,
ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
DepexList.append(Value)
for Arch, ModuleType in TemporaryDictionary:
RetVal[Arch, ModuleType] = TemporaryDictionary[Arch, ModuleType]
return RetVal
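    # Illustrative note (editor's addition): for a hypothetical [Depex] section reading
    # "gEfiVariableArchProtocolGuid AND gEfiVariableWriteArchProtocolGuid", this property
    # returns ['<guid of first>', 'AND', '<guid of second>'] keyed by (Arch, ModuleType);
    # each C name is resolved through the packages as a protocol, then a PPI, then a GUID.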
## Retrieve dependency expression
@cached_property
def DepexExpression(self):
RetVal = tdict(False, 2)
RecordList = self._RawData[MODEL_EFI_DEPEX, self._Arch]
TemporaryDictionary = OrderedDict()
for Record in RecordList:
DepexStr = ReplaceMacro(Record[0], self._Macros, False)
Arch = Record[3]
ModuleType = Record[4]
TokenList = DepexStr.split()
if (Arch, ModuleType) not in TemporaryDictionary:
TemporaryDictionary[Arch, ModuleType] = ''
for Token in TokenList:
TemporaryDictionary[Arch, ModuleType] = TemporaryDictionary[Arch, ModuleType] + Token.strip() + ' '
for Arch, ModuleType in TemporaryDictionary:
RetVal[Arch, ModuleType] = TemporaryDictionary[Arch, ModuleType]
return RetVal
def LocalPkg(self):
module_path = self.MetaFile.File
subdir = os.path.split(module_path)[0]
TopDir = ""
while subdir:
subdir,TopDir = os.path.split(subdir)
for file_name in os.listdir(os.path.join(self.MetaFile.Root,TopDir)):
if file_name.upper().endswith("DEC"):
pkg = os.path.join(TopDir,file_name)
return pkg
@cached_class_function
def GetGuidsUsedByPcd(self):
self.Guid
return self._GuidsUsedByPcd
## Retrieve PCD for given type
def _GetPcd(self, Type):
Pcds = OrderedDict()
PcdDict = tdict(True, 4)
PcdList = []
RecordList = self._RawData[Type, self._Arch, self._Platform]
for TokenSpaceGuid, PcdCName, Setting, Arch, Platform, Id, LineNo in RecordList:
PcdDict[Arch, Platform, PcdCName, TokenSpaceGuid] = (Setting, LineNo)
PcdList.append((PcdCName, TokenSpaceGuid))
CommentRecords = self._RawData[MODEL_META_DATA_COMMENT, self._Arch, self._Platform, Id]
Comments = []
for CmtRec in CommentRecords:
Comments.append(CmtRec[0])
self._PcdComments[TokenSpaceGuid, PcdCName] = Comments
# resolve PCD type, value, datum info, etc. by getting its definition from package
_GuidDict = self.Guids.copy()
for PcdCName, TokenSpaceGuid in PcdList:
PcdRealName = PcdCName
Setting, LineNo = PcdDict[self._Arch, self.Platform, PcdCName, TokenSpaceGuid]
if Setting is None:
continue
ValueList = AnalyzePcdData(Setting)
DefaultValue = ValueList[0]
Pcd = PcdClassObject(
PcdCName,
TokenSpaceGuid,
'',
'',
DefaultValue,
'',
'',
{},
False,
self.Guids[TokenSpaceGuid]
)
if Type == MODEL_PCD_PATCHABLE_IN_MODULE and ValueList[1]:
# Patch PCD: TokenSpace.PcdCName|Value|Offset
Pcd.Offset = ValueList[1]
if (PcdRealName, TokenSpaceGuid) in GlobalData.MixedPcd:
for Package in self.Packages:
for key in Package.Pcds:
if (Package.Pcds[key].TokenCName, Package.Pcds[key].TokenSpaceGuidCName) == (PcdRealName, TokenSpaceGuid):
for item in GlobalData.MixedPcd[(PcdRealName, TokenSpaceGuid)]:
Pcd_Type = item[0].split('_')[-1]
if Pcd_Type == Package.Pcds[key].Type:
Value = Package.Pcds[key]
Value.TokenCName = Package.Pcds[key].TokenCName + '_' + Pcd_Type
if len(key) == 2:
newkey = (Value.TokenCName, key[1])
elif len(key) == 3:
newkey = (Value.TokenCName, key[1], key[2])
del Package.Pcds[key]
Package.Pcds[newkey] = Value
break
else:
pass
else:
pass
# get necessary info from package declaring this PCD
for Package in self.Packages:
#
# 'dynamic' in INF means its type is determined by platform;
# if platform doesn't give its type, use 'lowest' one in the
# following order, if any
#
# TAB_PCDS_FIXED_AT_BUILD, TAB_PCDS_PATCHABLE_IN_MODULE, TAB_PCDS_FEATURE_FLAG, TAB_PCDS_DYNAMIC, TAB_PCDS_DYNAMIC_EX
#
_GuidDict.update(Package.Guids)
PcdType = self._PCD_TYPE_STRING_[Type]
if Type == MODEL_PCD_DYNAMIC:
Pcd.Pending = True
for T in PCD_TYPE_LIST:
if (PcdRealName, TokenSpaceGuid) in GlobalData.MixedPcd:
for item in GlobalData.MixedPcd[(PcdRealName, TokenSpaceGuid)]:
if str(item[0]).endswith(T) and (item[0], item[1], T) in Package.Pcds:
PcdType = T
PcdCName = item[0]
break
else:
pass
break
else:
if (PcdRealName, TokenSpaceGuid, T) in Package.Pcds:
PcdType = T
break
else:
Pcd.Pending = False
if (PcdRealName, TokenSpaceGuid) in GlobalData.MixedPcd:
for item in GlobalData.MixedPcd[(PcdRealName, TokenSpaceGuid)]:
Pcd_Type = item[0].split('_')[-1]
if Pcd_Type == PcdType:
PcdCName = item[0]
break
else:
pass
else:
pass
if (PcdCName, TokenSpaceGuid, PcdType) in Package.Pcds:
PcdInPackage = Package.Pcds[PcdCName, TokenSpaceGuid, PcdType]
Pcd.Type = PcdType
Pcd.TokenValue = PcdInPackage.TokenValue
#
# Check whether the token value exist or not.
#
if Pcd.TokenValue is None or Pcd.TokenValue == "":
EdkLogger.error(
'build',
FORMAT_INVALID,
"No TokenValue for PCD [%s.%s] in [%s]!" % (TokenSpaceGuid, PcdRealName, str(Package)),
File=self.MetaFile, Line=LineNo,
ExtraData=None
)
#
# Check hexadecimal token value length and format.
#
ReIsValidPcdTokenValue = re.compile(r"^[0][x|X][0]*[0-9a-fA-F]{1,8}$", re.DOTALL)
if Pcd.TokenValue.startswith("0x") or Pcd.TokenValue.startswith("0X"):
if ReIsValidPcdTokenValue.match(Pcd.TokenValue) is None:
EdkLogger.error(
'build',
FORMAT_INVALID,
"The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid:" % (Pcd.TokenValue, TokenSpaceGuid, PcdRealName, str(Package)),
File=self.MetaFile, Line=LineNo,
ExtraData=None
)
#
# Check decimal token value length and format.
#
else:
try:
TokenValueInt = int (Pcd.TokenValue, 10)
if (TokenValueInt < 0 or TokenValueInt > 4294967295):
EdkLogger.error(
'build',
FORMAT_INVALID,
"The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid, as a decimal it should between: 0 - 4294967295!" % (Pcd.TokenValue, TokenSpaceGuid, PcdRealName, str(Package)),
File=self.MetaFile, Line=LineNo,
ExtraData=None
)
except:
EdkLogger.error(
'build',
FORMAT_INVALID,
"The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid, it should be hexadecimal or decimal!" % (Pcd.TokenValue, TokenSpaceGuid, PcdRealName, str(Package)),
File=self.MetaFile, Line=LineNo,
ExtraData=None
)
Pcd.DatumType = PcdInPackage.DatumType
Pcd.MaxDatumSize = PcdInPackage.MaxDatumSize
Pcd.InfDefaultValue = Pcd.DefaultValue
if not Pcd.DefaultValue:
Pcd.DefaultValue = PcdInPackage.DefaultValue
else:
try:
Pcd.DefaultValue = ValueExpressionEx(Pcd.DefaultValue, Pcd.DatumType, _GuidDict)(True)
except BadExpression as Value:
EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %(TokenSpaceGuid, PcdRealName, Pcd.DefaultValue, Value),
File=self.MetaFile, Line=LineNo)
break
else:
EdkLogger.error(
'build',
FORMAT_INVALID,
"PCD [%s.%s] in [%s] is not found in dependent packages:" % (TokenSpaceGuid, PcdRealName, self.MetaFile),
File=self.MetaFile, Line=LineNo,
ExtraData="\t%s" % '\n\t'.join(str(P) for P in self.Packages)
)
Pcds[PcdCName, TokenSpaceGuid] = Pcd
return Pcds
## check whether current module is binary module
@property
def IsBinaryModule(self):
if (self.Binaries and not self.Sources) or GlobalData.gIgnoreSource:
return True
return False
def ExtendCopyDictionaryLists(CopyToDict, CopyFromDict):
for Key in CopyFromDict:
CopyToDict[Key].extend(CopyFromDict[Key])
| [
"[email protected]"
] | |
beb8f00ca4461f449d82782c0683a196f2828a6a | 073c7ae30b0fbdadb3f60bdcf37940a496a3b2eb | /python/util.py | f88ba65b52323c39f073a193f6750bc183bd56c0 | [
"MIT"
] | permissive | cms-ttbarAC/CyMiniAna | 0e2a771473cf23eb931aa0ae7a015a5165f927b9 | 405b1ac6639f8a93297e847180b5a6ab58f9a06c | refs/heads/master | 2021-05-15T22:57:36.033299 | 2018-07-31T20:39:11 | 2018-07-31T20:39:11 | 106,871,363 | 0 | 1 | MIT | 2018-07-31T20:39:12 | 2017-10-13T20:41:28 | C++ | UTF-8 | Python | false | false | 5,834 | py | """
Created: --
Last Updated: 2 March 2018
Dan Marley
[email protected]
Texas A&M University
-----
File that holds any and all misc. functions
to be called from other python scripts.
(All information in one file => one location to update!)
"""
import ROOT
import numpy as np
class Sample(object):
"""Class for holding metadata information"""
def __init__(self):
self.xsection = 1
self.sumOfWeights = 1
self.nevents = 1
self.sampleType = ""
self.primaryDataset = ""
def getHistSeparation( S, B ):
"""Compare TH1* S and B -- need same dimensions
Copied from : https://root.cern.ch/doc/master/MethodBase_8cxx_source.html#l02740
"""
separation = 0
nstep = S.GetNbinsX()
xaxis = S.GetXaxis()
nS = S.GetSumOfWeights()
nB = B.GetSumOfWeights()
for bin in range(nstep):
s = S.GetBinContent( bin+1 )/nS
b = B.GetBinContent( bin+1 )/nB
if (s+b)>0: separation += (s - b)*(s - b)/(s + b)
separation *= 0.5
return separation
def GetSeparation2D( S, B ):
"""Compare TH2* S and B -- need same dimensions"""
separation = 0
nbinsx = S.GetNbinsX()
xaxis = S.GetXaxis()
nbinsy = S.GetNbinsY()
yaxis = S.GetYaxis()
integral_s = S.Integral()
integral_b = B.Integral()
for x in range(nbinsx):
for y in range(nbinsy):
s = S.GetBinContent( x+1,y+1 )/integral_s
b = B.GetBinContent( x+1,y+1 )/integral_b
if (s+b) > 0: separation += (s - b)*(s - b)/(s + b)
separation *= 0.5
return separation
def getSeparation(sig,bkg):
"""Calculate separation between two distributions"""
separation = 0
nS = 1.0*np.sum(sig)
nB = 1.0*np.sum(bkg)
for ss,bb in zip(sig,bkg):
s = ss/nS
b = bb/nB
if (s+b) > 0: separation += (s - b)*(s - b)/(s + b)
separation *= 0.5
return separation
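# Worked example for getSeparation (hypothetical bin contents, not analysis data):
# with sig = [3, 1] and bkg = [1, 3] the normalised bins are s = (0.75, 0.25) and
# b = (0.25, 0.75); each bin contributes (0.5**2)/1.0 = 0.25, so the half-sum is 0.25.
# Identical shapes give 0, fully disjoint shapes give 1.
#   >>> getSeparation([3, 1], [1, 3])
#   0.25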
def read_config(filename,separation=" "):
"""
Read configuration file with data stored like:
'config option'
    where the 'config' key and 'option' value are separated by a character, e.g., " "
"""
data = file2list(filename)
cfg = {}
for i in data:
j = i.split(separation)
cfg[j[0]] = j[1]
return cfg
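# Example for read_config (hypothetical file contents, a minimal sketch of the
# expected "key value" layout):
#   nEpochs 10
#   learning_rate 1e-3
# read_config on such a file returns {'nEpochs': '10', 'learning_rate': '1e-3'};
# values stay strings, and only the token right after the first separator is
# kept for each line.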
def extract(str_value, start_='{', stop_='}'):
"""Extract a string between two symbols, e.g., parentheses."""
extraction = str_value[str_value.index(start_)+1:str_value.index(stop_)]
return extraction
def to_csv(filename,data):
"""Write data to CSV file"""
if not filename.endswith(".csv"): filename += ".csv"
f = open(filename,"w")
for d in data:
f.write(d)
f.close()
return
def file2list(filename):
"""Load text file and dump contents into a list"""
listOfFiles = open( filename,'r').readlines()
listOfFiles = [i.rstrip('\n') for i in listOfFiles if not i.startswith("#")]
return listOfFiles
def str2bool(param):
"""Convert a string to a boolean"""
return (param in ['true','True','1'])
def getPrimaryDataset(root_file):
"""Get the sample type given the root file"""
try:
md = root_file.Get("tree/metadata")
md.GetEntry(0)
pd = str(md.primaryDataset)
    except Exception:  # metadata tree missing or unreadable
pd = None
return pd
def loadMetadata(file):
"""Load metadata"""
data = file2list(file)
samples = {}
for i in data:
if i.startswith("#"): continue
items = i.split(" ")
s = Sample()
s.sampleType = items[0]
s.primaryDataset = items[1]
samples[items[1]] = s
data = Sample()
data.sampleType = 'data'
data.primaryDataset = 'data'
mujets = Sample()
mujets.sampleType = 'mujets'
mujets.primaryDataset = 'SingleMuon'
ejets = Sample()
ejets.sampleType = 'ejets'
ejets.primaryDataset = 'SingleElectron'
samples['data'] = data
samples['SingleMuon'] = mujets
samples['SingleElectron'] = ejets
return samples
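# Example metadata line consumed by loadMetadata (hypothetical entry; the real
# file lists one "<sampleType> <primaryDataset>" pair per line):
#   ttbar TT_TuneCUETP8M2T4_13TeV-powheg-pythia8
# The primary dataset name is mapped to a Sample whose sampleType is 'ttbar',
# in addition to the hard-coded data/SingleMuon/SingleElectron entries.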
class VERBOSE(object):
"""Object for handling output"""
def __init__(self):
self.verboseMap = {"DEBUG":0,
"INFO": 1,
"WARNING":2,
"ERROR": 3};
self.level = "WARNING"
self.level_int = 2
def initialize(self):
"""Setup the integer level value"""
self.level_int = self.verboseMap[self.level]
def level_value(self):
"""Return the integer value"""
return self.level_int
def DEBUG(self,message):
"""Debug level - most verbose"""
self.verbose("DEBUG",message)
return
def INFO(self,message):
"""Info level - standard output"""
self.verbose("INFO",message)
return
def WARNING(self,message):
"""Warning level - if something seems wrong but code can continue"""
self.verbose("WARNING",message)
return
def ERROR(self,message):
"""Error level - something is wrong"""
self.verbose("ERROR",message)
return
def compare(self,level1,level2=None):
"""Compare two levels"""
if level2 is None:
return self.verboseMap[level1]>=self.level_int
else:
return self.verboseMap[level1]>=self.verboseMap[level2]
def verbose(self,level,message):
"""Print message to the screen"""
if self.compare( level ):
print " {0} :: {1}".format(level,message)
return
def HELP(self):
"""Help message"""
print " CyMiniAna Deep Learning "
print " To run, execute the command: "
print " $ python python/runDeepLearning.py <config> "
print " where <config> is a text file that outlines the configuration "
## THE END ##
| [
"[email protected]"
] | |
cd92ecd38dfe509e767b4977f1112c79d390744f | 0bfe6df147ffa74b6d2800391981273149502684 | /visionary/visionary/migrations/0002_add_model_Mindmap.py | 5ab5e8e1132a90e50d890cd2eef82b5aab730db0 | [] | no_license | lumenwrites/digitalMind_django | 829c95eca4720c2bbe71d14bdcce64e9eccd3752 | 0968f0006cf450f2796736cd604c5f6cba82147f | refs/heads/master | 2021-05-27T14:54:35.108215 | 2014-09-11T09:48:58 | 2014-09-11T09:48:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,903 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Mindmap'
db.create_table('visionary_mindmap', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=100, unique=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('data', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('visionary', ['Mindmap'])
def backwards(self, orm):
# Deleting model 'Mindmap'
db.delete_table('visionary_mindmap')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'db_table': "'django_content_type'", 'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType'},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'visionary.mindmap': {
'Meta': {'object_name': 'Mindmap'},
'data': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'visionary.state': {
'Meta': {'object_name': 'State'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['visionary'] | [
"[email protected]"
] | |
1a0586b543e61229aa5c7ecc3626c76951c49596 | aea3b522c0f8c6f82279cf6cc70bc11b22ef9f02 | /feincms3/mixins.py | 872c3c2269da46af9112d0eb37dba939ddbcdc59 | [
"BSD-2-Clause"
] | permissive | hancush/feincms3 | 0dfbb98f85f9bd2c2edf98cdb8de298f0188b17c | 782a4ee83a36756752b2f9aa225eed4dc402ff4c | refs/heads/master | 2020-04-04T11:55:39.289197 | 2018-10-31T18:49:47 | 2018-10-31T18:49:47 | 155,908,332 | 0 | 0 | NOASSERTION | 2018-11-02T18:44:39 | 2018-11-02T18:44:39 | null | UTF-8 | Python | false | false | 5,877 | py | # coding=utf-8
from django.conf import settings
from django.db import models
from django.db.models import signals
from django.utils.translation import activate, get_language, ugettext_lazy as _
from tree_queries.fields import TreeNodeForeignKey
from feincms3.utils import validation_error
class MenuMixin(models.Model):
"""
The ``MenuMixin`` is most useful on pages where there are menus with
differing content on a single page, for example the main navigation
and a meta navigation (containing contact, imprint etc.)
"""
menu = models.CharField(
_("menu"),
max_length=20,
blank=True,
choices=(("", ""),), # Non-empty choices for get_*_display
)
class Meta:
abstract = True
@staticmethod
def fill_menu_choices(sender, **kwargs):
"""
Fills in the choices for ``menu`` from the ``MENUS`` class variable.
This method is a receiver of Django's ``class_prepared`` signal.
"""
if issubclass(sender, MenuMixin) and not sender._meta.abstract:
field = sender._meta.get_field("menu")
field.choices = sender.MENUS
field.default = field.choices[0][0]
signals.class_prepared.connect(MenuMixin.fill_menu_choices)
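# A minimal sketch of a concrete page model using MenuMixin (the ``AbstractPage``
# base class and the menu names are illustrative, not part of this module):
#
#   class Page(MenuMixin, AbstractPage):
#       MENUS = [
#           ("main", _("main navigation")),
#           ("meta", _("meta navigation")),
#       ]
#
# ``fill_menu_choices`` then copies MENUS into the choices (and default) of the
# ``menu`` field once the class is prepared.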
class TemplateMixin(models.Model):
"""
It is sometimes useful to have different templates for CMS models such
as pages, articles or anything comparable. The ``TemplateMixin``
provides a ready-made solution for selecting django-content-editor
``Template`` instances through Django's administration interface.
"""
template_key = models.CharField(
_("template"),
max_length=100,
choices=(("", ""),), # Non-empty choices for get_*_display
)
class Meta:
abstract = True
@property
def template(self):
"""
Return the selected template instance if the ``template_key`` field
matches, or ``None``.
"""
return self.TEMPLATES_DICT.get(self.template_key)
@property
def regions(self):
"""
Return the selected template instances' ``regions`` attribute, falling
back to an empty list if no template instance could be found.
"""
return self.template.regions if self.template else []
@staticmethod
def fill_template_key_choices(sender, **kwargs):
"""
        Fills in the choices for ``template_key`` from the ``TEMPLATES`` class variable.
This method is a receiver of Django's ``class_prepared`` signal.
"""
if issubclass(sender, TemplateMixin) and not sender._meta.abstract:
field = sender._meta.get_field("template_key")
field.choices = [(t.key, t.title) for t in sender.TEMPLATES]
field.default = sender.TEMPLATES[0].key
sender.TEMPLATES_DICT = {t.key: t for t in sender.TEMPLATES}
signals.class_prepared.connect(TemplateMixin.fill_template_key_choices)
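# A minimal sketch of declaring TEMPLATES on a concrete model (assumes
# django-content-editor's ``Template``/``Region``; keys, titles and template
# paths are illustrative):
#
#   from content_editor.models import Region, Template
#
#   class Page(TemplateMixin, AbstractPage):
#       TEMPLATES = [
#           Template(
#               key="standard",
#               title=_("standard"),
#               template_name="pages/standard.html",
#               regions=[Region(key="main", title=_("Main"))],
#           ),
#       ]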
class LanguageMixin(models.Model):
"""
Pages may come in varying languages. ``LanguageMixin`` helps with that.
"""
language_code = models.CharField(
_("language"),
max_length=10,
choices=settings.LANGUAGES,
default=settings.LANGUAGES[0][0],
)
class Meta:
abstract = True
def activate_language(self, request):
"""
``activate()`` the page's language and set ``request.LANGUAGE_CODE``
"""
# Do what LocaleMiddleware does.
activate(self.language_code)
request.LANGUAGE_CODE = get_language()
class RedirectMixin(models.Model):
"""
The ``RedirectMixin`` allows adding redirects in the page tree.
"""
redirect_to_url = models.CharField(_("Redirect to URL"), max_length=200, blank=True)
redirect_to_page = TreeNodeForeignKey(
"self",
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name="+",
verbose_name=_("Redirect to page"),
)
class Meta:
abstract = True
def clean_fields(self, exclude=None):
"""
Ensure that redirects are configured properly.
"""
super(RedirectMixin, self).clean_fields(exclude)
if self.redirect_to_url and self.redirect_to_page_id:
raise validation_error(
_("Only set one redirect value."),
field="redirect_to_url",
exclude=exclude,
)
if self.redirect_to_page_id:
if self.redirect_to_page_id == self.pk:
raise validation_error(
_("Cannot redirect to self."),
field="redirect_to_page",
exclude=exclude,
)
if self.redirect_to_page.redirect_to_page_id:
raise validation_error(
_(
"Do not chain redirects. The selected page redirects"
" to %(title)s (%(path)s)."
)
% {
"title": self.redirect_to_page,
"path": self.redirect_to_page.get_absolute_url(),
},
field="redirect_to_page",
exclude=exclude,
)
if self.redirect_to_url or self.redirect_to_page_id:
# Any page redirects to this page?
other = self.__class__._default_manager.filter(redirect_to_page=self)
if other:
raise validation_error(
_(
"Do not chain redirects. The page %(page)s already"
" redirects to this page."
)
% {"page": ", ".join("%s" % page for page in other)},
field="redirect_to_page",
exclude=exclude,
)
| [
"[email protected]"
] | |
3ad3f271e1638aeab5f1a60f9e46cbf4d55b64e0 | a3faf585ac766da428ee896e6c70c39ecc22ce1f | /xy/planner.py | b4be54c6910ff99f946e9c2aa08bc9b5ab70185d | [] | no_license | RolandJuno/xy | dcab6c0682cda79ffd6b5fb6cb8365390421f784 | 1079175b9a2f58c72fd94520908ebbaf81585037 | refs/heads/master | 2020-04-05T04:11:02.909464 | 2019-11-13T22:05:16 | 2019-11-13T22:05:16 | 50,703,647 | 7 | 1 | null | 2016-01-30T01:56:42 | 2016-01-30T01:56:42 | null | UTF-8 | Python | false | false | 5,970 | py | from hashindex import Index
from math import hypot
import anneal
import random
def sort_paths_greedy(paths, reversable=True):
first = max(paths, key=lambda x: x[0][1])
paths.remove(first)
result = [first]
points = []
for path in paths:
x1, y1 = path[0]
x2, y2 = path[-1]
points.append((x1, y1, path, False))
if reversable:
points.append((x2, y2, path, True))
index = Index(points)
while index.size:
x, y, path, reverse = index.search(result[-1][-1])
x1, y1 = path[0]
x2, y2 = path[-1]
index.remove((x1, y1, path, False))
if reversable:
index.remove((x2, y2, path, True))
if reverse:
result.append(list(reversed(path)))
else:
result.append(path)
return result
def sort_paths(paths, iterations=100000, reversable=True):
'''
This function re-orders a set of 2D paths (polylines) to minimize the
distance required to visit each path. This is useful for 2D plotting to
reduce wasted movements where the instrument is not drawing.
If allowed, the algorithm will also reverse some paths if doing so reduces
the total distance.
The code uses simulated annealing as its optimization algorithm. The number
of iterations can be increased to improve the chances of finding a perfect
solution. However, a perfect solution isn't necessarily required - we just
want to find something good enough.
With randomly generated paths, the algorithm can quickly find a solution
that reduces the extra distance to ~25 percent of its original value.
'''
state = Model(list(paths), reversable)
max_temp = anneal.get_max_temp(state, 10000)
min_temp = max_temp / 1000.0
state = anneal.anneal(state, max_temp, min_temp, iterations)
for path, reverse in zip(state.paths, state.reverse):
if reverse:
path.reverse()
return state.paths
def sort_points(points, iterations=100000):
'''
Like sort_paths, but operates on individual points instead.
This is basically a traveling salesman optimization.
'''
paths = [[x] for x in points]
paths = sort_paths(paths, iterations, False)
points = [x[0] for x in paths]
return points
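# Example usage (hypothetical coordinates, a minimal sketch):
#   paths = [[(0, 0), (10, 0)], [(50, 40), (50, 60)], [(12, 1), (30, 5)]]
#   ordered = sort_paths(paths, iterations=10000)
# ``ordered`` holds the same polylines, re-ordered (and possibly reversed) so
# that the pen-up travel from the end of one path to the start of the next is
# reduced.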
class Model(object):
def __init__(self, paths, reversable=True, reverse=None, distances=None, total_distance=None):
self.paths = paths
self.reversable = reversable
self.reverse = reverse or [False] * len(self.paths)
if distances:
self.total_distance = total_distance or 0
self.distances = distances
else:
self.total_distance = 0
self.distances = [0] * (len(paths) - 1)
self.add_distances(range(len(self.distances)))
def subtract_distances(self, indexes):
n = len(self.distances)
for i in indexes:
if i >= 0 and i < n:
self.total_distance -= self.distances[i]
def add_distances(self, indexes):
n = len(self.distances)
for i in indexes:
if i < 0 or i >= n:
continue
j = i + 1
if self.reverse[i]:
x1, y1 = self.paths[i][0]
else:
x1, y1 = self.paths[i][-1]
if self.reverse[j]:
x2, y2 = self.paths[j][-1]
else:
x2, y2 = self.paths[j][0]
self.distances[i] = hypot(x2 - x1, y2 - y1)
self.total_distance += self.distances[i]
def energy(self):
# return the total extra distance for this ordering
return self.total_distance
def do_move(self):
if self.reversable and random.random() < 0.25:
# mutate by reversing a random path
n = len(self.paths) - 1
i = random.randint(0, n)
indexes = [i - 1, i]
self.subtract_distances(indexes)
self.reverse[i] = not self.reverse[i]
self.add_distances(indexes)
return (1, i, 0)
else:
# mutate by swapping two random paths
n = len(self.paths) - 1
i = random.randint(0, n)
j = random.randint(0, n)
indexes = set([i - 1, i, j - 1, j])
self.subtract_distances(indexes)
self.paths[i], self.paths[j] = self.paths[j], self.paths[i]
self.add_distances(indexes)
return (0, i, j)
def undo_move(self, undo):
# undo the previous mutation
mode, i, j = undo
if mode == 0:
indexes = set([i - 1, i, j - 1, j])
self.subtract_distances(indexes)
self.paths[i], self.paths[j] = self.paths[j], self.paths[i]
self.add_distances(indexes)
else:
indexes = [i - 1, i]
self.subtract_distances(indexes)
self.reverse[i] = not self.reverse[i]
self.add_distances(indexes)
def copy(self):
# make a copy of the model
return Model(
list(self.paths), self.reversable, list(self.reverse),
list(self.distances), self.total_distance)
def test(n_paths, n_iterations, seed=None):
random.seed(seed)
paths = []
for _ in range(n_paths):
x1 = random.random()
y1 = random.random()
x2 = random.random()
y2 = random.random()
path = [(x1, y1), (x2, y2)]
paths.append(path)
before = Model(paths).energy()
if n_iterations:
paths = sort_paths(paths, n_iterations)
else:
paths = sort_paths_greedy(paths)
after = Model(paths).energy()
pct = 100.0 * after / before
return pct
if __name__ == '__main__':
# test the module
for n_paths in [10, 100, 1000, 10000]:
for n_iterations in [None, 10, 100, 1000, 10000, 100000, 1000000]:
pct = test(n_paths, n_iterations, 123)
print n_paths, n_iterations, pct
| [
"[email protected]"
] | |
ff053c7af547706471c802a09fb2b03078714f37 | 3a0336f8ba841f6076f412dfb6de1af9ac946efd | /azure/multiapi/storagev2/fileshare/v2019_07_07/_models.py | 1488fcea6ebdc1c95ef6a44eccad5db9309bf5b8 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | Azure/azure-multiapi-storage-python | 4291579aa1f47d4b74557267558bd5029e01e12c | 650ef33ad683a5f2aba590c4553f9871bfa0dd93 | refs/heads/master | 2023-09-03T22:27:16.816305 | 2023-06-01T07:37:02 | 2023-06-01T07:37:02 | 94,827,841 | 4 | 17 | MIT | 2023-06-01T07:37:03 | 2017-06-19T22:58:21 | Python | UTF-8 | Python | false | false | 40,637 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=too-few-public-methods, too-many-instance-attributes
# pylint: disable=super-init-not-called, too-many-lines
from azure.core.paging import PageIterator
from ._parser import _parse_datetime_from_str
from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
from ._shared.models import DictMixin, get_enum_value
from ._generated.models import StorageErrorException
from ._generated.models import Metrics as GeneratedMetrics
from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
from ._generated.models import CorsRule as GeneratedCorsRule
from ._generated.models import AccessPolicy as GenAccessPolicy
from ._generated.models import DirectoryItem
def _wrap_item(item):
if isinstance(item, DirectoryItem):
return {'name': item.name, 'is_directory': True}
return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
class Metrics(GeneratedMetrics):
"""A summary of request statistics grouped by API in hour or minute aggregates
for files.
All required parameters must be populated in order to send to Azure.
:keyword str version: The version of Storage Analytics to configure.
:keyword bool enabled: Required. Indicates whether metrics are enabled for the
File service.
    :keyword bool include_apis: Indicates whether metrics should generate summary
statistics for called API operations.
:keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should
persist.
"""
def __init__(self, **kwargs):
self.version = kwargs.get('version', u'1.0')
self.enabled = kwargs.get('enabled', False)
self.include_apis = kwargs.get('include_apis')
self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
@classmethod
def _from_generated(cls, generated):
if not generated:
return cls()
return cls(
version=generated.version,
enabled=generated.enabled,
include_apis=generated.include_apis,
retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access
)
class RetentionPolicy(GeneratedRetentionPolicy):
"""The retention policy which determines how long the associated data should
persist.
All required parameters must be populated in order to send to Azure.
:param bool enabled: Required. Indicates whether a retention policy is enabled
for the storage service.
:param int days: Indicates the number of days that metrics or logging or
soft-deleted data should be retained. All data older than this value will
be deleted.
"""
def __init__(self, enabled=False, days=None):
self.enabled = enabled
self.days = days
if self.enabled and (self.days is None):
raise ValueError("If policy is enabled, 'days' must be specified.")
@classmethod
def _from_generated(cls, generated):
if not generated:
return cls()
return cls(
enabled=generated.enabled,
days=generated.days,
)
class CorsRule(GeneratedCorsRule):
"""CORS is an HTTP feature that enables a web application running under one
domain to access resources in another domain. Web browsers implement a
security restriction known as same-origin policy that prevents a web page
from calling APIs in a different domain; CORS provides a secure way to
allow one domain (the origin domain) to call APIs in another domain.
All required parameters must be populated in order to send to Azure.
:param list(str) allowed_origins:
A list of origin domains that will be allowed via CORS, or "*" to allow
all domains. The list of must contain at least one entry. Limited to 64
origin domains. Each allowed origin can have up to 256 characters.
:param list(str) allowed_methods:
A list of HTTP methods that are allowed to be executed by the origin.
The list of must contain at least one entry. For Azure Storage,
permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
:keyword list(str) allowed_headers:
Defaults to an empty list. A list of headers allowed to be part of
the cross-origin request. Limited to 64 defined headers and 2 prefixed
headers. Each header can be up to 256 characters.
:keyword list(str) exposed_headers:
Defaults to an empty list. A list of response headers to expose to CORS
clients. Limited to 64 defined headers and two prefixed headers. Each
header can be up to 256 characters.
:keyword int max_age_in_seconds:
The number of seconds that the client/browser should cache a
preflight response.
"""
def __init__(self, allowed_origins, allowed_methods, **kwargs):
self.allowed_origins = ','.join(allowed_origins)
self.allowed_methods = ','.join(allowed_methods)
self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
@classmethod
def _from_generated(cls, generated):
return cls(
[generated.allowed_origins],
[generated.allowed_methods],
allowed_headers=[generated.allowed_headers],
exposed_headers=[generated.exposed_headers],
max_age_in_seconds=generated.max_age_in_seconds,
)
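# A minimal usage sketch (origins, methods and the client variable are
# illustrative; assumes the share service client's ``set_service_properties``):
#
#   rule = CorsRule(
#       ["https://www.contoso.com"],
#       ["GET", "PUT"],
#       allowed_headers=["x-ms-meta-*"],
#       max_age_in_seconds=3600,
#   )
#   share_service_client.set_service_properties(cors=[rule])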
class AccessPolicy(GenAccessPolicy):
"""Access Policy class used by the set and get acl methods in each service.
A stored access policy can specify the start time, expiry time, and
permissions for the Shared Access Signatures with which it's associated.
Depending on how you want to control access to your resource, you can
specify all of these parameters within the stored access policy, and omit
them from the URL for the Shared Access Signature. Doing so permits you to
modify the associated signature's behavior at any time, as well as to revoke
it. Or you can specify one or more of the access policy parameters within
the stored access policy, and the others on the URL. Finally, you can
specify all of the parameters on the URL. In this case, you can use the
stored access policy to revoke the signature, but not to modify its behavior.
Together the Shared Access Signature and the stored access policy must
include all fields required to authenticate the signature. If any required
fields are missing, the request will fail. Likewise, if a field is specified
both in the Shared Access Signature URL and in the stored access policy, the
request will fail with status code 400 (Bad Request).
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ~azure.storage.fileshare.FileSasPermissions or
~azure.storage.fileshare.ShareSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: ~datetime.datetime or str
"""
def __init__(self, permission=None, expiry=None, start=None):
self.start = start
self.expiry = expiry
self.permission = permission
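# A minimal usage sketch (identifier name, times and the client variable are
# illustrative; assumes ``ShareClient.set_share_access_policy`` taking a dict
# of id -> AccessPolicy):
#
#   from datetime import datetime, timedelta
#
#   policy = AccessPolicy(
#       permission=ShareSasPermissions(read=True, list=True),
#       expiry=datetime.utcnow() + timedelta(hours=1),
#       start=datetime.utcnow(),
#   )
#   share_client.set_share_access_policy(signed_identifiers={"read-only": policy})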
class LeaseProperties(DictMixin):
"""File Lease Properties.
:ivar str status:
The lease status of the file. Possible values: locked|unlocked
:ivar str state:
Lease state of the file. Possible values: available|leased|expired|breaking|broken
:ivar str duration:
When a file is leased, specifies whether the lease is of infinite or fixed duration.
"""
def __init__(self, **kwargs):
self.status = get_enum_value(kwargs.get('x-ms-lease-status'))
self.state = get_enum_value(kwargs.get('x-ms-lease-state'))
self.duration = get_enum_value(kwargs.get('x-ms-lease-duration'))
@classmethod
def _from_generated(cls, generated):
lease = cls()
lease.status = get_enum_value(generated.properties.lease_status)
lease.state = get_enum_value(generated.properties.lease_state)
lease.duration = get_enum_value(generated.properties.lease_duration)
return lease
class ContentSettings(DictMixin):
"""Used to store the content settings of a file.
:param str content_type:
The content type specified for the file. If no content type was
specified, the default content type is application/octet-stream.
:param str content_encoding:
If the content_encoding has previously been set
for the file, that value is stored.
:param str content_language:
If the content_language has previously been set
for the file, that value is stored.
:param str content_disposition:
content_disposition conveys additional information about how to
process the response payload, and also can be used to attach
additional metadata. If content_disposition has previously been set
for the file, that value is stored.
:param str cache_control:
If the cache_control has previously been set for
the file, that value is stored.
:param str content_md5:
If the content_md5 has been set for the file, this response
header is stored so that the client can check for message content
integrity.
"""
def __init__(
self, content_type=None, content_encoding=None,
content_language=None, content_disposition=None,
cache_control=None, content_md5=None, **kwargs):
self.content_type = content_type or kwargs.get('Content-Type')
self.content_encoding = content_encoding or kwargs.get('Content-Encoding')
self.content_language = content_language or kwargs.get('Content-Language')
self.content_md5 = content_md5 or kwargs.get('Content-MD5')
self.content_disposition = content_disposition or kwargs.get('Content-Disposition')
self.cache_control = cache_control or kwargs.get('Cache-Control')
@classmethod
def _from_generated(cls, generated):
settings = cls()
settings.content_type = generated.properties.content_type or None
settings.content_encoding = generated.properties.content_encoding or None
settings.content_language = generated.properties.content_language or None
settings.content_md5 = generated.properties.content_md5 or None
settings.content_disposition = generated.properties.content_disposition or None
settings.cache_control = generated.properties.cache_control or None
return settings
class ShareProperties(DictMixin):
"""Share's properties class.
:ivar str name:
The name of the share.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the share was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar int quota:
The allocated quota.
:ivar dict metadata: A dict with name_value pairs to associate with the
share as metadata.
:ivar str snapshot:
Snapshot of the share.
"""
def __init__(self, **kwargs):
self.name = None
self.last_modified = kwargs.get('Last-Modified')
self.etag = kwargs.get('ETag')
self.quota = kwargs.get('x-ms-share-quota')
self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time')
self.metadata = kwargs.get('metadata')
self.snapshot = None
self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps')
self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps')
self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops')
@classmethod
def _from_generated(cls, generated):
props = cls()
props.name = generated.name
props.last_modified = generated.properties.last_modified
props.etag = generated.properties.etag
props.quota = generated.properties.quota
props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time
props.metadata = generated.metadata
props.snapshot = generated.snapshot
props.provisioned_egress_mbps = generated.properties.provisioned_egress_mbps
props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_mbps
props.provisioned_iops = generated.properties.provisioned_iops
return props
class SharePropertiesPaged(PageIterator):
"""An iterable of Share properties.
:ivar str service_endpoint: The service URL.
:ivar str prefix: A file name prefix being used to filter the list.
:ivar str marker: The continuation token of the current page of results.
:ivar int results_per_page: The maximum number of results retrieved per API call.
:ivar str continuation_token: The continuation token to retrieve the next page of results.
:ivar str location_mode: The location mode being used to list results. The available
options include "primary" and "secondary".
:ivar current_page: The current page of listed results.
:vartype current_page: list(~azure.storage.fileshare.ShareProperties)
:param callable command: Function to retrieve the next page of items.
:param str prefix: Filters the results to return only shares whose names
begin with the specified prefix.
:param int results_per_page: The maximum number of share names to retrieve per
call.
:param str continuation_token: An opaque continuation token.
"""
def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
super(SharePropertiesPaged, self).__init__(
get_next=self._get_next_cb,
extract_data=self._extract_data_cb,
continuation_token=continuation_token or ""
)
self._command = command
self.service_endpoint = None
self.prefix = prefix
self.marker = None
self.results_per_page = results_per_page
self.location_mode = None
self.current_page = []
def _get_next_cb(self, continuation_token):
try:
return self._command(
marker=continuation_token or None,
maxresults=self.results_per_page,
prefix=self.prefix,
cls=return_context_and_deserialized,
use_location=self.location_mode)
except StorageErrorException as error:
process_storage_error(error)
def _extract_data_cb(self, get_next_return):
self.location_mode, self._response = get_next_return
self.service_endpoint = self._response.service_endpoint
self.prefix = self._response.prefix
self.marker = self._response.marker
self.results_per_page = self._response.max_results
self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access
return self._response.next_marker or None, self.current_page
class Handle(DictMixin):
"""A listed Azure Storage handle item.
All required parameters must be populated in order to send to Azure.
:keyword str handle_id: Required. XSMB service handle ID
:keyword str path: Required. File or directory name including full path starting
from share root
:keyword str file_id: Required. FileId uniquely identifies the file or
directory.
:keyword str parent_id: ParentId uniquely identifies the parent directory of the
object.
:keyword str session_id: Required. SMB session ID in context of which the file
handle was opened
:keyword str client_ip: Required. Client IP that opened the handle
:keyword ~datetime.datetime open_time: Required. Time when the session that previously opened
the handle has last been reconnected. (UTC)
:keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC)
"""
def __init__(self, **kwargs):
self.id = kwargs.get('handle_id')
self.path = kwargs.get('path')
self.file_id = kwargs.get('file_id')
self.parent_id = kwargs.get('parent_id')
self.session_id = kwargs.get('session_id')
self.client_ip = kwargs.get('client_ip')
self.open_time = kwargs.get('open_time')
self.last_reconnect_time = kwargs.get('last_reconnect_time')
@classmethod
def _from_generated(cls, generated):
handle = cls()
handle.id = generated.handle_id
handle.path = generated.path
handle.file_id = generated.file_id
handle.parent_id = generated.parent_id
handle.session_id = generated.session_id
handle.client_ip = generated.client_ip
handle.open_time = generated.open_time
handle.last_reconnect_time = generated.last_reconnect_time
return handle
class HandlesPaged(PageIterator):
"""An iterable of Handles.
:ivar str marker: The continuation token of the current page of results.
:ivar int results_per_page: The maximum number of results retrieved per API call.
:ivar str continuation_token: The continuation token to retrieve the next page of results.
:ivar str location_mode: The location mode being used to list results. The available
options include "primary" and "secondary".
:ivar current_page: The current page of listed results.
:vartype current_page: list(~azure.storage.fileshare.Handle)
:param callable command: Function to retrieve the next page of items.
:param int results_per_page: The maximum number of share names to retrieve per
call.
:param str continuation_token: An opaque continuation token.
"""
def __init__(self, command, results_per_page=None, continuation_token=None):
super(HandlesPaged, self).__init__(
get_next=self._get_next_cb,
extract_data=self._extract_data_cb,
continuation_token=continuation_token or ""
)
self._command = command
self.marker = None
self.results_per_page = results_per_page
self.location_mode = None
self.current_page = []
def _get_next_cb(self, continuation_token):
try:
return self._command(
marker=continuation_token or None,
maxresults=self.results_per_page,
cls=return_context_and_deserialized,
use_location=self.location_mode)
except StorageErrorException as error:
process_storage_error(error)
def _extract_data_cb(self, get_next_return):
self.location_mode, self._response = get_next_return
self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access
return self._response.next_marker or None, self.current_page
class DirectoryProperties(DictMixin):
"""Directory's properties class.
:ivar str name:
The name of the directory.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the directory was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar bool server_encrypted:
Whether encryption is enabled.
:keyword dict metadata: A dict with name_value pairs to associate with the
directory as metadata.
:ivar change_time: Change time for the file.
:vartype change_time: str or ~datetime.datetime
:ivar creation_time: Creation time for the file.
:vartype creation_time: str or ~datetime.datetime
:ivar last_write_time: Last write time for the file.
:vartype last_write_time: str or ~datetime.datetime
:ivar file_attributes:
The file system attributes for files and directories.
:vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
:ivar permission_key: Key of the permission to be set for the
directory/file.
:vartype permission_key: str
:ivar file_id: Required. FileId uniquely identifies the file or
directory.
:vartype file_id: str
:ivar parent_id: ParentId uniquely identifies the parent directory of the
object.
:vartype parent_id: str
"""
def __init__(self, **kwargs):
self.name = None
self.last_modified = kwargs.get('Last-Modified')
self.etag = kwargs.get('ETag')
self.server_encrypted = kwargs.get('x-ms-server-encrypted')
self.metadata = kwargs.get('metadata')
self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time'))
self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time'))
self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time'))
self.file_attributes = kwargs.get('x-ms-file-attributes')
self.permission_key = kwargs.get('x-ms-file-permission-key')
self.file_id = kwargs.get('x-ms-file-id')
self.parent_id = kwargs.get('x-ms-file-parent-id')
@classmethod
def _from_generated(cls, generated):
props = cls()
props.name = generated.name
props.last_modified = generated.properties.last_modified
props.etag = generated.properties.etag
props.server_encrypted = generated.properties.server_encrypted
props.metadata = generated.metadata
return props
class DirectoryPropertiesPaged(PageIterator):
"""An iterable for the contents of a directory.
This iterable will yield dicts for the contents of the directory. The dicts
will have the keys 'name' (str) and 'is_directory' (bool).
    Items that are files (is_directory=False) will have an additional 'size' key with the file size in bytes.
:ivar str service_endpoint: The service URL.
:ivar str prefix: A file name prefix being used to filter the list.
:ivar str marker: The continuation token of the current page of results.
:ivar int results_per_page: The maximum number of results retrieved per API call.
:ivar str continuation_token: The continuation token to retrieve the next page of results.
:ivar str location_mode: The location mode being used to list results. The available
options include "primary" and "secondary".
:ivar current_page: The current page of listed results.
:vartype current_page: list(dict(str, Any))
:param callable command: Function to retrieve the next page of items.
:param str prefix: Filters the results to return only directories whose names
begin with the specified prefix.
:param int results_per_page: The maximum number of share names to retrieve per
call.
:param str continuation_token: An opaque continuation token.
"""
def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
super(DirectoryPropertiesPaged, self).__init__(
get_next=self._get_next_cb,
extract_data=self._extract_data_cb,
continuation_token=continuation_token or ""
)
self._command = command
self.service_endpoint = None
self.prefix = prefix
self.marker = None
self.results_per_page = results_per_page
self.location_mode = None
self.current_page = []
def _get_next_cb(self, continuation_token):
try:
return self._command(
marker=continuation_token or None,
prefix=self.prefix,
maxresults=self.results_per_page,
cls=return_context_and_deserialized,
use_location=self.location_mode)
except StorageErrorException as error:
process_storage_error(error)
def _extract_data_cb(self, get_next_return):
self.location_mode, self._response = get_next_return
self.service_endpoint = self._response.service_endpoint
self.prefix = self._response.prefix
self.marker = self._response.marker
self.results_per_page = self._response.max_results
self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items]
self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items])
return self._response.next_marker or None, self.current_page
class FileProperties(DictMixin):
"""File's properties class.
:ivar str name:
The name of the file.
:ivar str path:
The path of the file.
:ivar str share:
The name of share.
:ivar str snapshot:
File snapshot.
:ivar int content_length:
Size of file in bytes.
:ivar dict metadata: A dict with name_value pairs to associate with the
file as metadata.
:ivar str file_type:
Type of the file.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the file was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar int size:
Size of file in bytes.
:ivar str content_range:
The range of bytes.
:ivar bool server_encrypted:
Whether encryption is enabled.
:ivar copy:
The copy properties.
:vartype copy: ~azure.storage.fileshare.CopyProperties
:ivar content_settings:
The content settings for the file.
:vartype content_settings: ~azure.storage.fileshare.ContentSettings
"""
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.path = None
self.share = None
self.snapshot = None
self.content_length = kwargs.get('Content-Length')
self.metadata = kwargs.get('metadata')
self.file_type = kwargs.get('x-ms-type')
self.last_modified = kwargs.get('Last-Modified')
self.etag = kwargs.get('ETag')
self.size = kwargs.get('Content-Length')
self.content_range = kwargs.get('Content-Range')
self.server_encrypted = kwargs.get('x-ms-server-encrypted')
self.copy = CopyProperties(**kwargs)
self.content_settings = ContentSettings(**kwargs)
self.lease = LeaseProperties(**kwargs)
self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time'))
self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time'))
self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time'))
self.file_attributes = kwargs.get('x-ms-file-attributes')
self.permission_key = kwargs.get('x-ms-file-permission-key')
self.file_id = kwargs.get('x-ms-file-id')
self.parent_id = kwargs.get('x-ms-file-parent-id')
@classmethod
def _from_generated(cls, generated):
props = cls()
props.name = generated.name
props.content_length = generated.properties.content_length
props.metadata = generated.properties.metadata
props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
return props
class CopyProperties(DictMixin):
"""File Copy Properties.
:ivar str id:
String identifier for the last attempted Copy File operation where this file
was the destination file. This header does not appear if this file has never
been the destination in a Copy File operation, or if this file has been
modified after a concluded Copy File operation.
:ivar str source:
URL up to 2 KB in length that specifies the source file used in the last attempted
Copy File operation where this file was the destination file. This header does not
appear if this file has never been the destination in a Copy File operation, or if
this file has been modified after a concluded Copy File operation.
:ivar str status:
State of the copy operation identified by Copy ID, with these values:
success:
Copy completed successfully.
pending:
Copy is in progress. Check copy_status_description if intermittent,
non-fatal errors impede copy progress but don't cause failure.
aborted:
Copy was ended by Abort Copy File.
failed:
Copy failed. See copy_status_description for failure details.
:ivar str progress:
Contains the number of bytes copied and the total bytes in the source in the last
attempted Copy File operation where this file was the destination file. Can show
between 0 and Content-Length bytes copied.
:ivar datetime completion_time:
Conclusion time of the last attempted Copy File operation where this file was the
destination file. This value can specify the time of a completed, aborted, or
failed copy attempt.
:ivar str status_description:
Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
or non-fatal copy operation failure.
:ivar bool incremental_copy:
Copies the snapshot of the source file to a destination file.
The snapshot is copied such that only the differential changes between
the previously copied snapshot are transferred to the destination
:ivar datetime destination_snapshot:
Included if the file is incremental copy or incremental copy snapshot,
if x-ms-copy-status is success. Snapshot time of the last successful
incremental copy snapshot for this file.
"""
def __init__(self, **kwargs):
self.id = kwargs.get('x-ms-copy-id')
self.source = kwargs.get('x-ms-copy-source')
self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
self.progress = kwargs.get('x-ms-copy-progress')
        self.completion_time = kwargs.get('x-ms-copy-completion-time')
self.status_description = kwargs.get('x-ms-copy-status-description')
self.incremental_copy = kwargs.get('x-ms-incremental-copy')
self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
@classmethod
def _from_generated(cls, generated):
copy = cls()
copy.id = generated.properties.copy_id or None
copy.status = get_enum_value(generated.properties.copy_status) or None
copy.source = generated.properties.copy_source or None
copy.progress = generated.properties.copy_progress or None
copy.completion_time = generated.properties.copy_completion_time or None
copy.status_description = generated.properties.copy_status_description or None
copy.incremental_copy = generated.properties.incremental_copy or None
copy.destination_snapshot = generated.properties.destination_snapshot or None
return copy
class FileSasPermissions(object):
"""FileSasPermissions class to be used with
generating shared access signature operations.
:param bool read:
Read the content, properties, metadata. Use the file as the source of a copy
operation.
:param bool create:
Create a new file or copy a file to a new file.
:param bool write:
Create or write content, properties, metadata. Resize the file. Use the file
as the destination of a copy operation within the same account.
:param bool delete:
Delete the file.
"""
def __init__(self, read=False, create=False, write=False, delete=False):
self.read = read
self.create = create
self.write = write
self.delete = delete
self._str = (('r' if self.read else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a FileSasPermissions from a string.
To specify read, create, write, or delete permissions you need only to
include the first letter of the word in the string. E.g. For read and
create permissions, you would provide a string "rc".
:param str permission: The string which dictates the read, create,
write, or delete permissions
:return: A FileSasPermissions object
:rtype: ~azure.storage.fileshare.FileSasPermissions
"""
p_read = 'r' in permission
p_create = 'c' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
parsed = cls(p_read, p_create, p_write, p_delete)
parsed._str = permission # pylint: disable = protected-access
return parsed
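# Example round-trip (the permission string is illustrative):
#   perms = FileSasPermissions.from_string("rc")
#   assert perms.read and perms.create and not perms.write
#   assert str(perms) == "rc"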
class ShareSasPermissions(object):
"""ShareSasPermissions class to be used to be used with
generating shared access signature and access policy operations.
:param bool read:
Read the content, properties or metadata of any file in the share. Use any
file in the share as the source of a copy operation.
:param bool write:
For any file in the share, create or write content, properties or metadata.
Resize the file. Use the file as the destination of a copy operation within
the same account.
Note: You cannot grant permissions to read or write share properties or
metadata with a service SAS. Use an account SAS instead.
:param bool delete:
Delete any file in the share.
Note: You cannot grant permissions to delete a share with a service SAS. Use
an account SAS instead.
:param bool list:
List files and directories in the share.
"""
def __init__(self, read=False, write=False, delete=False, list=False): # pylint: disable=redefined-builtin
self.read = read
self.write = write
self.delete = delete
self.list = list
self._str = (('r' if self.read else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a ShareSasPermissions from a string.
To specify read, write, delete, or list permissions you need only to
include the first letter of the word in the string. E.g. For read and
write permissions, you would provide a string "rw".
:param str permission: The string which dictates the read, write,
delete, or list permissions
:return: A ShareSasPermissions object
:rtype: ~azure.storage.fileshare.ShareSasPermissions
"""
p_read = 'r' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_list = 'l' in permission
parsed = cls(p_read, p_write, p_delete, p_list)
parsed._str = permission # pylint: disable = protected-access
return parsed
class NTFSAttributes(object):
"""
Valid set of attributes to set for file or directory.
To set an attribute for a directory, 'Directory' should always be enabled, except when setting 'None' for the directory.
:ivar bool read_only:
Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE
:ivar bool hidden:
Enable/disable 'Hidden' attribute for DIRECTORY or FILE
:ivar bool system:
Enable/disable 'System' attribute for DIRECTORY or FILE
:ivar bool none:
Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY
:ivar bool directory:
Enable/disable 'Directory' attribute for DIRECTORY
:ivar bool archive:
Enable/disable 'Archive' attribute for DIRECTORY or FILE
:ivar bool temporary:
Enable/disable 'Temporary' attribute for FILE
:ivar bool offline:
Enable/disable 'Offline' attribute for DIRECTORY or FILE
:ivar bool not_content_indexed:
Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE
:ivar bool no_scrub_data:
Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE
"""
def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False,
temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False):
self.read_only = read_only
self.hidden = hidden
self.system = system
self.none = none
self.directory = directory
self.archive = archive
self.temporary = temporary
self.offline = offline
self.not_content_indexed = not_content_indexed
self.no_scrub_data = no_scrub_data
self._str = (('ReadOnly|' if self.read_only else '') +
('Hidden|' if self.hidden else '') +
('System|' if self.system else '') +
('None|' if self.none else '') +
('Directory|' if self.directory else '') +
('Archive|' if self.archive else '') +
('Temporary|' if self.temporary else '') +
('Offline|' if self.offline else '') +
('NotContentIndexed|' if self.not_content_indexed else '') +
('NoScrubData|' if self.no_scrub_data else ''))
def __str__(self):
concatenated_params = self._str
return concatenated_params.strip('|')
@classmethod
def from_string(cls, string):
"""Create a NTFSAttributes from a string.
To specify permissions you can pass in a string with the
desired permissions, e.g. "ReadOnly|Hidden|System"
:param str string: The string which dictates the permissions.
:return: A NTFSAttributes object
:rtype: ~azure.storage.fileshare.NTFSAttributes
"""
read_only = "ReadOnly" in string
hidden = "Hidden" in string
system = "System" in string
none = "None" in string
directory = "Directory" in string
archive = "Archive" in string
temporary = "Temporary" in string
offline = "Offline" in string
not_content_indexed = "NotContentIndexed" in string
no_scrub_data = "NoScrubData" in string
parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed,
no_scrub_data)
parsed._str = string # pylint: disable = protected-access
return parsed
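# Editor's note: illustrative sketch, not part of the original SDK module.
# Demonstrates the attribute-string round trip: from_string() sets the flags
# named in the string, and str() strips the trailing '|' separator.
def _example_ntfs_attributes():
    attrs = NTFSAttributes.from_string('ReadOnly|Hidden')
    assert attrs.read_only and attrs.hidden and not attrs.system
    assert str(attrs) == 'ReadOnly|Hidden'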
def service_properties_deserialize(generated):
"""Deserialize a ServiceProperties objects into a dict.
"""
return {
'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access
'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access
'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access
}
| [
"[email protected]"
] | |
2bcf76b268dcc14f93c164f38f79c9fac0b642c1 | 93d8f6332992d7f1574666096e956d47a2c23754 | /src/safe.py | 98b34c1ad9ca33b5b925d656a343e2388d310014 | [
"BSD-3-Clause"
] | permissive | aliceafterall/cocomud | d41a5a8964f1af17cacfb0d0dcdd4b5530bb1bc5 | b2b7a7b5f93542b8e94c0eec00c4dcd7bd96cff1 | refs/heads/master | 2023-07-20T09:34:49.410221 | 2017-08-03T15:16:05 | 2017-08-03T15:16:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,816 | py | # Copyright (c) 2016, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file contains the 'safe' system of CocoMUD, ways to crypt/encrypt.
This feature requires:
pbkdf2
Crypto
The module contains a class named 'Safe', which should be instantiated
in order to use the encrypting/decrypting mechanism. This class
requires a passphrase as argument. You can instantiate it as follows:
>>> from safe import Safe
>>> safe = Safe(file=".passphrase")
>>> # (If the file doesn't exist, it will be created with an auto-generated
>>> # passphrase.)
>>> # Alternatively you can specify the passphrase directly
>>> safe = Safe(passphrase="Dsm18fvdjP9sz801,9DJA.1356gndYJz987v")
>>> # Store encrypted data
>>> safe.store("login", "kredh")
>>> safe.store("password", "YoudWishIToldYou")
>>> # Retrieve the data (can be later)
>>> login = safe.retrieve("login")
>>> password = safe.retrieve("password")
Note that data that is not a string (like a bool or float) will be
saved as unprotected data. If you want to save it encrypted, you can
convert it to a string first.
"""
import base64
import os
import pickle
from Crypto.Cipher import AES
from pbkdf2 import PBKDF2
class Safe:
"""A safe object, to encrypt/decrypt information.
The Safe class requires a passphrase to be created. This is a
string of characters that adds to the security of encryption.
Obviously, it needs to remain similar to decrypt information that
has been encrypted. Other optional parameters are also possible:
secret: the path of the file in which to store crypted data.
"""
def __init__(self, passphrase=None, file=None, secret="data.crypt",
load=True):
self.salt_seed = 'mkhgts465wef4fwtdd'
self.passphrase = passphrase
self.secret = secret
self.passphrase_size = 64
self.key_size = 32
self.block_size = 16
self.iv_size = 16
self.salt_size = 8
self.data = {}
if file and os.path.exists(file):
with open(file, "r") as pass_file:
self.passphrase = pass_file.read()
if not self.passphrase:
self.passphrase = base64.b64encode(os.urandom(
self.passphrase_size))
if file:
with open(file, "w") as pass_file:
pass_file.write(self.passphrase)
# Load the secret file
if load:
self.load()
def get_salt_from_key(self, key):
return PBKDF2(key, self.salt_seed).read(self.salt_size)
def encrypt(self, plaintext, salt):
"""Pad plaintext, then encrypt it.
The encryption occurs with a new, randomly initialised cipher.
This method will not preserve trailing whitespace in plaintext!
"""
# Initialise Cipher Randomly
init_vector = os.urandom(self.iv_size)
# Prepare cipher key
key = PBKDF2(self.passphrase, salt).read(self.key_size)
cipher = AES.new(key, AES.MODE_CBC, init_vector)
bs = self.block_size
return init_vector + cipher.encrypt(plaintext + \
" " * (bs - (len(plaintext) % bs)))
def decrypt(self, ciphertext, salt):
"""Reconstruct the cipher object and decrypt.
This method will not preserve trailing whitespace in the
retrieved value.
"""
# Prepare cipher key
key = PBKDF2(self.passphrase, salt).read(self.key_size)
# Extract IV
init_vector = ciphertext[:self.iv_size]
ciphertext = ciphertext[self.iv_size:]
cipher = AES.new(key, AES.MODE_CBC, init_vector)
return cipher.decrypt(ciphertext).rstrip(" ")
def load(self):
"""Load the data from the 'secret' file if exists."""
if os.path.exists(self.secret):
with open(self.secret, "rb") as file:
upic = pickle.Unpickler(file)
self.data = upic.load()
if not isinstance(self.data, dict):
raise ValueError("the data contained in the file " \
"'{}' is not a dictionary".format(self.secret))
def retrieve(self, key, *default):
"""Retrieve and decrypt the specified key.
If the key isn't present in the dictionary, either
return default if specified, or raise a KeyError.
If the value at this location isn't a string, return it as is.
"""
if key not in self.data:
if default:
return default[0]
raise KeyError(key)
value = self.data[key]
if isinstance(value, basestring):
salt = self.get_salt_from_key(key)
return self.decrypt(value, salt)
return value
def store(self, key, value):
"""Store the key in the file.
If the key already exists, replaces it.
If the value is not a string or unicode, it will be stored
WITHOUT encryption.
"""
if isinstance(value, basestring):
salt = self.get_salt_from_key(key)
crypted = self.encrypt(value, salt)
self.data[key] = crypted
else:
self.data[key] = value
# Write the new data in the file
with open(self.secret, "wb") as file:
pic = pickle.Pickler(file)
pic.dump(self.data)
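# Editor's note: illustrative sketch, not part of the original module.
# The space padding used by encrypt() rounds the plaintext length up to a
# multiple of the AES block size (16 bytes); rstrip(" ") in decrypt() then
# removes it, which is why trailing whitespace is not preserved.
def _example_padding():
    bs = 16
    plaintext = "YoudWishIToldYou!"                       # 17 characters
    padded = plaintext + " " * (bs - (len(plaintext) % bs))
    assert len(padded) == 32 and len(padded) % bs == 0
    assert padded.rstrip(" ") == plaintext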
| [
"[email protected]"
] | |
a4b8a7c035036e9e0e83c562c498c103c3a7ba94 | 7d72ece1edb0009e2f5dadd96838e6fa4d020c86 | /src/follow_road/MyAlgorithm.py | 78146757492d8d71d43311729f3470639eea528e | [] | no_license | RoboticsLabURJC/2018-phd-luis-caiza | d188a9621c7339349dd32ba3f382010daeb49b95 | 834e93889c8b8aacdf8edee0206341154ef17073 | refs/heads/master | 2020-03-30T02:05:28.334834 | 2019-04-24T19:32:17 | 2019-04-24T19:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,509 | py | import threading
import time
from datetime import datetime
import cv2
import numpy as np
import math
time_cycle = 80
#value_min_HSV = np.array([20, 0, 0]) #for follow road original
#value_max_HSV = np.array([100, 130, 130]) #for follow road original
value_min_HSV=np.array([0, 50, 50]) # red color used in follow a ball
value_max_HSV=np.array([10, 255, 255]) #red color used in follow a ball
vel_front = 0
vel_z = 0
vel_yaw = 0
class MyAlgorithm(threading.Thread):
def __init__(self, drone):
self.drone = drone
self.height = 240
self.width = 320
self.yaw = 0.0
self.imageV=None
self.imageF =None
self.stop_event = threading.Event()
self.kill_event = threading.Event()
self.lock = threading.Lock()
threading.Thread.__init__(self, args=self.stop_event)
def setImageFilteredVentral(self, image):
self.lock.acquire()
self.imageV=image
self.lock.release()
def getImageFilteredVentral(self):
self.lock.acquire()
tempImageV=self.imageV
self.lock.release()
return tempImageV
def setImageFilteredFrontal(self, image):
self.lock.acquire()
self.imageF=image
self.lock.release()
def getImageFilteredFrontal(self):
self.lock.acquire()
tempImageF=self.imageF
self.lock.release()
return tempImageF
def run (self):
self.stop_event.clear()
while (not self.kill_event.is_set()):
start_time = datetime.now()
if not self.stop_event.is_set():
self.execute()
finish_Time = datetime.now()
dt = finish_Time - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
if (ms < time_cycle):
time.sleep((time_cycle - ms) / 1000.0)
def stop (self):
self.stop_event.set()
def play (self):
if self.is_alive():
self.stop_event.clear()
else:
self.start()
def kill (self):
self.kill_event.set()
def execute(self):
# Add your code here
input_imageV = self.drone.getImageVentral().data
input_imageF = self.drone.getImageFrontal().data
if input_imageV is not None:
image_HSV_V = cv2.cvtColor(input_imageV, cv2.COLOR_RGB2HSV)
#Threshold image
image_HSV_filtered_V = cv2.inRange(image_HSV_V, value_min_HSV, value_max_HSV)
#Reducing noise
opening_V = cv2.morphologyEx(image_HSV_filtered_V, cv2.MORPH_OPEN, np.ones((5,5),np.uint8))
closing_V = cv2.morphologyEx(opening_V, cv2.MORPH_CLOSE, np.ones((10,10),np.uint8))
#Filtered image
image_HSV_filtered_Mask_V = np.dstack((closing_V, closing_V, closing_V))
#drawing contours
imgray_V = cv2.cvtColor(image_HSV_filtered_Mask_V, cv2.COLOR_BGR2GRAY)
ret_V, thresh_V = cv2.threshold(imgray_V, 127, 255, 0)
_, contours_V, hierarchy_V = cv2.findContours(thresh_V, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image_HSV_filtered_Mask_V, contours_V, -1, (0,255,0), 3)
#Getting the centre of the road
if input_imageF is not None:
image_HSV_F = cv2.cvtColor(input_imageF, cv2.COLOR_RGB2HSV)
#Threshold image
image_HSV_filtered_F = cv2.inRange(image_HSV_F, value_min_HSV, value_max_HSV)
#Reducing noise
opening_F = cv2.morphologyEx(image_HSV_filtered_F, cv2.MORPH_OPEN, np.ones((5,5),np.uint8))
image_HSV_filtered_Mask_F = np.dstack((opening_F, opening_F, opening_F))
#drawing contours
imgray_F = cv2.cvtColor(image_HSV_filtered_Mask_F, cv2.COLOR_BGR2GRAY)
ret_F, thresh_F = cv2.threshold(imgray_F, 127, 255, 0)
_, contours_F, hierarchy_F = cv2.findContours(thresh_F, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image_HSV_filtered_Mask_F, contours_F, -1, (0,255,0), 3)
#Getting the centre of the road
area = []
for pic, contour in enumerate(contours_F):
area.append(cv2.contourArea(contour))
if len(area) > 1:
if area[0] < area[1]:
M = cv2.moments(contours_F[1])
else:
M = cv2.moments(contours_F[0])
else:
try:
M = cv2.moments(contours_F[0])
except IndexError:
self.drone.sendCMDVelocities(0,0,0,0)
M = cv2.moments(0)
if int(M['m00']) != 0:
#print("Road detected")
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
vel_front = 0.0001 * (3000 - int(M['m00']))
vel_z = 0.01 * (110 - cy)
vel_yaw = 0.02 * (140 - cx)
self.drone.sendCMDVelocities(0,vel_front,vel_z,vel_yaw)
print("cx: " + str(cx) + " cy: " + str(cy) + " area: " + str(M['m00']) + " vel_z " + str(vel_z))
self.yaw = int(cx)
#drawing the center
cv2.circle(image_HSV_filtered_Mask_F, (cx, cy), 7, np.array([255, 0, 0]), -1)
#printing the filtered image
self.setImageFilteredVentral(image_HSV_filtered_Mask_V)
self.setImageFilteredFrontal(image_HSV_filtered_Mask_F)
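# Editor's note: illustrative sketch, not part of the original module.
# The velocity commands above are simple proportional (P) controllers of the
# form gain * (setpoint - measurement); a worked example with made-up values:
def _example_p_controller():
    cx, cy, area = 180, 90, 5000
    vel_yaw = 0.02 * (140 - cx)         # -> -0.8, turn toward image column 140
    vel_z = 0.01 * (110 - cy)           # ->  0.2, climb toward image row 110
    vel_front = 0.0001 * (3000 - area)  # -> -0.2, back off, blob looks too close
    return vel_front, vel_z, vel_yaw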
| [
"[email protected]"
] | |
eed067e68e68bc9403d6958e844746a118bc601f | d6ce2f6bdddef373b9bbdf26d567307ce3667103 | /scripts/utils_specs/convert_spec_csv_to_json.py | 0db7b03c0e21edd6637ca3d51e06b9ffc1e88e4d | [
"MIT"
] | permissive | hezbranch/time_series_prediction | 505007fb248fe09f56943c3ad705a52ce77a193c | 9bffc3f279cbfaa3ec0acc937d15610c19e0975e | refs/heads/master | 2023-01-19T12:27:24.615657 | 2020-10-30T08:59:05 | 2020-10-30T08:59:05 | 296,434,092 | 1 | 0 | MIT | 2020-09-17T20:22:09 | 2020-09-17T20:22:08 | null | UTF-8 | Python | false | false | 2,503 | py | import argparse
import pandas as pd
import json
import copy
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--config_json_path", type=str)
parser.add_argument("--output_dir", type=str)
parser.add_argument("--row_template_json", type=str, default='row_template.json')
parser.add_argument("--sheet_template_json", type=str, default='sheet_template.json')
args = parser.parse_args()
with open(args.config_json_path, 'r') as f:
config = json.load(f)
with open(args.row_template_json, 'r') as f:
row_template = json.load(f)
with open(args.sheet_template_json, 'r') as f:
sheet_template = json.load(f)
for gid, sheet_name, csv_filename in zip(
config['spec_gid_list'],
config['spec_sheet_name_list'],
config['spec_csv_filename_list']):
sheet = copy.deepcopy(sheet_template)
sheet['name'] = sheet['name'].replace("{{sheet_name}}", sheet_name)
sheet['path'] = sheet['path'].replace("{{csv_filename}}", csv_filename)
out_csv_path = os.path.join(
args.output_dir,
config['output_csv_path_pattern'].replace("{{sheet_name}}", sheet_name)
)
out_json_path = os.path.join(
args.output_dir,
config['output_json_path_pattern'].replace("{{sheet_name}}", sheet_name)
)
csv_df = pd.read_csv(out_csv_path, dtype=str)
row_list = []
for rowid, row_df in csv_df.iterrows():
row = copy.deepcopy(row_template)
for k, v in row_template.items():
if isinstance(v, dict):
v = v.__repr__()
isdict = True
else:
isdict = False
assert isinstance(v, str)
while v.count("{{") > 0:
start = v.find("{{")
stop = v.find("}}", start)
varname = v[start+2:stop]
v = v.replace("{{%s}}" % varname, str(row_df[varname]))
if isdict:
row[k] = json.loads(v.replace("'", '"'))
else:
row[k] = v
row_list.append(row)
sheet['schema']['fields'] = row_list
sheet = json.dumps(sheet, indent=4, sort_keys=False)
with open(out_json_path, 'w') as f:
f.write(sheet)
print("Wrote to file: %s" % out_json_path)
| [
"[email protected]"
] | |
90118b22999d0850d70f1bd9e39f9ebafee8e412 | 6188f8ef474da80c9e407e8040de877273f6ce20 | /examples/docs_snippets/docs_snippets/guides/dagster/development_to_production/resources/resources_v1.py | c1339b0fabc7baf6e734f9610d9ced0cb55cf53e | [
"Apache-2.0"
] | permissive | iKintosh/dagster | 99f2a1211de1f3b52f8bcf895dafaf832b999de2 | 932a5ba35263deb7d223750f211c2ddfa71e6f48 | refs/heads/master | 2023-01-24T15:58:28.497042 | 2023-01-20T21:51:35 | 2023-01-20T21:51:35 | 276,410,978 | 1 | 0 | Apache-2.0 | 2020-07-01T15:19:47 | 2020-07-01T15:13:56 | null | UTF-8 | Python | false | false | 655 | py | # start_resource
# resources.py
from typing import Any, Dict, Optional
import requests
class HNAPIClient:
"""
Hacker News client that fetches live data
"""
def fetch_item_by_id(self, item_id: int) -> Optional[Dict[str, Any]]:
"""Fetches a single item from the Hacker News API by item id."""
item_url = f"https://hacker-news.firebaseio.com/v0/item/{item_id}.json"
item = requests.get(item_url, timeout=5).json()
return item
def fetch_max_item_id(self) -> int:
return requests.get(
"https://hacker-news.firebaseio.com/v0/maxitem.json", timeout=5
).json()
# end_resource
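# Editor's note: illustrative sketch, not part of the original docs snippet.
# Hypothetical usage of the client defined above; both calls hit the live
# Hacker News API over HTTP.
def _example_fetch_latest_item():
    client = HNAPIClient()
    max_id = client.fetch_max_item_id()
    return client.fetch_item_by_id(max_id)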
| [
"[email protected]"
] | |
cad332858fb916aae94cf392338574f290c1bdce | 1e3cf9c1341083675fa9b716f11c2834e2d18374 | /src/pyphoplacecellanalysis/External/pyqtgraph/examples/VideoSpeedTest.py | bb3ff76da268f9820a6aa048e58f05c851b9e606 | [
"MIT"
] | permissive | CommanderPho/pyPhoPlaceCellAnalysis | a60313c98b3ad2834c2bf101f3463714df092cf5 | 212399d826284b394fce8894ff1a93133aef783f | refs/heads/master | 2023-09-01T20:27:43.792099 | 2023-09-01T03:24:19 | 2023-09-01T03:24:19 | 444,885,155 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,668 | py | """
Tests the speed of image updates for an ImageItem and RawImageWidget.
The speed will generally depend on the type of data being shown, whether
it is being scaled and/or converted by lookup table, and whether OpenGL
is used by the view widget
"""
import argparse
import sys
from time import perf_counter
import numpy as np
import pyphoplacecellanalysis.External.pyqtgraph as pg
from pyphoplacecellanalysis.External.pyqtgraph.Qt import QT_LIB, QtCore, QtGui, QtWidgets
pg.setConfigOption('imageAxisOrder', 'row-major')
import importlib
ui_template = importlib.import_module(f'VideoTemplate_{QT_LIB.lower()}')
try:
import cupy as cp
pg.setConfigOption("useCupy", True)
_has_cupy = True
except ImportError:
cp = None
_has_cupy = False
try:
import numba
_has_numba = True
except ImportError:
numba = None
_has_numba = False
try:
from pyphoplacecellanalysis.External.pyqtgraph.widgets.RawImageWidget import RawImageGLWidget
except ImportError:
RawImageGLWidget = None
parser = argparse.ArgumentParser(description="Benchmark for testing video performance")
parser.add_argument('--cuda', default=False, action='store_true', help="Use CUDA to process on the GPU", dest="cuda")
parser.add_argument('--dtype', default='uint8', choices=['uint8', 'uint16', 'float'], help="Image dtype (uint8, uint16, or float)")
parser.add_argument('--frames', default=3, type=int, help="Number of image frames to generate (default=3)")
parser.add_argument('--image-mode', default='mono', choices=['mono', 'rgb'], help="Image data mode (mono or rgb)", dest='image_mode')
parser.add_argument('--levels', default=None, type=lambda s: tuple([float(x) for x in s.split(',')]), help="min,max levels to scale monochromatic image dynamic range, or rmin,rmax,gmin,gmax,bmin,bmax to scale rgb")
parser.add_argument('--lut', default=False, action='store_true', help="Use color lookup table")
parser.add_argument('--lut-alpha', default=False, action='store_true', help="Use alpha color lookup table", dest='lut_alpha')
parser.add_argument('--size', default='512x512', type=lambda s: tuple([int(x) for x in s.split('x')]), help="WxH image dimensions default='512x512'")
args = parser.parse_args(sys.argv[1:])
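# Editor's note: example invocations of this benchmark (hypothetical values),
# based on the argparse options defined above:
#   python VideoSpeedTest.py --size 1024x1024 --dtype uint16 --lut
#   python VideoSpeedTest.py --cuda --frames 10 --levels 0,255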
if RawImageGLWidget is not None:
# don't limit frame rate to vsync
sfmt = QtGui.QSurfaceFormat()
sfmt.setSwapInterval(0)
QtGui.QSurfaceFormat.setDefaultFormat(sfmt)
app = pg.mkQApp("Video Speed Test Example")
win = QtWidgets.QMainWindow()
win.setWindowTitle('pyqtgraph example: VideoSpeedTest')
ui = ui_template.Ui_MainWindow()
ui.setupUi(win)
win.show()
if RawImageGLWidget is None:
ui.rawGLRadio.setEnabled(False)
ui.rawGLRadio.setText(ui.rawGLRadio.text() + " (OpenGL not available)")
else:
ui.rawGLImg = RawImageGLWidget()
ui.stack.addWidget(ui.rawGLImg)
# read in CLI args
ui.cudaCheck.setChecked(args.cuda and _has_cupy)
ui.cudaCheck.setEnabled(_has_cupy)
ui.numbaCheck.setChecked(_has_numba and pg.getConfigOption("useNumba"))
ui.numbaCheck.setEnabled(_has_numba)
ui.framesSpin.setValue(args.frames)
ui.widthSpin.setValue(args.size[0])
ui.heightSpin.setValue(args.size[1])
ui.dtypeCombo.setCurrentText(args.dtype)
ui.rgbCheck.setChecked(args.image_mode=='rgb')
ui.maxSpin1.setOpts(value=255, step=1)
ui.minSpin1.setOpts(value=0, step=1)
levelSpins = [ui.minSpin1, ui.maxSpin1, ui.minSpin2, ui.maxSpin2, ui.minSpin3, ui.maxSpin3]
if args.cuda and _has_cupy:
xp = cp
else:
xp = np
if args.levels is None:
ui.scaleCheck.setChecked(False)
ui.rgbLevelsCheck.setChecked(False)
else:
ui.scaleCheck.setChecked(True)
if len(args.levels) == 2:
ui.rgbLevelsCheck.setChecked(False)
ui.minSpin1.setValue(args.levels[0])
ui.maxSpin1.setValue(args.levels[1])
elif len(args.levels) == 6:
ui.rgbLevelsCheck.setChecked(True)
for spin,val in zip(levelSpins, args.levels):
spin.setValue(val)
else:
raise ValueError("levels argument must be 2 or 6 comma-separated values (got %r)" % (args.levels,))
ui.lutCheck.setChecked(args.lut)
ui.alphaCheck.setChecked(args.lut_alpha)
#ui.graphicsView.useOpenGL() ## buggy, but you can try it if you need extra speed.
vb = pg.ViewBox()
ui.graphicsView.setCentralItem(vb)
vb.setAspectLocked()
img = pg.ImageItem()
vb.addItem(img)
LUT = None
def updateLUT():
global LUT, ui
dtype = ui.dtypeCombo.currentText()
if dtype == 'uint8':
n = 256
else:
n = 4096
LUT = ui.gradient.getLookupTable(n, alpha=ui.alphaCheck.isChecked())
if _has_cupy and xp == cp:
LUT = cp.asarray(LUT)
ui.gradient.sigGradientChanged.connect(updateLUT)
updateLUT()
ui.alphaCheck.toggled.connect(updateLUT)
def updateScale():
global ui, levelSpins
if ui.rgbLevelsCheck.isChecked():
for s in levelSpins[2:]:
s.setEnabled(True)
else:
for s in levelSpins[2:]:
s.setEnabled(False)
updateScale()
ui.rgbLevelsCheck.toggled.connect(updateScale)
cache = {}
def mkData():
with pg.BusyCursor():
global data, cache, ui, xp
frames = ui.framesSpin.value()
width = ui.widthSpin.value()
height = ui.heightSpin.value()
cacheKey = (ui.dtypeCombo.currentText(), ui.rgbCheck.isChecked(), frames, width, height)
if cacheKey not in cache:
if cacheKey[0] == 'uint8':
dt = xp.uint8
loc = 128
scale = 64
mx = 255
elif cacheKey[0] == 'uint16':
dt = xp.uint16
loc = 4096
scale = 1024
mx = 2**16 - 1
elif cacheKey[0] == 'float':
dt = xp.float32
loc = 1.0
scale = 0.1
mx = 1.0
else:
raise ValueError(f"unable to handle dtype: {cacheKey[0]}")
chan_shape = (height, width)
if ui.rgbCheck.isChecked():
frame_shape = chan_shape + (3,)
else:
frame_shape = chan_shape
data = xp.empty((frames,) + frame_shape, dtype=dt)
view = data.reshape((-1,) + chan_shape)
for idx in range(view.shape[0]):
subdata = xp.random.normal(loc=loc, scale=scale, size=chan_shape)
# note: gaussian filtering has been removed as it slows down array
# creation greatly.
if cacheKey[0] != 'float':
xp.clip(subdata, 0, mx, out=subdata)
view[idx] = subdata
data[:, 10:50, 10] = mx
data[:, 48, 9:12] = mx
data[:, 47, 8:13] = mx
cache = {cacheKey: data} # clear to save memory (but keep one to prevent unnecessary regeneration)
data = cache[cacheKey]
updateLUT()
updateSize()
def updateSize():
global ui, vb
frames = ui.framesSpin.value()
width = ui.widthSpin.value()
height = ui.heightSpin.value()
dtype = xp.dtype(str(ui.dtypeCombo.currentText()))
rgb = 3 if ui.rgbCheck.isChecked() else 1
ui.sizeLabel.setText('%d MB' % (frames * width * height * rgb * dtype.itemsize / 1e6))
vb.setRange(QtCore.QRectF(0, 0, width, height))
def noticeCudaCheck():
global xp, cache
cache = {}
if ui.cudaCheck.isChecked():
if _has_cupy:
xp = cp
else:
xp = np
ui.cudaCheck.setChecked(False)
else:
xp = np
mkData()
def noticeNumbaCheck():
pg.setConfigOption('useNumba', _has_numba and ui.numbaCheck.isChecked())
mkData()
ui.dtypeCombo.currentIndexChanged.connect(mkData)
ui.rgbCheck.toggled.connect(mkData)
ui.widthSpin.editingFinished.connect(mkData)
ui.heightSpin.editingFinished.connect(mkData)
ui.framesSpin.editingFinished.connect(mkData)
ui.widthSpin.valueChanged.connect(updateSize)
ui.heightSpin.valueChanged.connect(updateSize)
ui.framesSpin.valueChanged.connect(updateSize)
ui.cudaCheck.toggled.connect(noticeCudaCheck)
ui.numbaCheck.toggled.connect(noticeNumbaCheck)
ptr = 0
lastTime = perf_counter()
fps = None
def update():
global ui, ptr, lastTime, fps, LUT, img
if ui.lutCheck.isChecked():
useLut = LUT
else:
useLut = None
downsample = ui.downsampleCheck.isChecked()
if ui.scaleCheck.isChecked():
if ui.rgbLevelsCheck.isChecked():
useScale = [
[ui.minSpin1.value(), ui.maxSpin1.value()],
[ui.minSpin2.value(), ui.maxSpin2.value()],
[ui.minSpin3.value(), ui.maxSpin3.value()]]
else:
useScale = [ui.minSpin1.value(), ui.maxSpin1.value()]
else:
useScale = None
if ui.rawRadio.isChecked():
ui.rawImg.setImage(data[ptr%data.shape[0]], lut=useLut, levels=useScale)
ui.stack.setCurrentIndex(1)
elif ui.rawGLRadio.isChecked():
ui.rawGLImg.setImage(data[ptr%data.shape[0]], lut=useLut, levels=useScale)
ui.stack.setCurrentIndex(2)
else:
img.setImage(data[ptr%data.shape[0]], autoLevels=False, levels=useScale, lut=useLut, autoDownsample=downsample)
ui.stack.setCurrentIndex(0)
#img.setImage(data[ptr%data.shape[0]], autoRange=False)
ptr += 1
now = perf_counter()
dt = now - lastTime
lastTime = now
if fps is None:
fps = 1.0/dt
else:
s = np.clip(dt*3., 0, 1)
fps = fps * (1-s) + (1.0/dt) * s
ui.fpsLabel.setText('%0.2f fps' % fps)
app.processEvents() ## force complete redraw for every plot
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if __name__ == '__main__':
pg.exec()
| [
"[email protected]"
] | |
93db36640e286172bee479c27dc086ac4f892ad8 | d90283bff72b5a55dd4d0f90c7325355b00ce7b1 | /p1804/p12/xxxx.py | 1bd3153f8551e4d5a98764db70ac390410388037 | [] | no_license | yuemeiss/p1804daima | f841f52e63081d53d50a199e4d148d4533605bb6 | 6ea08eb9971e42bf4ac535033a006d98ed98bf98 | refs/heads/master | 2020-03-15T23:29:59.691297 | 2018-08-06T02:42:49 | 2018-08-06T02:42:49 | 132,395,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | tu1 = (1,2,3)
alist=[123,5677,555]
for i in alist:
print(i)
for index,d in enumerate(alist):
print(index,d)
c=0
while c < len(tu1):
print(tu1[c])
c+=1
| [
"[email protected]"
] | |
8b09f80a72badcd81065c4921c3e31e1173a1a46 | c5b4d174ace61dd5914ca99fb0f2c710d0182324 | /pypes/tests/test__utils.py | bd9906b228853654176538174cd98e0cfc893330 | [
"Apache-2.0"
] | permissive | erramuzpe/pypes | 636c6b31023747a571af90390fd85b2dd6806dea | 3922d3162dc633b30961c036efdeb5d221ab1bfb | refs/heads/master | 2020-12-24T06:43:15.063955 | 2017-04-05T19:51:05 | 2017-04-05T19:51:05 | 73,461,509 | 0 | 0 | null | 2016-11-11T08:54:15 | 2016-11-11T08:54:14 | null | UTF-8 | Python | false | false | 2,645 | py | # -*- coding: utf-8 -*-
from pypes._utils import format_pair_list
def test_format_pair_list():
anat_fbasename = 'anat_hc'
regexp_subst = [
(r"/{anat}_.*corrected_seg8.mat$", "/{anat}_to_mni_affine.mat"),
(r"/m{anat}.*_corrected.nii$", "/{anat}_biascorrected.nii"),
(r"/w{anat}.*_biascorrected.nii$", "/{anat}_mni.nii"),
(r"/y_{anat}.*nii$", "/{anat}_to_mni_field.nii"),
(r"/iy_{anat}.*nii$", "/{anat}_to_mni_inv_field.nii"),
(r"/mwc1{anat}.*nii$", "/{anat}_gm_mod_w2tpm.nii"),
(r"/mwc2{anat}.*nii$", "/{anat}_wm_mod_w2tpm.nii"),
(r"/mwc3{anat}.*nii$", "/{anat}_csf_mod_w2tpm.nii"),
(r"/mwc4{anat}.*nii$", "/{anat}_nobrain_mod_w2tpm.nii"),
(r"/c1{anat}.*nii$", "/{anat}_gm.nii"),
(r"/c2{anat}.*nii$", "/{anat}_wm.nii"),
(r"/c3{anat}.*nii$", "/{anat}_csf.nii"),
(r"/c4{anat}.*nii$", "/{anat}_nobrain.nii"),
(r"/c5{anat}.*nii$", "/{anat}_nobrain_mask.nii"),
]
result = format_pair_list(regexp_subst, anat=anat_fbasename)
assert(result == [
(r"/anat_hc_.*corrected_seg8.mat$", "/anat_hc_to_mni_affine.mat"),
(r"/manat_hc.*_corrected.nii$", "/anat_hc_biascorrected.nii"),
(r"/wanat_hc.*_biascorrected.nii$", "/anat_hc_mni.nii"),
(r"/y_anat_hc.*nii$", "/anat_hc_to_mni_field.nii"),
(r"/iy_anat_hc.*nii$", "/anat_hc_to_mni_inv_field.nii"),
(r"/mwc1anat_hc.*nii$", "/anat_hc_gm_mod_w2tpm.nii"),
(r"/mwc2anat_hc.*nii$", "/anat_hc_wm_mod_w2tpm.nii"),
(r"/mwc3anat_hc.*nii$", "/anat_hc_csf_mod_w2tpm.nii"),
(r"/mwc4anat_hc.*nii$", "/anat_hc_nobrain_mod_w2tpm.nii"),
(r"/c1anat_hc.*nii$", "/anat_hc_gm.nii"),
(r"/c2anat_hc.*nii$", "/anat_hc_wm.nii"),
(r"/c3anat_hc.*nii$", "/anat_hc_csf.nii"),
(r"/c4anat_hc.*nii$", "/anat_hc_nobrain.nii"),
(r"/c5anat_hc.*nii$", "/anat_hc_nobrain_mask.nii"),
]) | [
"[email protected]"
] | |
1ab320425a4b1a6568c0ae0d930d6c9f420e792d | 168f8546daf36bead1a9b8f32e8a43fdc5d844cf | /Test/python/multiply.py | cec03f2eaf740cf2f1ca1e9f23d4046fa9dd1500 | [] | no_license | whztt07/RenderFish | ea67915a672096254444765347044c6229681d05 | 7d0a4fd6a01a949091ec05ba93c42aa1760b9408 | refs/heads/master | 2020-05-04T14:47:51.215280 | 2015-11-22T16:42:31 | 2015-11-22T16:42:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | '''py_class.py - Python source designed to demonstrate'''
'''the use of python embedding'''
class Multiply:
def __init__(self):
self.a = 6
self.b = 5
def multiply(self):
c = self.a*self.b
print 'The result of', self.a, 'x', self.b, ':', c
return c
def multiply2(self, a, b):
c = a*b
print 'The result of', a, 'x', b, ':', c
return c | [
"[email protected]"
] | |
eb3e1585341debf43f5683ae5d04f1b4cc7345dd | b9be3202a4db8875299d4a123b7c1e3c7d282eaf | /tensorflow/contrib/learn/python/learn/estimators/dnn.py | 63c262800864b7b2cbaf753220f58867cf376f3d | [
"Apache-2.0"
] | permissive | prafullasd/tensorflow | f25d0eb5997af2500d4bd2f7596d103d7028f048 | 2c55490c6d6d361985dbb0565ab08a648c819949 | refs/heads/master | 2021-01-18T13:31:25.223301 | 2016-06-03T20:27:32 | 2016-06-03T20:27:32 | 60,382,471 | 1 | 0 | null | 2016-06-03T22:58:26 | 2016-06-03T22:58:25 | null | UTF-8 | Python | false | false | 8,828 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators.base import DeprecatedMixin
from tensorflow.python.ops import nn
class DNNClassifier(dnn_linear_combined.DNNLinearCombinedClassifier):
"""A classifier for TensorFlow DNN models.
Example:
```
installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
installed_emb = embedding_column(installed_app_id, dimension=16,
combiner="sum")
impression_emb = embedding_column(impression_app_id, dimension=16,
combiner="sum")
estimator = DNNClassifier(
feature_columns=[installed_emb, impression_emb],
hidden_units=[1024, 512, 256])
# Input builders
def input_fn_train(): # returns X, Y
pass
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns X, Y
pass
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
Input of `fit`, `train`, and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
- if `feature_columns` is None, then `input` must contain only real
valued `Tensor`.
Parameters:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. [64, 32] means first layer has 64 nodes and second one has
32.
feature_columns: An iterable containing all the feature columns used by the
model. All items in the set should be instances of classes derived from
`FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc.
n_classes: number of target classes. Default is binary classification.
It must be greater than 1.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If `None`,
will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not None, the probability we will drop out a given coordinate.
"""
def __init__(self,
hidden_units,
feature_columns=None,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None):
super(DNNClassifier, self).__init__(n_classes=n_classes,
weight_column_name=weight_column_name,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_hidden_units=hidden_units,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout)
def _get_train_ops(self, features, targets):
"""See base class."""
if self._dnn_feature_columns is None:
self._dnn_feature_columns = layers.infer_real_valued_columns(features)
return super(DNNClassifier, self)._get_train_ops(features, targets)
@property
def weights_(self):
return self.dnn_weights_
@property
def bias_(self):
return self.dnn_bias_
class DNNRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
"""A regressor for TensorFlow DNN models.
Example:
```
installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
installed_emb = embedding_column(installed_app_id, dimension=16,
combiner="sum")
impression_emb = embedding_column(impression_app_id, dimension=16,
combiner="sum")
estimator = DNNRegressor(
feature_columns=[installed_emb, impression_emb],
hidden_units=[1024, 512, 256])
# Input builders
def input_fn_train(): # returns X, Y
pass
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns X, Y
pass
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
Input of `fit`, `train`, and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
- if `feature_columns` is None, then `input` must contain only real
valued `Tensor`.
Parameters:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. [64, 32] means first layer has 64 nodes and second one has
32.
feature_columns: An iterable containing all the feature columns used by the
model. All items in the set should be instances of classes derived from
`FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If `None`,
will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not None, the probability we will drop out a given coordinate.
"""
def __init__(self,
hidden_units,
feature_columns=None,
model_dir=None,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None):
super(DNNRegressor, self).__init__(weight_column_name=weight_column_name,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_hidden_units=hidden_units,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout)
def _get_train_ops(self, features, targets):
"""See base class."""
if self._dnn_feature_columns is None:
self._dnn_feature_columns = layers.infer_real_valued_columns(features)
return super(DNNRegressor, self)._get_train_ops(features, targets)
@property
def weights_(self):
return self.dnn_weights_
@property
def bias_(self):
return self.dnn_bias_
# TensorFlowDNNClassifier and TensorFlowDNNRegressor are deprecated.
class TensorFlowDNNClassifier(DeprecatedMixin, DNNClassifier,
_sklearn.ClassifierMixin):
pass
class TensorFlowDNNRegressor(DeprecatedMixin, DNNRegressor,
_sklearn.RegressorMixin):
pass
| [
"[email protected]"
] | |
a4c1c455cf5fb61154b9d3c2c35d0661314913f2 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/sdiehl-sockjs-gevent/allPythonContent.py | 20f871a6a70bd7b62eafabef19118c5e95d31179 | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175,363 | py | __FILENAME__ = conf
# -*- coding: utf-8 -*-
#
# gevent-sockjs documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 12 20:11:57 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gevent-sockjs'
copyright = u'2012, Stephen Diehl & John Debs'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'dev'
# The full version, including alpha/beta/rc tags.
release = 'dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'gevent-sockjsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'gevent-sockjs.tex', u'gevent-sockjs Documentation',
u'Stephen Diehl \\& John Debs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gevent-sockjs', u'gevent-sockjs Documentation',
[u'Stephen Diehl & John Debs'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'gevent-sockjs', u'gevent-sockjs Documentation',
u'Stephen Diehl & John Debs', 'gevent-sockjs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
########NEW FILE########
__FILENAME__ = devserver
"""
This module is most like what a user would define in their
application, namely the
- Routes
- Connection Handlers
The ones sketched here are the Echo, Disabled Websockets, and
the Close connection handlers which are used by the protocol test
suite.
"""
import gevent.monkey
# Monkey patching stdlib is not a necessity for all use cases
gevent.monkey.patch_all()
from server import SockJSServer
from router import SockJSRouter, SockJSConnection
# Need to monkey patch the threading module to use greenlets
import werkzeug.serving
class Echo(SockJSConnection):
def on_message(self, message):
self.send(message)
class DisabledWebsocket(SockJSConnection):
disallowed_transports = ('websocket',)
def on_message(self, message):
pass
class Close(SockJSConnection):
disallowed_transports = ()
def on_open(self, session):
self.close()
def on_message(self, message):
pass
router = SockJSRouter({
'echo': Echo,
'close': Close,
'disabled_websocket_echo': DisabledWebsocket,
})
@werkzeug.serving.run_with_reloader
def devel_server():
"""
A local server with code reload. Should only be used for
development.
"""
try:
sockjs = SockJSServer(('localhost',8081), router, trace=True)
sockjs.serve_forever()
except KeyboardInterrupt:
sockjs.kill()
if __name__ == '__main__':
devel_server()
########NEW FILE########
__FILENAME__ = errors
class InvalidJSON(Exception):
pass
class Http404(Exception):
def __init__(self, message=None):
if message:
self.message = message
else:
self.message = "404: Page Not Found"
assert isinstance(self.message, basestring)
def __str__(self):
return self.message
class Http405(Exception):
def __str__(self):
return '405: Method Not Allowed'
class Http500(Exception):
"""
Exception for catching exceptions, also has a slot for a
stack trace string.
"""
def __init__(self, stacktrace=None):
if stacktrace:
self.message = stacktrace
self.stacktrace = stacktrace
else:
self.message = "500: Internal Server Error"
self.stacktrace = None
assert isinstance(self.message, basestring)
def __str__(self):
return self.message
########NEW FILE########
__FILENAME__ = handler
import uuid
import sys
import re
import datetime
import time
import traceback
from Cookie import SimpleCookie
import gevent
from gevent.pywsgi import WSGIHandler
from geventwebsocket.handler import WebSocketHandler
import protocol
from errors import *
class SockJSHandler(WSGIHandler):
"""
Base request handler for all HTTP derivative transports, will
switch over to WSHandler in the case of using Websockets.
The primary purpose of this class is to delegate raw responses
from the server through the router and handle the low level
HTTP.
"""
# Dynamic URLs, urls serving data
DYNAMIC_FORMAT = re.compile(r"""
^/(?P<route>[^/]+)/ # sockjs route, alphanumeric not empty
(?P<server_id>[^/.]+)/ # load balancer id, alphanumeric not empty, without (.)
(?P<session_id>[^/.]+)/ # session id, alphanumeric not empty, without (.)
(?P<transport>[^/.]+)$ # transport string, (Example: xhr | jsonp ... )
""", re.X)
# Static URLs, urls serving static pages
STATIC_FORMAT = re.compile(r"""
^/(?P<route>[^/]+)(/)? # sockjs route, alphanumeric not empty
(?P<suffix>[^/]+)?$ # url suffix ( Example: / , info, iframe.html )
""", re.X)
RAW_FORMAT = re.compile(r"""
^/(?P<route>[^/]+)/ # sockjs route, alphanumeric not empty
websocket$ # fixed 'websocket' suffix for the raw endpoint
""", re.X)
def prep_response(self):
"""
Prepare the default headers.
Calling this will overwrite any existing headers.
"""
self.time_start = time.time()
self.status = None
self.headers = []
self.headers_sent = False
self.result = None
self.response_use_chunked = False
self.response_length = 0
def raw_headers(self):
"""
Return the available headers as a string, used for low
level socket handling.
"""
head = []
# Protocol, status line
head.append('%s %s\r\n' % (self.request_version, self.status))
for header in self.response_headers:
head.append('%s: %s\r\n' % header)
head.append('\r\n')
return ''.join(head)
def raw_chunk(self, data):
"""
Return a raw HTTP chunk, hex encoded size.
"""
return "%x\r\n%s\r\n" % (len(data), data)
# Raw write actions
# -----------------
def write_text(self, text):
self.content_type = ("Content-Type", "text/plain; charset=UTF-8")
self.headers += [self.content_type]
self.start_response("200 OK", self.headers)
self.result = [text]
self.process_result()
def write_js(self, text):
self.content_type = ("Content-Type",
"application/javascript; charset=UTF-8")
self.headers += [self.content_type]
self.start_response("200 OK", self.headers)
self.result = [text]
self.process_result()
def write_json(self, json):
self.content_type = ("Content-Type", "application/json; charset=UTF-8")
self.headers += [self.content_type]
self.start_response("200 OK", self.headers)
self.result = [protocol.encode(json)]
self.log_request()
self.process_result()
def write_html(self, html):
content_type = ("Content-Type", "text/html; charset=UTF-8")
self.headers += [content_type]
self.start_response("200 OK", self.headers)
self.result = [html]
self.process_result()
def write_options(self, allowed_methods):
self.headers += [
('Access-Control-Allow-Methods',(', '.join(allowed_methods)))
]
self.enable_caching()
self.enable_cookie()
self.enable_cors()
self.write_nothing()
def write_nothing(self):
self.start_response("204 NO CONTENT", self.headers)
self.result = [None]
self.log_request()
self.process_result()
def greeting(self):
self.write_text('Welcome to SockJS!\n')
def do404(self, message=None, cookie=False):
"""
Do a 404 NOT FOUND, allow for custom messages and the
optional ability to return a cookie on the page.
"""
self.prep_response()
self.content_type = ("Content-Type", "text/plain; charset=UTF-8")
self.headers += [self.content_type]
if cookie:
self.enable_cookie()
self.start_response("404 NOT FOUND", self.headers)
if message:
self.result = [message]
else:
self.result = ['404 Error: Page not found']
self.process_result()
self.wsgi_input._discard()
self.time_finish = time.time()
self.log_request()
def do500(self, stacktrace=None, message=None):
"""
Handle 500 errors; if we're in an exception context then
print the stack trace if SockJSServer has trace=True.
"""
self.prep_response()
if self.server.trace and not message:
# If we get an explicit stack trace use that,
# otherwise grab it from the current frame.
if stacktrace:
pretty_trace = stacktrace
else:
exc_type, exc_value, exc_tb = sys.exc_info()
stack_trace = traceback.format_exception(exc_type, exc_value, exc_tb)
pretty_trace = str('\n'.join(stack_trace))
self.start_response("500 INTERNAL SERVER ERROR", self.headers)
self.result = [pretty_trace]
else:
self.content_type = ("Content-Type", "text/plain; charset=UTF-8")
self.headers += [self.content_type]
self.start_response("500 INTERNAL SERVER ERROR", self.headers)
self.result = [message or '500: Internal Server Error']
self.process_result()
self.time_finish = time.time()
self.log_request()
# Header Manipulation
# -------------------
def enable_cors(self):
origin = self.environ.get("HTTP_ORIGIN", '*')
self.headers += [
('access-control-allow-origin', origin),
('access-control-allow-credentials', 'true')
]
def enable_nocache(self):
self.headers += [
('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0'),
]
def enable_cookie(self, cookies=None):
"""
Given a list of cookies, add them to the header.
If none are given, add a dummy JSESSIONID cookie.
"""
if self.environ.get('HTTP_COOKIE'):
cookies = [SimpleCookie(self.environ.get('HTTP_COOKIE'))]
if cookies:
for cookie in cookies:
for morsel in cookie.values():
morsel['path'] = '/'
# TODO: fixme
k, v = cookie.output().split(':')[0:2]
self.headers += [(k,v)]
else:
cookie = SimpleCookie()
cookie['JSESSIONID'] = 'dummy'
cookie['JSESSIONID']['path'] = '/'
k, v = cookie.output().split(':')
self.headers += [(k,v)]
def enable_caching(self):
d = datetime.datetime.now() + datetime.timedelta(days=365)
s = datetime.timedelta(days=365).total_seconds()
self.headers += [
('Cache-Control', 'max-age=%d, public' % s),
('Expires', d.strftime('%a, %d %b %Y %H:%M:%S')),
('access-control-max-age', int(s)),
]
def handle_websocket(self, tokens, raw=False):
handle = WSHandler(
self.socket,
self.client_address,
self.server,
self.rfile,
)
handle.tokens = tokens
handle.raw = raw
handle.__dict__.update(self.__dict__)
return handle.handle_one_response()
def handle_one_response(self):
path = self.environ.get('PATH_INFO')
meth = self.environ.get("REQUEST_METHOD")
self.router = self.server.application
self.session_pool = self.server.session_pool
# Static URLs
# -----------
static_url = self.STATIC_FORMAT.match(path)
dynamic_url = self.DYNAMIC_FORMAT.match(path)
raw_url = self.RAW_FORMAT.match(path)
# The degenerate raw websocket endpoint
if raw_url:
tokens = raw_url.groupdict()
tokens['transport'] = 'rawwebsocket'
# An ad-hoc session
tokens['session'] = uuid.uuid4()
return self.handle_websocket(tokens, raw=True)
elif static_url:
tokens = static_url.groupdict()
route = tokens['route']
suffix = tokens['suffix']
try:
static_serve = self.router.route_static(route, suffix)
raw_request_data = self.wsgi_input.readline()
self.wsgi_input._discard()
self.prep_response()
static_serve(self, meth, raw_request_data)
except Http404 as e:
return self.do404(e.message)
except Http500 as e:
return self.do500(e.stacktrace)
elif dynamic_url:
tokens = dynamic_url.groupdict()
route = tokens['route']
session_uid = tokens['session_id']
server = tokens['server_id']
transport = tokens['transport']
if transport == 'websocket':
return self.handle_websocket(tokens)
try:
# Router determines the downlink route as a
# function of the given url parameters.
downlink = self.router.route_dynamic(
route,
session_uid,
server,
transport
)
# A downlink is some data-dependent connection
# to the client taken as a result of a request.
raw_request_data = self.wsgi_input.readline()
self.prep_response()
threads = downlink(self, meth, raw_request_data)
gevent.joinall(threads)
except Http404 as e:
return self.do404(e.message, cookie=True)
except Http500 as e:
return self.do500(e.stacktrace)
except Exception:
return self.do500()
else:
self.do404()
class WSHandler(WebSocketHandler):
"""
A WSGI-esque handler but the underlying connection is a
    websocket instead of an HTTP connection.
The base SockJS handler will delegate to this in the case of
    using any websocket transport; it will then upgrade to the
websocket and throw away any existing HTTP information.
"""
def prep_response(self):
"""
Prepare the default headers.
        Calling this will overwrite any existing headers.
"""
self.time_start = time.time()
self.status = None
self.headers = []
self.headers_sent = False
self.result = None
self.response_use_chunked = False
self.response_length = 0
def bad_request(self):
"""
        Sent if we have invalid Connection headers.
"""
self.prep_response()
self.start_response('400 BAD REQUEST', [
("Content-Type", "text/plain; charset=UTF-8")
])
self.result = ['Can "Upgrade" only to "WebSocket".']
self.process_result()
def not_allowed(self):
self.prep_response()
self.start_response('405 NOT ALLOWED', [('allow', True)])
self.result = []
self.process_result()
def handle_one_response(self):
self.pre_start()
environ = self.environ
upgrade = environ.get('HTTP_UPGRADE', '').lower()
meth = self.environ.get('REQUEST_METHOD')
if meth != 'GET':
return self.not_allowed()
# Upgrade the connect if we have the proper headers
if upgrade == 'websocket':
connection = environ.get('HTTP_CONNECTION', '').lower()
if 'upgrade' in connection:
return self._handle_websocket()
# Malformed request
self.bad_request()
def _handle_websocket(self):
"""
Slightly overloaded version of gevent websocket handler,
delegates the connection to the right protocol and then
        proceeds to invoke the router to figure out what to do.
"""
        environ = self.environ
        # Guard against referencing `result` before assignment when the
        # request carries neither a hybi nor a hixie websocket header.
        result = None
        try:
try:
if environ.get("HTTP_SEC_WEBSOCKET_VERSION"):
result = self._handle_hybi()
elif environ.get("HTTP_ORIGIN"):
result = self._handle_hixie()
except:
self.close_connection = True
raise
self.result = []
if not result:
return
self.route(environ, None)
return []
finally:
self.log_request()
def route(self, environ, start_response):
"""
        Route the websocket pipe to its transport handler. The logic
        is more or less identical to the HTTP logic, except that
        instead of exposing the WSGI handler we expose the socket.
"""
self.router = self.server.application
websocket = environ.get('wsgi.websocket')
meth = environ.get("REQUEST_METHOD")
# The only mandatory url token
route = self.tokens['route']
session_uid = self.tokens.get('session_id', None)
server = self.tokens.get('server_id', None)
transport = self.tokens.get('transport', None)
# We're no longer dealing with HTTP so throw away
# anything we received.
self.wsgi_input._discard()
downlink = self.router.route_dynamic(
route,
session_uid,
server,
transport
)
#downlink.raw = self.raw
threads = downlink(websocket, None, None)
        # This is a neat trick (due to Jeffrey Gellens) of keeping
        # track of the transport threads at the handler level; it
        # ensures that if this thread is forcefully terminated the
        # transports' actions will subsequently die.
gevent.joinall(threads)
########NEW FILE########
__FILENAME__ = protocol
import hashlib
from errors import *
from simplejson.decoder import JSONDecodeError
# -----------
# Serializer
# -----------
# Fastest
# TODO:
# Should add some caveats about the unicode compatibility
# with ujson...
try:
import ujson
has_ujson = True
except ImportError:
has_ujson = False
# Faster
try:
import simplejson
has_simplejson = True
except ImportError:
has_simplejson = False
# Slowest
try:
import json
has_json = True
except ImportError:
# should never happen
has_json = False
def pick_serializer():
if has_ujson:
return ujson
elif has_simplejson:
return simplejson
elif has_json:
return json
json = pick_serializer()
# Frames
# ------
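# Single-character frame prefixes used on the wire: 'o' opens a session,
# 'c' closes it, 'a' carries a JSON array of messages and 'h' is a heartbeat.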
OPEN = "o\n"
CLOSE = "c"
MESSAGE = "a"
HEARTBEAT = "h\n"
# ------------------
IFRAME_HTML = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<script>
document.domain = document.domain;
_sockjs_onload = function(){SockJS.bootstrap_iframe();};
</script>
<script src="%s"></script>
</head>
<body>
<h2>Don't panic!</h2>
<p>This is a SockJS hidden iframe. It's used for cross domain magic.</p>
</body>
</html>
""".strip()
IFRAME_MD5 = hashlib.md5(IFRAME_HTML).hexdigest()
HTMLFILE_IFRAME_HTML = r"""
<!doctype html>
<html><head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
</head><body><h2>Don't panic!</h2>
<script>
document.domain = document.domain;
var c = parent.%s;
c.start();
function p(d) {c.message(d);};
window.onload = function() {c.stop();};
</script>
""".strip()
def encode(message):
"""
Python to JSON
"""
# TODO: actually deal with the nuances of escaping and
# unicode
if isinstance(message, basestring):
        # Don't bother calling json, since it's simple
msg = '["' + message + '"]'
elif isinstance(message, (object, dict, list)):
msg = json.dumps(message, separators=(',',':'))
else:
raise ValueError("Unable to serialize: %s", str(message))
return msg
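# For example: encode('hi') -> '["hi"]' and encode([1, 2]) -> '[1,2]'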
def decode(data):
"""
JSON to Python
"""
messages = []
data = data.decode('utf-8')
# "a['123', 'abc']" -> [123, 'abc']
try:
messages = json.loads(data)
except JSONDecodeError:
raise InvalidJSON()
return messages
def close_frame(code, reason, newline=True):
if newline:
return '%s[%d,"%s"]\n' % (CLOSE, code, reason)
else:
return '%s[%d,"%s"]' % (CLOSE, code, reason)
def message_frame(data):
assert isinstance(data, basestring)
assert '[' in data
assert ']' in data
return ''.join([MESSAGE, data])
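# For example: message_frame('["hi"]') -> 'a["hi"]'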
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
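# For example: enum('CLOSE', 'OPEN') yields a class with CLOSE == 0, OPEN == 1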
FRAMES = enum( 'CLOSE', 'OPEN', 'MESSAGE', 'HEARTBEAT' )
########NEW FILE########
__FILENAME__ = router
import re
import transports
import static
from errors import *
# Route Tables
# ============
class RegexRouter(object):
"""
A hybrid hash table, regex matching table.
Tries to do O(1) hash lookup falls back on
worst case O(n) regex matching.
"""
    def __init__(self, dct):
        # Build per-instance lookup tables; class-level mutables would be
        # shared across every RegexRouter instance.
        self._re = []
        self._dct = {}
for k, v in dct.iteritems():
try:
self._re.append((re.compile(k),v))
except:
pass
self._dct[k] = v
def __getitem__(self, k):
if self._dct.has_key(k):
return self._dct[k]
else:
for r, v in self._re:
if r.match(k):
return v
raise KeyError(k)
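# The tables below map URL suffixes and transport names to handler classes.
# For example, static_routes[None] resolves via the direct hash lookup, while
# a suffix such as 'iframe-1.0.min.html' falls back to the regex match.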
static_routes = RegexRouter({
None : static.Greeting,
'info' : static.InfoHandler,
r'iframe[0-9-.a-z_]*.html' : static.IFrameHandler,
})
dynamic_routes = {
# Ajax Tranports
# ==============
'xhr' : transports.XHRPolling,
'xhr_send' : transports.XHRSend,
'xhr_streaming' : transports.XHRStreaming,
'jsonp' : transports.JSONPolling,
'jsonp_send' : transports.JSONPSend,
# WebSockets
# ===============
'websocket' : transports.WebSocket,
'rawwebsocket' : transports.RawWebSocket,
# File Transports
# ===============
'eventsource' : transports.EventSource,
'htmlfile' : transports.HTMLFile,
'iframe' : transports.IFrame,
}
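# Each transport class declares a `direction` attribute ('send', 'recv' or
# 'bi') which route_dynamic uses to decide whether a missing session may be
# created on demand.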
class SockJSConnection(object):
disallowed_transports = tuple()
def __init__(self, session):
self.session = session
@classmethod
def transport_allowed(cls, transport):
return transport not in cls.disallowed_transports
# Event Callbacks
# ===============
def on_open(self, request):
pass
def on_message(self, message):
raise NotImplementedError()
def on_close(self):
pass
def on_error(self, exception):
raise NotImplementedError()
# Server side actions
# ===================
def send(self, message):
if self.session:
self.session.add_message(message)
else:
raise Exception("Tried to send message over closed session")
def broadcast(self, channel, message):
raise NotImplementedError()
def close(self):
if self.session:
self.session.interrupt()
else:
raise Exception("Tried to close closed session")
class SockJSRouter(object):
routes = {}
def __init__(self, applications):
"""
Set up the routing table for the specific routes attached
to this server.
"""
for route, connection in applications.iteritems():
self.routes[route] = connection
def route_static(self, route, suffix):
try:
route_handle = self.routes[route]
except:
raise Http404('No such route')
try:
handle_cls = static_routes[suffix]
except KeyError:
raise Http404('No such static page ' + str(suffix))
return handle_cls(route_handle)
def route_dynamic(self, route, session_uid, server, transport):
"""
Return the downlink transport to the client resulting
        from the request.
"""
try:
conn_cls = self.routes[route]
except:
raise Http500('No such route')
try:
transport_cls = dynamic_routes[transport]
except:
raise Http500('No such transport')
if transport_cls.direction == 'send':
create_if_null = False
elif transport_cls.direction in ('recv', 'bi'):
create_if_null = True
else:
raise Exception('Could not determine direction')
session = self.server.get_session(session_uid, \
create_if_null)
if not session:
raise Http404()
# Initialize the transport and call, any side-effectful
# code is the __init__ method, the communication is
# invoked by __call__ method.
conn = conn_cls(session)
downlink = transport_cls(session, conn)
if session.is_new:
conn.on_open(session)
session.timeout.rawlink(lambda g: conn.on_close())
return downlink
def __call__(self, environ, start_response):
        raise NotImplementedError()
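# A minimal usage sketch (the Echo connection class is hypothetical, not part
# of this module):
#
#     class Echo(SockJSConnection):
#         def on_message(self, message):
#             self.send(message)
#
#     router = SockJSRouter({'echo': Echo})
#     SockJSServer(('', 8081), router, trace=True).serve_forever()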
########NEW FILE########
__FILENAME__ = server
import session
from handler import SockJSHandler
from sessionpool import SessionPool
from gevent.pywsgi import WSGIServer
class SockJSServer(WSGIServer):
"""
The base SockJS server, subclasses gevent.pywsgi.WSGIServer
"""
session_backend = session.MemorySession
handler_class = SockJSHandler
def __init__(self, *args, **kwargs):
"""
Initialize the SockJS server
Options:
listener : ( address, port )
application : The SockJS router instance
trace : Show stack traces on 500 status code
Example::
sockjs = SockJSServer(('',8081), router)
sockjs.serve_forever()
"""
self.trace = kwargs.pop('trace', False)
super(SockJSServer, self).__init__(*args, **kwargs)
self.session_pool = SessionPool()
self.session_pool.start_gc()
# hack to get the server inside the router
self.application.server = self
def del_session(self, uid):
del self.sessions[uid]
def get_session(self, session_id='', create_if_null=False):
"""
Return an existing or initialize a new session with the
session id passed.
"""
# Is it an existing session?
session = self.session_pool.get(session_id)
# Otherwise let the client choose their session_id, if
# this transport direction allows
if create_if_null and session is None:
session = self.session_backend(self, session_id=session_id)
self.session_pool.add(session)
elif session:
session.incr_hits()
return session
def kill(self):
"""
Shutdown the server, block to inform the sessions that
they are closing.
"""
self.session_pool.shutdown()
super(SockJSServer, self).kill()
########NEW FILE########
__FILENAME__ = session
import uuid
from gevent.queue import Queue, Empty
from gevent.event import Event
from datetime import datetime, timedelta
class Session(object):
"""
Base class for Session objects. Provides for different
backends for queueing messages for sessions.
Subclasses are expected to overload the add_message and
get_messages to reflect their storage system.
"""
    # Sessions time out after 5 seconds
expires = timedelta(seconds=5)
def __init__(self, server, session_id=None):
self.expires_at = datetime.now() + self.expires
self.expired = False
self.forever = False
self.session_id = self.generate_uid()
        # Whether this was closed explicitly by client vs
# internally by garbage collection.
self.interrupted = False
# When a polling request is closed by a network error - not by
# server, the session should be automatically closed. When there
# is a network error - we're in an undefined state. Some messages
# may have been lost, there is not much we can do about it.
self.network_error = False
# Async event, use rawlink to string callbacks
self.timeout = Event()
self.locked = Event()
def generate_uid(self):
"""
Returns a string of the unique identifier of the session.
"""
return str(uuid.uuid4())
def persist(self, extension=None, forever=False):
"""
Bump the time to live of the session by a given amount,
or forever.
"""
self.expired = False
if forever:
self.forever = True
return
# Slide the expiration time one more expiration interval
# into the future
if extension is None:
self.expires_at = datetime.now() + self.expires
else:
self.expires_at = datetime.now() + extension
self.forever = False
def post_delete(self):
pass
def kill(self):
self.killed = True
self.expire()
def expire(self):
"""
Manually expire a session.
"""
self.expired = True
self.forever = False
def incr_hits(self):
self.hits += 1
def is_new(self):
return self.hits == 0
def heartbeat(self):
self.persist()
self.heartbeats += 1
return self.heartbeats
    def add_message(self, msg):
        raise NotImplementedError()
    def get_messages(self, **kwargs):
        raise NotImplementedError()
def is_locked(self):
return self.locked.is_set()
def is_network_error(self):
return self.network_error
def is_expired(self):
return self.expired
def is_interrupted(self):
return self.interrupted
def lock(self):
self.locked.set()
def unlock(self):
self.locked.clear()
    def __str__(self):
        # Return a readable representation rather than None, which would
        # make str(session) raise a TypeError.
        return '<Session %s>' % self.session_id
class MemorySession(Session):
"""
In memory session with a outgoing gevent Queue as the message
store.
"""
def __init__(self, server, session_id=None):
super(MemorySession, self).__init__(server, session_id=session_id)
self.session_id = session_id or str(uuid.uuid4())[:8]
self.server = server
self.queue = Queue()
self.hits = 0
self.heartbeats = 0
self.connected = False
def add_message(self, msg):
self.queue.put_nowait(msg)
def get_messages(self, **kwargs):
timeout = kwargs.get('timeout', None)
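        # If the queue is empty, block for up to `timeout` waiting for a
        # single message; otherwise drain everything currently queued.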
self.incr_hits()
if self.queue.empty():
try:
return self.queue.get(**kwargs)
except Empty:
return []
else:
accum = []
try:
while not self.queue.empty():
if timeout:
accum.append(self.queue.get(timeout=timeout))
else:
accum.append(self.queue.get_nowait())
finally:
return accum
def interrupt(self):
"""
        A kill event triggered through a client-accessible endpoint.
        Internal expirations will not have is_interrupted() == True.
"""
self.interrupted = True
self.kill()
def kill(self):
self.connected = False
# Expire only once
if not self.expired:
self.expired = True
self.timeout.set()
########NEW FILE########
__FILENAME__ = sessionpool
import uuid
import gevent
from heapq import heappush, heappop
from datetime import datetime
class SessionPool(object):
"""
A garbage collected Session Pool.
See: https://github.com/sdiehl/greengoop
"""
gc_cycle = 10.0
def __init__(self):
self.sessions = dict()
self.pool = []
self.gcthread = gevent.Greenlet(self._gc_sessions)
def __str__(self):
return str(self.sessions.items())
def start_gc(self):
"""
Start the session pool garbage collector. This is broken
        out into a separate function to give you more granular
        control over the context this thread is spawned in.
"""
if not self.gcthread.started:
self.gcthread.start()
return self.gcthread
else:
print "Rejected attempt to start multiple garbage \
collectors on SessionPool instance."
def _gc_sessions(self):
while True:
gevent.sleep(self.gc_cycle)
self.gc()
def add(self, session):
session.cycle = None
self.sessions[session.session_id] = session
if not session.expired:
heappush(self.pool, session)
def get(self, session_id):
"""
Get active sessions by their session id.
"""
return self.sessions.get(session_id, None)
def remove(self, session_id):
session = self.sessions.get(session_id, None)
if session:
session.post_delete()
del self.sessions[session_id]
def shutdown(self):
"""
Manually expire all sessions in the pool.
"""
while self.pool:
head = heappop(self.pool)
head.expired = True
head.timeout.set()
def __del__(self):
"""
On Python interpreter garbage collection expire all sessions, not
guaranteed to run!
"""
self.shutdown()
def gc(self):
"""
        Rearrange the heap, flagging active sessions with the id
        of this collection iteration. This data structure is
        time-independent, so sessions can be added and removed
        without the need to lock the pool.
"""
if len(self.pool) == 0:
return
current_time = datetime.now()
while self.pool:
head = self.pool[0]
# Every session is fresh
if head.cycle == current_time or head.expires_at > current_time:
break
head = heappop(self.pool)
# Flag the session with the id of this GC cycle
head.cycle = current_time
            # Session is to be GC'd immediately
if head.expired:
del self.sessions[head.session_id]
head.post_delete()
continue
if not head.forever and head.expires_at < current_time:
del self.sessions[head.session_id]
head.post_delete()
else:
heappush(self.pool, head)
########NEW FILE########
__FILENAME__ = static
import random
import protocol
from errors import *
class Greeting():
def __init__(self, conn_cls):
self.conn_cls = conn_cls
def __call__(self, handler, request_method, raw_request_data):
handler.greeting()
class InfoHandler():
def __init__(self, conn_cls):
self.conn_cls = conn_cls
def __call__(self, handler, request_method, raw_request_data):
if request_method == 'GET':
entropy = random.randint(1, 2**32)
has_ws = self.conn_cls.transport_allowed('websocket')
handler.enable_nocache()
handler.enable_cors()
handler.write_json({
'cookie_needed' : True,
'websocket' : has_ws,
'origins' : ['*:*'],
'entropy' : entropy,
'route' : self.conn_cls.__name__
})
elif request_method == 'OPTIONS':
handler.write_options(['OPTIONS','GET'])
class IFrameHandler():
def __init__(self, route):
self.route = route
def __call__(self, handler, request_method, raw_request_data):
if request_method != 'GET':
raise Http405()
cached = handler.environ.get('HTTP_IF_NONE_MATCH')
# TODO: check this is equal to our MD5
if cached:
handler.start_response("304 NOT MODIFIED", handler.headers)
handler.enable_caching()
handler.result = [None]
handler.process_result()
return
handler.headers += [
('ETag', protocol.IFRAME_MD5),
]
# TODO: actually put this in here
html = protocol.IFRAME_HTML % ('http',)
handler.enable_caching()
handler.write_html(html)
########NEW FILE########
__FILENAME__ = transports
import socket
import gevent
import urllib2
import urlparse
import simplejson as json
from socket import error as socketerror
import protocol
from errors import *
from geventwebsocket.websocket import WebSocketError
class BaseTransport(object):
def __init__(self, session, conn):
self.session = session
self.conn = conn
def encode(self, data):
"""
Wrapper around the protocol's frame encoding.
"""
return protocol.encode(data)
def decode(self, data):
"""
Wrapper around the protocol's frame decoding.
"""
return protocol.decode(data)
def write_frame(self, data):
"""
Write the data in a frame specifically for this
transport. Deals with the edge cases of formatting the
messages for the transports. Things like \n characters
and Javascript callback frames.
"""
        raise NotImplementedError()
def __call__(self, handler, request_method, raw_request_data):
"""
Downlink function, action taken as a result of the
specified route.
"""
        raise NotImplementedError()
# Receiving Transports
# ====================
#
# Receive messages from the client, provide them to the session
# object and its callbacks, provide confirmation of any actions
# taken per protocol.
class XHRSend(BaseTransport):
direction = 'send'
def __call__(self, handler, request_method, raw_request_data):
if request_method == 'OPTIONS':
handler.write_options(['OPTIONS', 'POST'])
return []
if raw_request_data == '':
handler.do500(message='Payload expected.')
return
try:
messages = self.decode(raw_request_data)
except InvalidJSON:
handler.do500(message='Broken JSON encoding.')
return
for msg in messages:
self.conn.on_message(msg)
handler.content_type = ("Content-Type", "text/plain; charset=UTF-8")
handler.headers = [handler.content_type]
handler.enable_cookie()
handler.enable_cors()
handler.write_nothing()
return []
class JSONPSend(BaseTransport):
direction = 'recv'
def __call__(self, handler, request_method, raw_request_data):
if request_method == 'OPTIONS':
handler.write_options(['OPTIONS', 'POST'])
return []
qs = urlparse.parse_qs(raw_request_data)
using_formdata = True
# Do we have a Payload?
try:
if qs.has_key('d'):
using_formdata = True
payload = qs['d']
else:
using_formdata = False
payload = raw_request_data
# todo: more granular exception catching
except Exception as e:
handler.do500(message='Payload expected.')
return
# Confirm that this at least looks like a JSON array
if not using_formdata:
if not ('[' in payload and ']' in payload):
handler.do500(message='Payload expected.')
return
try:
if using_formdata:
messages = self.decode(payload[0])
else:
messages = self.decode(payload)
except InvalidJSON:
            handler.do500(message='Broken JSON encoding.')
            return
for msg in messages:
self.conn.on_message(msg)
handler.content_type = ("Content-Type", "text/plain; charset=UTF-8")
handler.enable_cookie()
handler.enable_nocache()
handler.write_text('ok')
return []
class PollingTransport(BaseTransport):
"""
Long polling derivative transports, used for XHRPolling and
JSONPolling.
Subclasses overload the write_frame method for their
respective serialization methods.
"""
direction = 'recv'
TIMING = 5.0
def poll(self, handler):
"""
Spin lock the thread until we have a message on the
gevent queue.
"""
messages = self.session.get_messages(timeout=self.TIMING)
messages = self.encode(messages)
self.session.unlock()
handler.start_response("200 OK", [
("Access-Control-Allow-Origin", "*"),
("Connection", "close"),
self.content_type,
])
handler.write_text(self.write_frame(messages))
def __call__(self, handler, request_method, raw_request_data):
"""
        On the first poll, send back the open frame; on
        subsequent calls actually poll the queue.
"""
if request_method == 'OPTIONS':
handler.write_options(['OPTIONS', 'POST'])
return []
if self.session.is_new():
handler.enable_cookie()
handler.enable_cors()
handler.write_js(protocol.OPEN)
return []
elif self.session.is_network_error():
interrupt_error = protocol.close_frame(1002, "Connection interrupted")
handler.write_text(interrupt_error)
return []
elif self.session.is_expired():
close_error = protocol.close_frame(3000, "Go away!")
handler.write_text(close_error)
return []
elif self.session.is_locked():
lock_error = protocol.close_frame(2010, "Another connection still open")
self.session.network_error = True
handler.write_text(lock_error)
return []
else:
self.session.lock()
return [gevent.spawn(self.poll, handler)]
def write_frame(self, data):
        raise NotImplementedError()
# Polling Transports
# ==================
#
# Poll for new messages on the server.
class XHRPolling(PollingTransport):
direction = 'recv'
TIMING = 2
content_type = ("Content-Type", "text/html; charset=UTF-8")
def write_frame(self, data):
return protocol.message_frame(data) + '\n'
class JSONPolling(PollingTransport):
direction = 'recv'
content_type = ("Content-Type", "text/plain; charset=UTF-8")
def write_frame(self, data):
frame = protocol.json.dumps(protocol.message_frame(data))
return """%s(%s);\r\n""" % ( self.callback, frame)
def __call__(self, handler, request_method, raw_request_data):
try:
callback_param = handler.environ.get("QUERY_STRING").split('=')[1]
self.callback = urllib2.unquote(callback_param)
except IndexError:
handler.do500(message='"callback" parameter required')
return
if request_method == 'OPTIONS':
handler.write_options(['OPTIONS', 'POST'])
return []
if self.session.is_new():
handler.enable_nocache()
handler.enable_cookie()
handler.enable_cors()
open_frame = '%s("o");\r\n' % self.callback
handler.write_js(open_frame)
return []
elif self.session.is_expired():
close_error = protocol.close_frame(3000, "Go away!")
handler.write_text(close_error)
return []
elif self.session.is_locked():
lock_error = protocol.close_frame(2010, "Another connection still open")
handler.write_text(lock_error)
return []
else:
self.session.lock()
return [gevent.spawn(self.poll, handler)]
class XHRStreaming(PollingTransport):
direction = 'recv'
TIMING = 2
# THIS NUMBER MAY NOT BE RIGHT. DEEP MAGIC.
response_limit = 4224
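    # A 2 KiB block of 'h' characters (plus a newline) is written before any
    # frames so that buffering proxies flush the response to the client.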
prelude = 'h' * 2048 + '\n'
content_type = ("Content-Type", "application/javascript; charset=UTF-8")
def write_prelude(self, handler):
handler.enable_cookie()
handler.enable_cors()
# https://groups.google.com/forum/#!msg/sockjs/bl3af2zqc0A/w-o3OK3LKi8J
if handler.request_version == 'HTTP/1.1':
handler.headers += [
self.content_type,
("Transfer-Encoding", "chunked"),
('Connection', 'keep-alive'),
]
elif handler.request_version == 'HTTP/1.0':
handler.headers += [
self.content_type,
('Connection', 'close'),
]
# Use very low level api here, since we want more granular
# control over our response
handler.start_response("200 OK", handler.headers)
headers = handler.raw_headers()
try:
writer = handler.socket.makefile()
written = 0
writer.write(headers)
writer.flush()
prelude_chunk = handler.raw_chunk(self.prelude)
writer.write(prelude_chunk)
writer.flush()
except socket.error:
self.session.expire()
return (writer, written)
def stream(self, handler):
writer, written = self.write_prelude(handler)
try:
open_chunk = handler.raw_chunk('o\n')
writer.write(open_chunk)
writer.flush()
while written < self.response_limit:
messages = self.session.get_messages(timeout=self.TIMING)
messages = self.encode(messages)
frame = protocol.message_frame(messages) + '\n'
chunk = handler.raw_chunk(frame)
writer.write(chunk)
writer.flush()
written += len(chunk)
except socket.error:
self.session.expire()
zero_chunk = handler.raw_chunk('')
writer.write(zero_chunk)
self.session.unlock()
def __call__(self, handler, request_method, raw_request_data):
"""
"""
if request_method == 'OPTIONS':
handler.write_options(['OPTIONS', 'POST'])
return []
elif self.session.is_network_error():
writer, written = self.write_prelude(handler)
try:
interrupt_error = protocol.close_frame(1002, "Connection interrupted")
interrupt_error_chunk = handler.raw_chunk(interrupt_error)
writer.write(interrupt_error_chunk)
writer.flush()
except socket.error:
self.session.expire()
zero_chunk = handler.raw_chunk('')
writer.write(zero_chunk)
self.session.network_error = True
return []
elif self.session.is_locked():
writer, written = self.write_prelude(handler)
try:
close_error = protocol.close_frame(2010, "Another connection still open")
close_error_chunk = handler.raw_chunk(close_error)
writer.write(close_error_chunk)
writer.flush()
except socket.error:
self.session.expire()
zero_chunk = handler.raw_chunk('')
writer.write(zero_chunk)
self.session.network_error = True
return []
self.session.lock()
return [
gevent.spawn(self.stream, handler),
]
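# The htmlfile transport pads its initial payload with spaces (to roughly
# 1 KiB) so browsers start rendering the hidden iframe immediately.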
def pad(s):
return s + ' ' * (1024 - len(s) + 14)
class HTMLFile(BaseTransport):
direction = 'recv'
response_limit = 4096
def write_frame(self, data):
pass
def stream(self, handler):
try:
callback_param = handler.environ.get("QUERY_STRING").split('=')[1]
self.callback = urllib2.unquote(callback_param)
except IndexError:
handler.do500(message='"callback" parameter required')
return
# Turn on cookie, turn off caching, set headers
handler.enable_cookie()
handler.enable_nocache()
handler.headers += [
("Content-Type", "text/html; charset=UTF-8"),
("Transfer-Encoding", "chunked"),
('Connection', 'keep-alive'),
]
# Start writing
handler.start_response("200 OK", handler.headers)
headers = handler.raw_headers()
writer = handler.socket.makefile()
writer.write(headers)
written = 0
# Send down HTMLFile IFRAME
html = protocol.HTMLFILE_IFRAME_HTML % self.callback
html = pad(html)
chunk = handler.raw_chunk(html)
writer.write(chunk)
writer.flush()
written += len(chunk)
chunk = '<script>\np("o");\n</script>\r\n'
chunk = handler.raw_chunk(chunk)
writer.write(chunk)
writer.flush()
written += len(chunk)
try:
while written < self.response_limit:
messages = self.session.get_messages(timeout=5)
messages = self.encode(messages)
frame = protocol.message_frame(messages)
frame = json.dumps(frame)
chunk = '<script>\np(%s);\n</script>\r\n' % frame
chunk = handler.raw_chunk(chunk)
writer.write(chunk)
writer.flush()
written += len(chunk)
except socket.error:
self.session.expire()
zero_chunk = handler.raw_chunk('')
writer.write(zero_chunk)
writer.close()
def __call__(self, handler, request_method, raw_request_data):
return [
gevent.spawn(self.stream, handler),
]
class IFrame(BaseTransport):
direction = 'recv'
class EventSource(BaseTransport):
direction = 'recv'
TIMING = 5.0
response_limit = 4096
def encode(self, data):
# TODO: Not using protocol.encode because it doesn't escape
# things properly here. The other version should be fixed at
# some point to avoid duplication.
data = json.dumps(data, separators=(',', ':'))
if isinstance(data, basestring):
            # Don't bother calling json, since it's simple
data = '[' + data + ']'
elif isinstance(data, (object, dict, list)):
data = json.dumps(data, separators=(',',':'))
else:
raise ValueError("Unable to serialize: %s", str(data))
return protocol.message_frame(data)
def stream(self, handler):
handler.enable_cookie()
handler.enable_nocache()
handler.headers += [
("Content-Type", "text/event-stream; charset=UTF-8"),
]
write = handler.start_response("200 OK", handler.headers)
write("\r\n")
if self.session.is_new():
write("data: o\r\n\r\n")
written = 0
while written < self.response_limit:
messages = self.session.get_messages(timeout=self.TIMING)
if messages:
messages = self.encode(messages)
else:
messages = protocol.HEARTBEAT
messages = "data: %s\r\n\r\n" % messages
write(messages)
written += len(messages)
writer = handler.socket.makefile()
zero_chunk = handler.raw_chunk('')
writer.write(zero_chunk)
def __call__(self, handler, request_method, raw_request_data):
return [
gevent.spawn(self.stream, handler),
]
# Socket Transports
# ==================
#
# Provides a bidirectional connection to and from the client.
# Sending and receiving are split in two different threads.
class WebSocket(BaseTransport):
direction = 'bi'
def poll(self, socket):
"""
Spin lock the thread until we have a message on the
gevent queue.
"""
while not self.session.expired:
messages = self.session.get_messages()
messages = self.encode(messages)
socket.send(protocol.message_frame(messages))
close_error = protocol.close_frame(3000, "Go away!", newline=False)
socket.send(close_error)
# Session expires, so unlock
socket.close()
self.session.unlock()
def put(self, socket):
wsprotocol = socket.protocol
while not self.session.is_expired():
try:
messages = socket.receive() # blocking
# geventwebsocket doesn't wrap these failure modes
# into nice exceptions so we have to catch base Python
# Exceptions. :(
# Ignore invalid frames
except ValueError:
continue
except TypeError:
continue
# Ignore empty frames
except WebSocketError:
continue
# If the peer closes early then a fobj.read attribute
# won't exist so ignore.
except AttributeError:
break
#except socketerror:
#break
# Hybi = Closed
# Hixie = None
if messages is None:
break
try:
messages = protocol.decode(messages)
except InvalidJSON:
# When user sends broken data - broken JSON for example, the
# server must terminate the ws connection.
break
for msg in messages:
self.conn.on_message(msg)
self.session.incr_hits()
# Session expires, so unlock
socket.close()
self.session.unlock()
self.session.expire()
def __call__(self, socket, request_method, raw_request_data):
socket.send('o')
if self.session.is_expired():
close_error = protocol.close_frame(3000, "Go away!", newline=False)
socket.send(close_error)
socket.close()
return []
#elif self.session.is_locked():
#lock_error = protocol.close_frame(2010, "Another connection still open")
#socket.send(lock_error)
#socket.close()
#return []
self.session.lock()
return [
gevent.spawn(self.poll, socket),
gevent.spawn(self.put, socket),
]
class RawWebSocket(BaseTransport):
direction = 'bi'
def poll(self, socket):
while not self.session.is_expired():
messages = self.session.get_messages()
for message in messages:
# TODO: this is a hack because the rest of the
# transports actually use framing and this is the
                # one aberration. But it works...
if len(message) == 1:
socket.send(message[0])
else:
socket.send(message)
socket.close()
def put(self, socket):
while not self.session.is_expired():
# Just read atomic strings and do what the connection
# wants.
message = socket.receive() # blocking
if message is None:
break
self.conn.on_message([message])
self.session.incr_hits()
socket.close()
def __call__(self, socket, request_method, raw_request_data):
if self.session.is_expired():
socket.close()
return []
return [
gevent.spawn(self.poll, socket),
gevent.spawn(self.put, socket),
]
########NEW FILE########
__FILENAME__ = httplib_fork
"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|
| response = getresponse()
v
Unread-response [Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
from array import array
import os
import socket
from sys import py3kwarning
from urlparse import urlsplit
import warnings
with warnings.catch_warnings():
if py3kwarning:
warnings.filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["HTTP", "HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "error", "responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
class HTTPMessage(mimetools.Message):
def addheader(self, key, value):
"""Add header for field key handling repeats."""
prev = self.dict.get(key)
if prev is None:
self.dict[key] = value
else:
combined = ", ".join((prev, value))
self.dict[key] = combined
def addcontinue(self, key, more):
"""Add more field data from a continuation line."""
prev = self.dict[key]
self.dict[key] = prev + "\n " + more
def readheaders(self):
"""Read header lines.
Read header lines up to the entirely blank line that terminates them.
The (normally blank) line that ends the headers is skipped, but not
included in the returned list. If a non-header line ends the headers,
(which is an error), an attempt is made to backspace over it; it is
never included in the returned list.
The variable self.status is set to the empty string if all went well,
otherwise it is an error message. The variable self.headers is a
completely uninterpreted list of lines contained in the header (so
printing them will reproduce the header exactly as it appears in the
file).
If multiple header fields with the same name occur, they are combined
according to the rules in RFC 2616 sec 4.2:
Appending each subsequent field-value to the first, each separated
by a comma. The order in which header fields with the same field-name
are received is significant to the interpretation of the combined
field value.
"""
# XXX The implementation overrides the readheaders() method of
# rfc822.Message. The base class design isn't amenable to
# customized behavior here so the method here is a copy of the
# base class code with a few small changes.
self.dict = {}
self.unixfrom = ''
self.headers = hlist = []
self.status = ''
headerseen = ""
firstline = 1
startofline = unread = tell = None
if hasattr(self.fp, 'unread'):
unread = self.fp.unread
elif self.seekable:
tell = self.fp.tell
while True:
if tell:
try:
startofline = tell()
except IOError:
startofline = tell = None
self.seekable = 0
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
self.status = 'EOF in headers'
break
# Skip unix From name time lines
if firstline and line.startswith('From '):
self.unixfrom = self.unixfrom + line
continue
firstline = 0
if headerseen and line[0] in ' \t':
# XXX Not sure if continuation lines are handled properly
# for http and/or for repeating headers
# It's a continuation line.
hlist.append(line)
self.addcontinue(headerseen, line.strip())
continue
elif self.iscomment(line):
# It's a comment. Ignore it.
continue
elif self.islast(line):
# Note! No pushback here! The delimiter line gets eaten.
break
headerseen = self.isheader(line)
if headerseen:
# It's a legal header line, save it.
hlist.append(line)
self.addheader(headerseen, line[len(headerseen)+1:].strip())
continue
else:
# It's not a header line; throw it back and stop here.
if not self.dict:
self.status = 'No headers'
else:
self.status = 'Non-header line where header expected'
# Try to undo the read.
if unread:
unread(line)
elif tell:
self.fp.seek(startofline)
else:
self.status = self.status + '; bad seek'
break
class HTTPResponse:
# strict: If true, raise BadStatusLine if the status line can't be
# parsed as a valid HTTP/1.0 or 1.1 status line. By default it is
# false because it prevents clients from talking to HTTP/0.9
# servers. Note that a response with a sufficiently corrupted
# status line will look like an HTTP/0.9 response.
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
def __init__(self, sock, debuglevel=0, strict=0, method=None, buffering=False):
if buffering:
# The caller won't be using any sock.recv() calls, so buffering
# is fine and recommended for performance.
self.fp = sock.makefile('rb')
else:
# The buffer size is specified as zero, because the headers of
# the response are read with readline(). If the reads were
# buffered the readline() calls could consume some of the
            # response, which may be read via a recv() on the underlying
# socket.
self.fp = sock.makefile('rb', 0)
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
# Initialize with Simple-Response defaults
line = self.fp.readline()
if self.debuglevel > 0:
print "reply:", repr(line)
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise BadStatusLine(line)
try:
[version, status, reason] = line.split(None, 2)
except ValueError:
try:
[version, status] = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail and status
# will be treated as 0.9 response.
version = ""
if not version.startswith('HTTP/'):
if self.strict:
self.close()
raise BadStatusLine(line)
else:
# assume it's a Simple-Response from an 0.9 server
self.fp = LineAndFileWrapper(line, self.fp)
return "HTTP/0.9", 200, ""
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.msg is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print "header:", skip
self.status = status
self.reason = reason.strip()
if version == 'HTTP/1.0':
self.version = 10
elif version.startswith('HTTP/1.'):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
elif version == 'HTTP/0.9':
self.version = 9
else:
raise UnknownProtocol(version)
if self.version == 9:
self.length = None
self.chunked = 0
self.will_close = 1
self.msg = HTTPMessage(StringIO())
return
self.msg = HTTPMessage(self.fp, 0)
if self.debuglevel > 0:
for hdr in self.msg.headers:
print "header:", hdr,
# don't let the msg keep an fp
self.msg.fp = None
# are we using the chunked-style of transfer encoding?
tr_enc = self.msg.getheader('transfer-encoding')
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = 1
self.chunk_left = None
else:
self.chunked = 0
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
length = self.msg.getheader('content-length')
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == 'HEAD'):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if not self.will_close and \
not self.chunked and \
self.length is None:
self.will_close = 1
def _check_close(self):
conn = self.msg.getheader('connection')
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.msg.getheader('connection')
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.msg.getheader('keep-alive'):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.msg.getheader('proxy-connection')
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def close(self):
if self.fp:
self.fp.close()
self.fp = None
def isclosed(self):
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
# XXX It would be nice to have readline and __iter__ for this, too.
def read(self, amt=None):
if self.fp is None:
return ''
if self._method == 'HEAD':
self.close()
return ''
if self.chunked:
return self._read_chunked(amt)
if amt is None:
# unbounded read
if self.length is None:
s = self.fp.read()
else:
s = self._safe_read(self.length)
self.length = 0
self.close() # we read everything
return s
if self.length is not None:
if amt > self.length:
# clip the read to the "end of response"
amt = self.length
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
s = self.fp.read(amt)
if self.length is not None:
self.length -= len(s)
if not self.length:
self.close()
return s
def _read_chunked(self, amt):
assert self.chunked != _UNKNOWN
chunk_left = self.chunk_left
value = []
while True:
if chunk_left is None:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
chunk_left = int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self.close()
raise IncompleteRead(''.join(value))
if chunk_left == 0:
break
if amt is None:
value.append(self._safe_read(chunk_left))
elif amt < chunk_left:
value.append(self._safe_read(amt))
self.chunk_left = chunk_left - amt
return ''.join(value)
elif amt == chunk_left:
value.append(self._safe_read(amt))
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return ''.join(value)
else:
value.append(self._safe_read(chunk_left))
amt -= chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
return ''.join(value)
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line == '\r\n':
break
# we read everything; close the "file"
self.close()
return ''.join(value)
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
# NOTE(gps): As of svn r74426 socket._fileobject.read(x) will never
# return less than x bytes unless EOF is encountered. It now handles
# signal interruptions (socket.error EINTR) internally. This code
# never caught that exception anyways. It seems largely pointless.
# self.fp.read(amt) will work fine.
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(''.join(s), amt)
s.append(chunk)
amt -= len(chunk)
return ''.join(s)
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
if self.msg is None:
raise ResponseNotReady()
return self.msg.getheader(name, default)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise ResponseNotReady()
return self.msg.items()
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
strict = 0
def __init__(self, host, port=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
self.timeout = timeout
self.source_address = source_address
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
self._set_hostport(host, port)
if strict is not None:
self.strict = strict
def set_tunnel(self, host, port=None, headers=None):
""" Sets up the host and the port for the HTTP CONNECT Tunnelling.
The headers argument should be a mapping of extra HTTP headers
to send with the CONNECT request.
"""
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
def _set_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
self.host = host
self.port = port
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
self._set_hostport(self._tunnel_host, self._tunnel_port)
self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port))
for header, value in self._tunnel_headers.iteritems():
self.send("%s: %s\r\n" % (header, value))
self.send("\r\n")
response = self.response_class(self.sock, strict = self.strict,
method = self._method)
(version, code, message) = response._read_status()
if code != 200:
self.close()
raise socket.error("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if line == '\r\n': break
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = socket.create_connection((self.host,self.port),
self.timeout)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
if self.sock:
self.sock.close() # close it manually... there may be other refs
self.sock = None
if self.__response:
self.__response.close()
self.__response = None
self.__state = _CS_IDLE
def send(self, data):
"""Send `data' to the server."""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print "send:", repr(data)
blocksize = 8192
if hasattr(data,'read') and not isinstance(data, array):
if self.debuglevel > 0: print "sendIng a read()able"
datablock = data.read(blocksize)
while datablock:
self.sock.sendall(datablock)
datablock = data.read(blocksize)
else:
self.sock.sendall(data)
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _send_output(self, message_body=None):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend(("", ""))
msg = "\r\n".join(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, str):
msg += message_body
message_body = None
self.send(msg)
if message_body is not None:
#message_body was not a string (i.e. it is a file) and
#we must run the risk of Nagle
self.send(message_body)
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest()
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
hdr = '%s %s %s' % (method, url, self._http_vsn_str)
self._output(hdr)
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header. If the request is going through a proxy,
                # use the host of the actual URL, not the host of
                # the proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
try:
host_enc = self.host.encode("ascii")
except UnicodeEncodeError:
host_enc = self.host.encode("idna")
# Wrap the IPv6 Host Header with [] (RFC 2732)
if host_enc.find(':') >= 0:
host_enc = "[" + host_enc + "]"
if self.port == self.default_port:
self.putheader('Host', host_enc)
else:
self.putheader('Host', "%s:%s" % (host_enc, self.port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
hdr = '%s: %s' % (header, '\r\n\t'.join([str(v) for v in values]))
self._output(hdr)
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional
message_body argument can be used to pass message body
associated with the request. The message body will be sent in
the same packet as the message headers if possible. The
message_body should be a string.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body)
def request(self, method, url, body=None, headers={}):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers)
def _set_content_length(self, body):
# Set the content-length based on the body.
thelen = None
try:
thelen = str(len(body))
except TypeError, te:
# If this is a file-like object, try to
# fstat its file descriptor
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print "Cannot stat!!"
if thelen is not None:
self.putheader('Content-Length', thelen)
def _send_request(self, method, url, body, headers):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if body and ('content-length' not in header_names):
self._set_content_length(body)
for hdr, value in headers.iteritems():
self.putheader(hdr, value)
self.endheaders(body)
def getresponse(self, buffering=False):
"Get the response from the server."
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
#
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady()
args = (self.sock,)
kwds = {"strict":self.strict, "method":self._method}
if self.debuglevel > 0:
args += (self.debuglevel,)
if buffering:
#only add this keyword if non-default, for compatibility with
#other response_classes.
kwds["buffering"] = True;
response = self.response_class(*args, **kwds)
response.begin()
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
class HTTP:
"Compatibility class with httplib.py from 1.5."
_http_vsn = 10
_http_vsn_str = 'HTTP/1.0'
debuglevel = 0
_connection_class = HTTPConnection
def __init__(self, host='', port=None, strict=None):
"Provide a default host, since the superclass requires one."
# some joker passed 0 explicitly, meaning default port
if port == 0:
port = None
# Note that we may pass an empty string as the host; this will throw
# an error when we attempt to connect. Presumably, the client code
# will call connect before then, with a proper host.
self._setup(self._connection_class(host, port, strict))
def _setup(self, conn):
self._conn = conn
# set up delegation to flesh out interface
self.send = conn.send
self.putrequest = conn.putrequest
self.putheader = conn.putheader
self.endheaders = conn.endheaders
self.set_debuglevel = conn.set_debuglevel
conn._http_vsn = self._http_vsn
conn._http_vsn_str = self._http_vsn_str
self.file = None
def connect(self, host=None, port=None):
"Accept arguments to set the host/port, since the superclass doesn't."
if host is not None:
self._conn._set_hostport(host, port)
self._conn.connect()
def getfile(self):
"Provide a getfile, since the superclass' does not use this concept."
return self.file
def getreply(self, buffering=False):
"""Compat definition since superclass does not define it.
Returns a tuple consisting of:
- server status code (e.g. '200' if all goes well)
- server "reason" corresponding to status code
- any RFC822 headers in the response from the server
"""
try:
if not buffering:
response = self._conn.getresponse()
else:
#only add this keyword if non-default for compatibility
#with other connection classes
response = self._conn.getresponse(buffering)
except BadStatusLine, e:
### hmm. if getresponse() ever closes the socket on a bad request,
### then we are going to have problems with self.sock
### should we keep this behavior? do people use it?
# keep the socket open (as a file), and return it
self.file = self._conn.sock.makefile('rb', 0)
# close our socket -- we want to restart after any protocol error
self.close()
self.headers = None
return -1, e.line, None
self.headers = response.msg
self.file = response.fp
return response.status, response.reason, response.msg
def close(self):
self._conn.close()
# note that self.file == response.fp, which gets closed by the
# superclass. just clear the object ref here.
### hmm. messy. if status==-1, then self.file is owned by us.
### well... we aren't explicitly closing, but losing this ref will
### do it
self.file = None
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
HTTPConnection.__init__(self, host, port, strict, timeout,
source_address)
self.key_file = key_file
self.cert_file = cert_file
def connect(self):
"Connect to a host on a given (SSL) port."
sock = socket.create_connection((self.host, self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self.sock = sock
self._tunnel()
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
__all__.append("HTTPSConnection")
class HTTPS(HTTP):
"""Compatibility with 1.5 httplib interface
Python 1.5.2 did not have an HTTPS class, but it defined an
interface for sending http requests that is also useful for
https.
"""
_connection_class = HTTPSConnection
def __init__(self, host='', port=None, key_file=None, cert_file=None,
strict=None):
# provide a default host, pass the X509 cert info
# urf. compensate for bad input.
if port == 0:
port = None
self._setup(self._connection_class(host, port, key_file,
cert_file, strict))
# we never actually use these for anything, but we keep them
# here for compatibility with post-1.5.2 CVS.
self.key_file = key_file
self.cert_file = cert_file
def FakeSocket (sock, sslobj):
warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " +
"Use the result of ssl.wrap_socket() directly instead.",
DeprecationWarning, stacklevel=2)
return sslobj
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
# for backwards compatibility
error = HTTPException
class LineAndFileWrapper:
"""A limited file-like object for HTTP/0.9 responses."""
# The status-line parsing code calls readline(), which normally
# gets the HTTP status line. For a 0.9 response, however, this is
# actually the first line of the body! Clients need to get a
# readable file object that contains that line.
def __init__(self, line, file):
self._line = line
self._file = file
self._line_consumed = 0
self._line_offset = 0
self._line_left = len(line)
def __getattr__(self, attr):
return getattr(self._file, attr)
def _done(self):
# called when the last byte is read from the line. After the
# call, all read methods are delegated to the underlying file
# object.
self._line_consumed = 1
self.read = self._file.read
self.readline = self._file.readline
self.readlines = self._file.readlines
def read(self, amt=None):
if self._line_consumed:
return self._file.read(amt)
assert self._line_left
if amt is None or amt > self._line_left:
s = self._line[self._line_offset:]
self._done()
if amt is None:
return s + self._file.read()
else:
return s + self._file.read(amt - len(s))
else:
assert amt <= self._line_left
i = self._line_offset
j = i + amt
s = self._line[i:j]
self._line_offset = j
self._line_left -= amt
if self._line_left == 0:
self._done()
return s
def readline(self):
if self._line_consumed:
return self._file.readline()
assert self._line_left
s = self._line[self._line_offset:]
self._done()
return s
def readlines(self, size=None):
if self._line_consumed:
return self._file.readlines(size)
assert self._line_left
L = [self._line[self._line_offset:]]
self._done()
if size is None:
return L + self._file.readlines()
else:
return L + self._file.readlines(size)
########NEW FILE########
__FILENAME__ = test_extended
#!/usr/bin/env python
"""
"""
import os
import time
import json
import re
import unittest2 as unittest
from utils import GET, GET_async, POST, POST_async, OPTIONS
from utils import WebSocket8Client
import uuid
import nose
# Base URL
# ========
test_top_url = os.environ.get('SOCKJS_URL', 'http://localhost:8081')
base_url = test_top_url + '/echo'
close_base_url = test_top_url + '/close'
wsoff_base_url = test_top_url + '/disabled_websocket_echo'
class Test(unittest.TestCase):
# We are going to test several `404/not found` pages. We don't
# define a body or a content type.
def verify404(self, r, cookie=False):
self.assertEqual(r.status, 404)
if cookie is False:
self.verify_no_cookie(r)
elif cookie is True:
self.verify_cookie(r)
# In some cases `405/method not allowed` is more appropriate.
def verify405(self, r):
self.assertEqual(r.status, 405)
self.assertFalse(r['content-type'])
self.assertTrue(r['allow'])
self.assertFalse(r.body)
# Multiple transport protocols need to support OPTIONS method. All
# responses to OPTIONS requests must be cacheable and contain
# appropriate headers.
def verify_options(self, url, allowed_methods):
for origin in [None, 'test']:
h = {}
if origin:
h['Origin'] = origin
r = OPTIONS(url, headers=h)
self.assertEqual(r.status, 204)
self.assertTrue(re.search('public', r['Cache-Control']))
self.assertTrue(re.search('max-age=[1-9][0-9]{6}', r['Cache-Control']),
"max-age must be large, one year (31536000) is best")
self.assertTrue(r['Expires'])
self.assertTrue(int(r['access-control-max-age']) > 1000000)
self.assertEqual(r['Access-Control-Allow-Methods'], allowed_methods)
self.assertFalse(r.body)
self.verify_cors(r, origin)
self.verify_cookie(r)
# All transports except WebSockets need sticky session support
# from the load balancer. Some load balancers enable that only
# when they see `JSESSIONID` cookie. For all the session urls we
# must set this cookie.
def verify_cookie(self, r):
self.assertEqual(r['Set-Cookie'].split(';')[0].strip(),
'JSESSIONID=dummy')
self.assertEqual(r['Set-Cookie'].split(';')[1].lower().strip(),
'path=/')
def verify_no_cookie(self, r):
self.assertFalse(r['Set-Cookie'])
# Most of the XHR/Ajax based transports do support CORS if proper
# headers are set.
def verify_cors(self, r, origin=None):
self.assertEqual(r['access-control-allow-origin'], origin or '*')
# In order to get cookies (`JSESSIONID` mostly) flying, we
# need to set `allow-credentials` header to true.
self.assertEqual(r['access-control-allow-credentials'], 'true')
# Sometimes, due to transport limitations, we need to request
# private data using the GET method. In such cases it's very important
# to disallow any caching.
def verify_not_cached(self, r, origin=None):
self.assertEqual(r['Cache-Control'],
'no-store, no-cache, must-revalidate, max-age=0')
self.assertFalse(r['Expires'])
self.assertFalse(r['Last-Modified'])
@classmethod
def tearDownClass(cls):
"""
Wait five seconds for the current sessions to expire.
"""
time.sleep(5)
# Footnote
# ========
# Make this script runnable.
if __name__ == '__main__':
nose.main()
########NEW FILE########
__FILENAME__ = test_protocol
#!/usr/bin/env python
"""
[**SockJS-protocol**](https://github.com/sockjs/sockjs-protocol) is an
effort to define a protocol between in-browser
[SockJS-client](https://github.com/sockjs/sockjs-client) and its
server-side counterparts, like
[SockJS-node](https://github.com/sockjs/sockjs-node). This should
help others to write alternative server implementations.
This protocol definition is also a runnable test suite; do run it
against your server implementation. Supporting all the tests doesn't
guarantee that the SockJS client will work flawlessly; end-to-end tests
using real browsers are always required.
"""
import os
import time
import json
import re
import unittest2 as unittest
from utils import GET, GET_async, POST, POST_async, OPTIONS
from utils import WebSocket8Client
import uuid
import nose
from nose.tools import timed
# Base URL
# ========
"""
The SockJS server provides one or more SockJS services. The services
are usually exposed with simple url prefixes, like:
`http://localhost:8000/echo` or
`http://localhost:8000/broadcast`. We'll call this kind of url a
`base_url`. There is nothing wrong with base url being more complex,
like `http://localhost:8000/a/b/c/d/echo`. Base url should
never end with a slash.
Base url is the url that needs to be supplied to the SockJS client.
All paths under base url are controlled by SockJS server and are
defined by SockJS protocol.
The SockJS protocol can use either http or https.
To run these tests the server pointed to by `base_url` needs to support
the following services:
- `echo` - responds with identical data as received
- `disabled_websocket_echo` - identical to `echo`, but with websockets disabled
- `close` - server immediately closes the session
These tests should not be run more often than once every five seconds -
many tests operate on the same (named) sessions and they need to have
enough time to time out.
"""
test_top_url = os.environ.get('SOCKJS_URL', 'http://localhost:8081')
base_url = test_top_url + '/echo'
close_base_url = test_top_url + '/close'
wsoff_base_url = test_top_url + '/disabled_websocket_echo'
# Static URLs
# ===========
class Test(unittest.TestCase):
# We are going to test several `404/not found` pages. We don't
# define a body or a content type.
def verify404(self, r, cookie=False):
self.assertEqual(r.status, 404)
if cookie is False:
self.verify_no_cookie(r)
elif cookie is True:
self.verify_cookie(r)
# In some cases `405/method not allowed` is more appropriate.
def verify405(self, r):
self.assertEqual(r.status, 405)
self.assertFalse(r['content-type'])
self.assertTrue(r['allow'])
self.assertFalse(r.body)
# Multiple transport protocols need to support OPTIONS method. All
# responses to OPTIONS requests must be cacheable and contain
# appropriate headers.
def verify_options(self, url, allowed_methods):
for origin in [None, 'test']:
h = {}
if origin:
h['Origin'] = origin
r = OPTIONS(url, headers=h)
self.assertEqual(r.status, 204)
self.assertTrue(re.search('public', r['Cache-Control']))
self.assertTrue(re.search('max-age=[1-9][0-9]{6}', r['Cache-Control']),
"max-age must be large, one year (31536000) is best")
self.assertTrue(r['Expires'])
self.assertTrue(int(r['access-control-max-age']) > 1000000)
self.assertEqual(r['Access-Control-Allow-Methods'], allowed_methods)
self.assertFalse(r.body)
self.verify_cors(r, origin)
self.verify_cookie(r)
# All transports except WebSockets need sticky session support
# from the load balancer. Some load balancers enable that only
# when they see `JSESSIONID` cookie. For all the session urls we
# must set this cookie.
def verify_cookie(self, r):
self.assertEqual(r['Set-Cookie'].split(';')[0].strip(),
'JSESSIONID=dummy')
self.assertEqual(r['Set-Cookie'].split(';')[1].lower().strip(),
'path=/')
def verify_no_cookie(self, r):
self.assertFalse(r['Set-Cookie'])
# Most of the XHR/Ajax based transports do support CORS if proper
# headers are set.
def verify_cors(self, r, origin=None):
self.assertEqual(r['access-control-allow-origin'], origin or '*')
# In order to get cookies (`JSESSIONID` mostly) flying, we
# need to set `allow-credentials` header to true.
self.assertEqual(r['access-control-allow-credentials'], 'true')
# Sometimes, due to transport limitations, we need to request
# private data using the GET method. In such cases it's very important
# to disallow any caching.
def verify_not_cached(self, r, origin=None):
self.assertEqual(r['Cache-Control'],
'no-store, no-cache, must-revalidate, max-age=0')
self.assertFalse(r['Expires'])
self.assertFalse(r['Last-Modified'])
@classmethod
def tearDownClass(cls):
"""
Wait five seconds for the current sessions to expire.
"""
time.sleep(5)
# Greeting url: `/`
# ----------------
class BaseUrlGreeting(Test):
# The most important part of the url scheme, is without doubt, the
# top url. Make sure the greeting is valid.
def test_greeting(self):
for url in [base_url, base_url + '/']:
r = GET(url)
self.assertEqual(r.status, 200)
self.assertEqual(r['content-type'], 'text/plain; charset=UTF-8')
self.assertEqual(r.body, 'Welcome to SockJS!\n')
self.verify_no_cookie(r)
# Other simple requests should return 404.
def test_notFound(self):
for suffix in ['/a', '/a.html', '//', '///', '/a/a', '/a/a/', '/a',
'/a/']:
self.verify404(GET(base_url + suffix))
# IFrame page: `/iframe*.html`
# ----------------------------
class IframePage(Test):
"""
Some transports don't support cross domain communication
(CORS). In order to support them we need to do a cross-domain
trick: on the remote (server) domain we serve a simple html page
that loads back the SockJS client javascript and is able to
communicate with the server within the same domain.
"""
iframe_body = re.compile('''
^<!DOCTYPE html>
<html>
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<script>
document.domain = document.domain;
_sockjs_onload = function\(\){SockJS.bootstrap_iframe\(\);};
</script>
<script src="(?P<sockjs_url>[^"]*)"></script>
</head>
<body>
<h2>Don't panic!</h2>
<p>This is a SockJS hidden iframe. It's used for cross domain magic.</p>
</body>
</html>$
'''.strip())
# SockJS server must provide this html page.
def test_simpleUrl(self):
self.verify(base_url + '/iframe.html')
# To properly utilize caching, the same content must be served
# for requests which try to version the iframe. The server may want
# to give a slightly different answer for every SockJS client
# revision.
def test_versionedUrl(self):
for suffix in ['/iframe-a.html', '/iframe-.html', '/iframe-0.1.2.html',
'/iframe-0.1.2abc-dirty.2144.html']:
self.verify(base_url + suffix)
# In some circumstances (`devel` set to true) the client library
# wants to skip caching altogether. That is achieved by
# supplying a random query string.
def test_queriedUrl(self):
for suffix in ['/iframe-a.html?t=1234', '/iframe-0.1.2.html?t=123414',
'/iframe-0.1.2abc-dirty.2144.html?t=qweqweq123']:
self.verify(base_url + suffix)
# Malformed urls must give 404 answer.
def test_invalidUrl(self):
for suffix in ['/iframe.htm', '/iframe', '/IFRAME.HTML', '/IFRAME',
'/iframe.HTML', '/iframe.xml', '/iframe-/.html']:
r = GET(base_url + suffix)
self.verify404(r)
# The '/iframe.html' page and its variants must give `200/ok` and be
# served with 'text/html' content type.
def verify(self, url):
r = GET(url)
self.assertEqual(r.status, 200)
self.assertEqual(r['content-type'], 'text/html; charset=UTF-8')
# The iframe page must be strongly cacheable, supply
# Cache-Control, Expires and Etag headers and avoid
# Last-Modified header.
self.assertTrue(re.search('public', r['Cache-Control']))
self.assertTrue(re.search('max-age=[1-9][0-9]{6}', r['Cache-Control']),
"max-age must be large, one year (31536000) is best")
self.assertTrue(r['Expires'])
self.assertTrue(r['ETag'])
self.assertFalse(r['last-modified'])
# Body must be exactly as specified, with the exception of
# `sockjs_url`, which should be configurable.
match = self.iframe_body.match(r.body.strip())
self.assertTrue(match)
# `Sockjs_url` must be a valid url and should utilize caching.
sockjs_url = match.group('sockjs_url')
self.assertTrue(sockjs_url.startswith('/') or
sockjs_url.startswith('http'))
self.verify_no_cookie(r)
return r
# The iframe page must be strongly cacheable. ETag headers must
# not change too often. Server must support 'if-none-match'
# requests.
def test_cacheability(self):
r1 = GET(base_url + '/iframe.html')
r2 = GET(base_url + '/iframe.html')
self.assertEqual(r1['etag'], r2['etag'])
self.assertTrue(r1['etag']) # Let's make sure ETag isn't None.
r = GET(base_url + '/iframe.html', headers={'If-None-Match': r1['etag']})
self.assertEqual(r.status, 304)
self.assertFalse(r['content-type'])
self.assertFalse(r.body)
# Info test: `/info`
# ------------------
#
# Warning: this is a replacement of `/chunking_test` functionality
# from SockJS 0.1.
class InfoTest(Test):
# This url is called before the client starts the session. It's
# used to check server capabilities (websocket support, cookies
# requirement) and to get the value of the "origin" setting (currently
# not used).
#
# But more importantly, the call to this url is used to measure
# the roundtrip time between the client and the server. So, please,
# do respond to this url in a timely fashion.
def test_basic(self):
r = GET(base_url + '/info')
self.assertEqual(r.status, 200)
self.assertEqual(r['content-type'],
'application/json; charset=UTF-8')
self.verify_no_cookie(r)
self.verify_not_cached(r)
self.verify_cors(r)
data = json.loads(r.body)
# Are websockets enabled on the server?
self.assertEqual(data['websocket'], True)
# Do transports need to support cookies (i.e. for load
# balancing purposes)? The test server must have the `cookie_needed`
# option enabled.
self.assertEqual(data['cookie_needed'], True)
# List of allowed origins. Currently ignored.
self.assertEqual(data['origins'], ['*:*'])
# Source of entropy for random number generator.
self.assertTrue(isinstance(data['entropy'], int))
# As browsers don't have a good entropy source, the server must
# help with that. The info url must supply a good, unpredictable random
# number from the range 0..2^32 to feed the browser.
def test_entropy(self):
r1 = GET(base_url + '/info')
data1 = json.loads(r1.body)
r2 = GET(base_url + '/info')
data2 = json.loads(r2.body)
self.assertTrue(isinstance(data1['entropy'], int))
self.assertTrue(isinstance(data2['entropy'], int))
self.assertNotEqual(data1['entropy'], data2['entropy'])
# Info url must support CORS.
def test_options(self):
self.verify_options(base_url + '/info', 'OPTIONS, GET')
# The 'disabled_websocket_echo' service should have websockets
# disabled.
def test_disabled_websocket(self):
r = GET(wsoff_base_url + '/info')
self.assertEqual(r.status, 200)
data = json.loads(r.body)
self.assertEqual(data['websocket'], False)
# Session URLs
# ============
# Top session URL: `/<server>/<session>`
# --------------------------------------
#
# The session between the client and the server is always initialized
# by the client. The client chooses `server_id`, which should be a
# three digit number: 000 to 999. It can be supplied by the user or
# randomly generated. The main reason for this parameter is to make it
# easier to configure the load balancer - and enable sticky sessions based
# on the first part of the url.
#
# Second parameter `session_id` must be a random string, unique for
# every session.
#
# It is undefined what happens when two clients share the same
# `session_id`. It is the client's responsibility to choose an identifier
# with enough entropy.
#
# Neither server nor client APIs can expose `session_id` to the
# application. This field must be protected from the app.
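# For illustration only - a minimal sketch (not part of the protocol
# definition or of the test suite) of how a client could construct such a
# session url. The helper name and the use of the `random` module are
# assumptions made for this example.
import random
def example_session_url(base):
    server_id = '%03d' % random.randint(0, 999)   # load balancer hint
    session_id = str(uuid.uuid4())                # unique, high-entropy id
    return base + '/' + server_id + '/' + session_id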
class SessionURLs(Test):
# The server must accept any value in `server` and `session` fields.
def test_anyValue(self):
self.verify('/a/a')
for session_part in ['/_/_', '/1/1', '/abcdefgh_i-j%20/abcdefg_i-j%20']:
self.verify(session_part)
# To test session URLs we're going to use `xhr-polling` transport
# facilities.
def verify(self, session_part):
r = POST(base_url + session_part + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'o\n')
# But not an empty string, anything containing dots, or paths with
# fewer or more parts.
def test_invalidPaths(self):
for suffix in ['//', '/a./a', '/a/a.', '/./.' ,'/', '///']:
self.verify404(GET(base_url + suffix + '/xhr'))
self.verify404(POST(base_url + suffix + '/xhr'))
# A session is identified by only `session_id`. `server_id` is a
# parameter for the load balancer and must be ignored by the server.
def test_ignoringServerId(self):
session_id = str(uuid.uuid4())
r = POST(base_url + '/000/' + session_id + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'o\n')
payload = '["a"]'
r = POST(base_url + '/000/' + session_id + '/xhr_send', body=payload)
self.assertEqual(r.status, 204)
self.assertFalse(r.body)
r = POST(base_url + '/999/' + session_id + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'a["a"]\n')
# Protocol and framing
# --------------------
#
# SockJS tries to stay API-compatible with WebSockets, but not on the
# network layer. For technical reasons SockJS must introduce custom
# framing and simple custom protocol.
#
# ### Framing accepted by the client
#
# The SockJS client accepts the following frames:
#
# * `o` - Open frame. Every time a new session is established, the
# server must immediately send the open frame. This is required, as
# some protocols (mostly polling) can't distinguish between a
# properly established connection and a broken one - we must
# convince the client that it is indeed a valid url and it can be
# expecting further messages in the future on that url.
#
# * `h` - Heartbeat frame. Most load balancers have arbitrary timeouts
# on connections. In order to keep connections from breaking, the
# server must send a heartbeat frame every now and then. The typical
# delay is 25 seconds and should be configurable.
#
# * `a` - Array of json-encoded messages. For example: `a["message"]`.
#
# * `c` - Close frame. This frame is sent to the browser every time
# the client asks for data on a closed connection. This may happen
# multiple times. Close frame contains a code and a string explaining
# a reason of closure, like: `c[3000,"Go away!"]`.
#
# ### Framing accepted by the server
#
# SockJS server does not have any framing defined. All incoming data
# is treated as incoming messages, either single json-encoded messages
# or an array of json-encoded messages, depending on transport.
#
# ### Tests
#
# To explain the protocol we'll use `xhr-polling` transport
# facilities.
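# For illustration only - a minimal sketch (not part of the test suite) of
# how a client might interpret the framing described above. The function
# name is an assumption made for this example.
def example_parse_frame(frame):
    if frame == 'o':                               # open frame
        return ('open', None)
    if frame == 'h':                               # heartbeat frame
        return ('heartbeat', None)
    if frame.startswith('a'):                      # e.g. a["message"]
        return ('messages', json.loads(frame[1:]))
    if frame.startswith('c'):                      # e.g. c[3000,"Go away!"]
        code, reason = json.loads(frame[1:])
        return ('close', (code, reason))
    raise ValueError('unknown frame: %r' % (frame,))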
class Protocol(Test):
# When the server receives a request with an unknown `session_id` it must
# recognize that as a request for a new session. When the server opens a
# new session it must immediately send a frame containing the letter
# `o`.
def test_simpleSession(self):
trans_url = base_url + '/000/' + str(uuid.uuid4())
r = POST(trans_url + '/xhr')
"New line is a frame delimiter specific for xhr-polling"
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'o\n')
# After a session has been established the server needs to accept
# requests for sending messages.
"Xhr-polling accepts messages as a list of JSON-encoded strings."
payload = '["a"]'
r = POST(trans_url + '/xhr_send', body=payload)
self.assertEqual(r.status, 204)
self.assertFalse(r.body)
'''We're using an echo service - we'll receive our message
back. The message is encoded as an array 'a'.'''
r = POST(trans_url + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'a["a"]\n')
# Sending messages to non-existing sessions is invalid.
payload = '["a"]'
r = POST(base_url + '/000/bad_session/xhr_send', body=payload)
self.verify404(r, cookie=True)
# The session must time out after 5 seconds of not having a
# receiving connection. The server must send a heartbeat frame
# every 25 seconds. The heartbeat frame contains a single `h`
# character. This delay may be configurable.
pass
# The server must not allow two receiving connections to wait
# on a single session. In such a case the server must send a
# close frame to the new connection.
r1 = POST_async(trans_url + '/xhr', load=False)
r2 = POST(trans_url + '/xhr')
r1.close()
self.assertEqual(r2.body, 'c[2010,"Another connection still open"]\n')
self.assertEqual(r2.status, 200)
# The server may terminate the connection, passing error code and
# message.
def test_closeSession(self):
trans_url = close_base_url + '/000/' + str(uuid.uuid4())
r = POST(trans_url + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'o\n')
r = POST(trans_url + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'c[3000,"Go away!"]\n')
# Until the timeout occurs, the server must constantly serve
# the close message.
r = POST(trans_url + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'c[3000,"Go away!"]\n')
# WebSocket protocols: `/*/*/websocket`
# -------------------------------------
import websocket
websocket.setdefaulttimeout(5)
# The most important feature of SockJS is to support native WebSocket
# protocol. A decent SockJS server should support at least the
# following variants:
#
# - hixie-75 (Chrome 4, Safari 5.0.0)
# - hixie-76/hybi-00 (Chrome 6, Safari 5.0.1)
# - hybi-07 (Firefox 6)
# - hybi-10 (Firefox 7, Chrome 14)
#
class WebsocketHttpErrors(Test):
# Normal requests to websocket should not succeed.
def test_httpMethod(self):
r = GET(base_url + '/0/0/websocket')
self.assertEqual(r.status, 400)
self.assertTrue('Can "Upgrade" only to "WebSocket".' in r.body)
# Server should be able to reject connections if origin is
# invalid.
def test_verifyOrigin(self):
#r = GET(base_url + '/0/0/websocket', {'Upgrade': 'WebSocket',
# 'Origin': 'VeryWrongOrigin'})
#self.assertEqual(r.status, 400)
#self.assertEqual(r.body, 'Unverified origin.')
pass
# Some proxies and load balancers can rewrite the 'Connection' header;
# in such cases we must refuse the connection.
def test_invalidConnectionHeader(self):
r = GET(base_url + '/0/0/websocket', headers={'Upgrade': 'WebSocket',
'Connection': 'close'})
self.assertEqual(r.status, 400)
self.assertTrue('"Connection" must be "Upgrade".', r.body)
# WebSocket should only accept GET
def test_invalidMethod(self):
for h in [{'Upgrade': 'WebSocket', 'Connection': 'Upgrade'},
{}]:
r = POST(base_url + '/0/0/websocket', headers=h)
self.verify405(r)
# Support WebSocket Hixie-76 protocol
class WebsocketHixie76(Test):
def test_transport(self):
ws_url = 'ws:' + base_url.split(':',1)[1] + \
'/000/' + str(uuid.uuid4()) + '/websocket'
ws = websocket.create_connection(ws_url)
self.assertEqual(ws.recv(), u'o')
ws.send(u'["a"]')
self.assertEqual(ws.recv(), u'a["a"]')
ws.close()
def test_close(self):
ws_url = 'ws:' + close_base_url.split(':',1)[1] + \
'/000/' + str(uuid.uuid4()) + '/websocket'
ws = websocket.create_connection(ws_url)
self.assertEqual(ws.recv(), u'o')
self.assertEqual(ws.recv(), u'c[3000,"Go away!"]')
# The connection should be closed after the close frame.
with self.assertRaises(websocket.ConnectionClosedException):
ws.recv()
ws.close()
# Empty frames must be ignored by the server side.
def test_empty_frame(self):
ws_url = 'ws:' + base_url.split(':',1)[1] + \
'/000/' + str(uuid.uuid4()) + '/websocket'
ws = websocket.create_connection(ws_url)
self.assertEqual(ws.recv(), u'o')
# Server must ignore empty messages.
ws.send(u'')
ws.send(u'"a"')
self.assertEqual(ws.recv(), u'a["a"]')
ws.close()
# For WebSockets, as opposed to other transports, it is valid to
# reuse `session_id`. The lifetime of a SockJS WebSocket session is
# defined by the lifetime of the underlying WebSocket connection. It is
# correct to have two separate sessions sharing the same
# `session_id` at the same time.
def test_reuseSessionId(self):
on_close = lambda ws: self.assertFalse(True)
ws_url = 'ws:' + base_url.split(':',1)[1] + \
'/000/' + str(uuid.uuid4()) + '/websocket'
ws1 = websocket.create_connection(ws_url, on_close=on_close)
self.assertEqual(ws1.recv(), u'o')
ws2 = websocket.create_connection(ws_url, on_close=on_close)
self.assertEqual(ws2.recv(), u'o')
ws1.send(u'"a"')
self.assertEqual(ws1.recv(), u'a["a"]')
ws2.send(u'"b"')
self.assertEqual(ws2.recv(), u'a["b"]')
ws1.close()
ws2.close()
# It is correct to reuse the same `session_id` after closing a
# previous connection.
ws1 = websocket.create_connection(ws_url)
self.assertEqual(ws1.recv(), u'o')
ws1.send(u'"a"')
self.assertEqual(ws1.recv(), u'a["a"]')
ws1.close()
# Verify WebSocket headers sanity. Due to HAProxy design the
# websocket server must support writing response headers *before*
# receiving the -76 nonce. In other words, the websocket code must
# work like this:
#
# * Receive request headers.
# * Write response headers.
# * Receive request nonce.
# * Write response nonce.
def test_headersSanity(self):
url = base_url.split(':',1)[1] + \
'/000/' + str(uuid.uuid4()) + '/websocket'
ws_url = 'ws:' + url
http_url = 'http:' + url
origin = '/'.join(http_url.split('/')[:3])
h = {'Upgrade': 'WebSocket',
'Connection': 'Upgrade',
'Origin': origin,
'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00'
}
r = GET_async(http_url, headers=h)
self.assertEqual(r.status, 101)
self.assertEqual(r['sec-websocket-location'], ws_url)
self.assertEqual(r['connection'].lower(), 'upgrade')
self.assertEqual(r['upgrade'].lower(), 'websocket')
self.assertEqual(r['sec-websocket-origin'], origin)
self.assertFalse(r['content-length'])
r.close()
# When the user sends broken data - broken JSON for example - the
# server must terminate the ws connection.
@timed(1)
def test_broken_json(self):
ws_url = 'ws:' + base_url.split(':',1)[1] + \
'/000/' + str(uuid.uuid4()) + '/websocket'
ws = websocket.create_connection(ws_url)
self.assertEqual(ws.recv(), u'o')
ws.send(u'"a')
with self.assertRaises(websocket.ConnectionClosedException):
ws.recv()
ws.close()
# The server must support Hybi-10 protocol
class WebsocketHybi10(Test):
def test_transport(self):
trans_url = base_url + '/000/' + str(uuid.uuid4()) + '/websocket'
ws = WebSocket8Client(trans_url)
self.assertEqual(ws.recv(), 'o')
# Server must ignore empty messages.
ws.send(u'')
ws.send(u'"a"')
self.assertEqual(ws.recv(), 'a["a"]')
ws.close()
def test_close(self):
trans_url = close_base_url + '/000/' + str(uuid.uuid4()) + '/websocket'
ws = WebSocket8Client(trans_url)
self.assertEqual(ws.recv(), u'o')
self.assertEqual(ws.recv(), u'c[3000,"Go away!"]')
with self.assertRaises(ws.ConnectionClosedException):
ws.recv()
ws.close()
# Verify WebSocket headers sanity. Server must support both
# Hybi-07 and Hybi-10.
def test_headersSanity(self):
for version in ['7', '8', '13']:
url = base_url.split(':',1)[1] + \
'/000/' + str(uuid.uuid4()) + '/websocket'
ws_url = 'ws:' + url
http_url = 'http:' + url
origin = '/'.join(http_url.split('/')[:3])
h = {'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Version': version,
'Sec-WebSocket-Origin': 'http://asd',
'Sec-WebSocket-Key': 'x3JJHMbDL1EzLkh9GBhXDw==',
}
r = GET_async(http_url, headers=h)
self.assertEqual(r.status, 101)
self.assertEqual(r['sec-websocket-accept'], 'HSmrc0sMlYUkAGmm5OPpG2HaGWk=')
self.assertEqual(r['connection'].lower(), 'upgrade')
self.assertEqual(r['upgrade'].lower(), 'websocket')
self.assertFalse(r['content-length'])
r.close()
# When the user sends broken data - broken JSON for example - the
# server must terminate the ws connection.
def test_broken_json(self):
ws_url = 'ws:' + base_url.split(':',1)[1] + \
'/000/' + str(uuid.uuid4()) + '/websocket'
ws = WebSocket8Client(ws_url)
self.assertEqual(ws.recv(), u'o')
ws.send(u'"a')
with self.assertRaises(ws.ConnectionClosedException):
ws.recv()
ws.close()
# As a fun part, Firefox 6.0.2 supports Websockets protocol '7'. But,
# it doesn't send a normal 'Connection: Upgrade' header. Instead it
# sends: 'Connection: keep-alive, Upgrade'. Brilliant.
def test_firefox_602_connection_header(self):
url = base_url.split(':',1)[1] + \
'/000/' + str(uuid.uuid4()) + '/websocket'
ws_url = 'ws:' + url
http_url = 'http:' + url
origin = '/'.join(http_url.split('/')[:3])
h = {'Upgrade': 'websocket',
'Connection': 'keep-alive, Upgrade',
'Sec-WebSocket-Version': '7',
'Sec-WebSocket-Origin': 'http://asd',
'Sec-WebSocket-Key': 'x3JJHMbDL1EzLkh9GBhXDw==',
}
r = GET_async(http_url, headers=h)
self.assertEqual(r.status, 101)
# XhrPolling: `/*/*/xhr`, `/*/*/xhr_send`
# ---------------------------------------
#
# The server must support xhr-polling.
class XhrPolling(Test):
# The transport must support CORS requests, and answer correctly
# to OPTIONS requests.
def test_options(self):
for suffix in ['/xhr', '/xhr_send']:
self.verify_options(base_url + '/abc/abc' + suffix,
'OPTIONS, POST')
# Test the transport itself.
def test_transport(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = POST(url + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'o\n')
self.assertEqual(r['content-type'],
'application/javascript; charset=UTF-8')
self.verify_cookie(r)
self.verify_cors(r)
# Xhr transports receive json-encoded array of messages.
r = POST(url + '/xhr_send', body='["x"]')
self.assertEqual(r.status, 204)
self.assertFalse(r.body)
# The content type of `xhr_send` must be set to `text/plain`,
# even though the response code is `204`. This is due to
# Firefox/Firebug behaviour - it assumes that the content type
# is xml and shouts about it.
self.assertEqual(r['content-type'], 'text/plain; charset=UTF-8')
self.verify_cookie(r)
self.verify_cors(r)
r = POST(url + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'a["x"]\n')
# Publishing messages to a non-existing session must result in
# a 404 error.
def test_invalid_session(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = POST(url + '/xhr_send', body='["x"]')
self.verify404(r, cookie=None)
# The server must behave properly when invalid json data is sent or when no
# json data is sent at all.
def test_invalid_json(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = POST(url + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'o\n')
r = POST(url + '/xhr_send', body='["x')
self.assertEqual(r.status, 500)
self.assertTrue("Broken JSON encoding." in r.body)
r = POST(url + '/xhr_send', body='')
self.assertEqual(r.status, 500)
self.assertTrue("Payload expected." in r.body)
r = POST(url + '/xhr_send', body='["a"]')
self.assertFalse(r.body)
self.assertEqual(r.status, 204)
r = POST(url + '/xhr')
self.assertEqual(r.body, 'a["a"]\n')
self.assertEqual(r.status, 200)
# The server must accept messages sent with different content
# types.
def test_content_types(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = POST(url + '/xhr')
self.assertEqual(r.body, 'o\n')
ctypes = ['text/plain', 'T', 'application/json', 'application/xml', '',
'application/json; charset=utf-8', 'text/xml; charset=utf-8',
'text/xml']
for ct in ctypes:
r = POST(url + '/xhr_send', body='["a"]', headers={'Content-Type': ct})
self.assertEqual(r.status, 204)
self.assertFalse(r.body)
r = POST(url + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'a[' + (',').join(['"a"']*len(ctypes)) +']\n')
# JSESSIONID cookie must be set by default.
def test_jsessionid(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = POST(url + '/xhr')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'o\n')
self.verify_cookie(r)
# And must be echoed back if it's already set.
url = base_url + '/000/' + str(uuid.uuid4())
r = POST(url + '/xhr', headers={'Cookie': 'JSESSIONID=abcdef'})
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'o\n')
self.assertEqual(r['Set-Cookie'].split(';')[0].strip(),
'JSESSIONID=abcdef')
self.assertEqual(r['Set-Cookie'].split(';')[1].lower().strip(),
'path=/')
# XhrStreaming: `/*/*/xhr_streaming`
# ----------------------------------
class XhrStreaming(Test):
def test_options(self):
self.verify_options(base_url + '/abc/abc/xhr_streaming',
'OPTIONS, POST')
def test_transport(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = POST_async(url + '/xhr_streaming')
self.assertEqual(r.status, 200)
self.assertEqual(r['Content-Type'],
'application/javascript; charset=UTF-8')
self.verify_cookie(r)
self.verify_cors(r)
# The transport must first send 2KiB of `h` bytes as prelude.
self.assertEqual(r.read(), 'h' * 2048 + '\n')
self.assertEqual(r.read(), 'o\n')
r1 = POST(url + '/xhr_send', body='["x"]')
self.assertEqual(r1.status, 204)
self.assertFalse(r1.body)
self.assertEqual(r.read(), 'a["x"]\n')
r.close()
def test_response_limit(self):
# Single streaming request will buffer all data until
# closed. In order to remove (garbage collect) old messages
# from the browser memory we should close the connection every
# now and then. By default we should close a streaming request
# every time 128KiB of messages has been sent. The test server should have
# this limit decreased to 4096B.
url = base_url + '/000/' + str(uuid.uuid4())
r = POST_async(url + '/xhr_streaming')
self.assertEqual(r.status, 200)
self.assertTrue(r.read()) # prelude
self.assertEqual(r.read(), 'o\n')
# Test server should gc streaming session after 4096 bytes
# were sent (including framing).
msg = '"' + ('x' * 128) + '"'
for i in range(31):
r1 = POST(url + '/xhr_send', body='[' + msg + ']')
self.assertEqual(r1.status, 204)
self.assertEqual(r.read(), 'a[' + msg + ']\n')
# The connection should be closed after enough data was
# delivered.
self.assertFalse(r.read())
# EventSource: `/*/*/eventsource`
# -------------------------------
#
# For details of this protocol framing read the spec:
#
# * [http://dev.w3.org/html5/eventsource/](http://dev.w3.org/html5/eventsource/)
#
# Beware leading spaces.
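# For illustration only - a minimal sketch (not part of the test suite) of
# the framing the tests below expect: after an initial "\r\n" prelude,
# every SockJS frame is wrapped in an event-stream "data:" line.
def example_eventsource_chunk(frame):
    return 'data: ' + frame + '\r\n\r\n'           # e.g. 'data: o\r\n\r\n'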
class EventSource(Test):
def test_transport(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = GET_async(url + '/eventsource')
self.assertEqual(r.status, 200)
self.assertEqual(r['Content-Type'],
'text/event-stream; charset=UTF-8')
# As EventSource is requested using GET we must be very
# careful not to allow it to be cached.
self.verify_not_cached(r)
self.verify_cookie(r)
# The transport must first send a new line prelude, due to a
# bug in Opera.
self.assertEqual(r.read(), '\r\n')
self.assertEqual(r.read(), 'data: o\r\n\r\n')
r1 = POST(url + '/xhr_send', body='["x"]')
self.assertFalse(r1.body)
self.assertEqual(r1.status, 204)
self.assertEqual(r.read(), 'data: a["x"]\r\n\r\n')
# This protocol doesn't allow binary data and we need to
# specially treat leading space, new lines and things like
# \x00. But, now the protocol json-encodes everything, so
# there is no way to trigger this case.
r1 = POST(url + '/xhr_send', body=r'[" \u0000\n\r "]')
self.assertFalse(r1.body)
self.assertEqual(r1.status, 204)
self.assertEqual(r.read(),
'data: a[" \\u0000\\n\\r "]\r\n\r\n')
r.close()
def test_response_limit(self):
# Single streaming request should be closed after enough data
# was delivered (by default 128KiB, but 4KiB for test server).
# Although EventSource transport is better, and in theory may
# not need this mechanism, there are some bugs in the browsers
# that actually prevent the automatic GC.
url = base_url + '/000/' + str(uuid.uuid4())
r = GET_async(url + '/eventsource')
self.assertEqual(r.status, 200)
self.assertTrue(r.read()) # prelude
self.assertEqual(r.read(), 'data: o\r\n\r\n')
# Test server should gc streaming session after 4096 bytes
# were sent (including framing).
msg = '"' + ('x' * 4096) + '"'
r1 = POST(url + '/xhr_send', body='[' + msg + ']')
self.assertEqual(r1.status, 204)
self.assertEqual(r.read(), 'data: a[' + msg + ']\r\n\r\n')
# The connection should be closed after enough data was
# delivered.
self.assertFalse(r.read())
# HtmlFile: `/*/*/htmlfile`
# -------------------------
#
# Htmlfile transport is based on research done by Michael Carter. It
# requires a famous `document.domain` trick. Read on:
#
# * [http://stackoverflow.com/questions/1481251/what-does-document-domain-document-domain-do](http://stackoverflow.com/questions/1481251/what-does-document-domain-document-domain-do)
# * [http://cometdaily.com/2007/11/18/ie-activexhtmlfile-transport-part-ii/](http://cometdaily.com/2007/11/18/ie-activexhtmlfile-transport-part-ii/)
#
class HtmlFile(Test):
head = r'''
<!doctype html>
<html><head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
</head><body><h2>Don't panic!</h2>
<script>
document.domain = document.domain;
var c = parent.%s;
c.start();
function p(d) {c.message(d);};
window.onload = function() {c.stop();};
</script>
'''.strip()
def test_transport(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = GET_async(url + '/htmlfile?c=%63allback')
self.assertEqual(r.status, 200)
self.assertEqual(r['Content-Type'],
'text/html; charset=UTF-8')
# As HtmlFile is requested using GET we must be very careful
# not to allow it to be cached.
self.verify_not_cached(r)
self.verify_cookie(r)
d = r.read()
self.assertEqual(d.strip(), self.head % ('callback',))
self.assertGreater(len(d), 1024)
self.assertEqual(r.read(),
'<script>\np("o");\n</script>\r\n')
r1 = POST(url + '/xhr_send', body='["x"]')
self.assertFalse(r1.body)
self.assertEqual(r1.status, 204)
self.assertEqual(r.read(),
'<script>\np("a[\\"x\\"]");\n</script>\r\n')
r.close()
def test_no_callback(self):
r = GET(base_url + '/a/a/htmlfile')
self.assertEqual(r.status, 500)
self.assertTrue('"callback" parameter required' in r.body)
def test_response_limit(self):
# Single streaming request should be closed after enough data
# was delivered (by default 128KiB, but 4KiB for test server).
url = base_url + '/000/' + str(uuid.uuid4())
r = GET_async(url + '/htmlfile?c=callback')
self.assertEqual(r.status, 200)
self.assertTrue(r.read()) # prelude
self.assertEqual(r.read(),
'<script>\np("o");\n</script>\r\n')
# Test server should gc streaming session after 4096 bytes
# were sent (including framing).
msg = ('x' * 4096)
r1 = POST(url + '/xhr_send', body='["' + msg + '"]')
self.assertEqual(r1.status, 204)
self.assertEqual(r.read(),
'<script>\np("a[\\"' + msg + '\\"]");\n</script>\r\n')
# The connection should be closed after enough data was
# delivered.
self.assertFalse(r.read())
# JsonpPolling: `/*/*/jsonp`, `/*/*/jsonp_send`
# ---------------------------------------------
class JsonPolling(Test):
def test_transport(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = GET(url + '/jsonp?c=%63allback')
self.assertEqual(r.status, 200)
self.assertEqual(r['Content-Type'],
'application/javascript; charset=UTF-8')
# As JsonPolling is requested using GET we must be very
# careful not to allow it to be cached.
self.verify_not_cached(r)
self.verify_cookie(r)
self.assertEqual(r.body, 'callback("o");\r\n')
r = POST(url + '/jsonp_send', body='d=%5B%22x%22%5D',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
# Konqueror does weird things on 204. As a workaround we need
# to respond with something - let it be the string `ok`.
self.assertEqual(r.body, 'ok')
self.assertEqual(r.status, 200)
self.assertEqual(r['Content-Type'], 'text/plain; charset=UTF-8')
self.verify_cookie(r)
r = GET(url + '/jsonp?c=%63allback')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'callback("a[\\"x\\"]");\r\n')
def test_no_callback(self):
r = GET(base_url + '/a/a/jsonp')
self.assertEqual(r.status, 500)
self.assertTrue('"callback" parameter required' in r.body)
# The server must behave properly when invalid json data is sent or when no
# json data is sent at all.
def test_invalid_json(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = GET(url + '/jsonp?c=x')
self.assertEqual(r.body, 'x("o");\r\n')
r = POST(url + '/jsonp_send', body='d=%5B%22x',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(r.status, 500)
self.assertTrue("Broken JSON encoding." in r.body)
for data in ['', 'd=', 'p=p']:
r = POST(url + '/jsonp_send', body=data,
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(r.status, 500)
self.assertTrue("Payload expected." in r.body)
r = POST(url + '/jsonp_send', body='d=%5B%22b%22%5D',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(r.body, 'ok')
r = GET(url + '/jsonp?c=x')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'x("a[\\"b\\"]");\r\n')
# The server must accept messages sent with different content
# types.
def test_content_types(self):
url = base_url + '/000/' + str(uuid.uuid4())
r = GET(url + '/jsonp?c=x')
self.assertEqual(r.body, 'x("o");\r\n')
r = POST(url + '/jsonp_send', body='d=%5B%22abc%22%5D',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(r.body, 'ok')
r = POST(url + '/jsonp_send', body='["%61bc"]',
headers={'Content-Type': 'text/plain'})
self.assertEqual(r.body, 'ok')
r = GET(url + '/jsonp?c=x')
self.assertEqual(r.status, 200)
self.assertEqual(r.body, 'x("a[\\"abc\\",\\"%61bc\\"]");\r\n')
# Raw WebSocket url: `/websocket`
# -------------------------------
#
# SockJS protocol defines a bit of higher level framing. This is okay
# when the browser using SockJS-client establishes the connection, but
# it's not really appropriate when the connection is being established
# from another program. Although SockJS focuses on server-browser
# communication, it should be straightforward to connect to SockJS
# from the command line or from any programming language.
#
# In order to make writing command-line clients easier, we define this
# `/websocket` entry point. This entry point is special and doesn't
# use any additional custom framing, no open frame, no
# heartbeats. Only raw WebSocket protocol.
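# For illustration only - a minimal sketch (not part of the test suite) of
# talking to the raw endpoint from a plain Python script, reusing the
# `websocket` module imported above. The function name is an assumption
# made for this example.
def example_raw_echo(message=u'Hello world!'):
    ws_url = 'ws:' + base_url.split(':', 1)[1] + '/websocket'
    ws = websocket.create_connection(ws_url)
    ws.send(message)       # no SockJS framing on this endpoint
    reply = ws.recv()      # the echo service returns the payload verbatim
    ws.close()
    return reply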
class RawWebsocket(Test):
def test_transport(self):
ws = WebSocket8Client(base_url + '/websocket')
ws.send(u'Hello world!\uffff')
self.assertEqual(ws.recv(), u'Hello world!\uffff')
ws.close()
def test_close(self):
ws = WebSocket8Client(close_base_url + '/websocket')
with self.assertRaises(ws.ConnectionClosedException):
ws.recv()
ws.close()
# JSON Unicode Encoding
# =====================
#
# SockJS takes the responsibility of encoding Unicode strings for the
# user. The idea is that SockJS should properly deliver any valid
# string from the browser to the server and back. This is actually
# quite hard, as browsers do some magical character
# translations. Additionally there are some characters that are valid from
# the JavaScript point of view but are not valid Unicode, called
# surrogates (JavaScript uses UCS-2, which is not really Unicode).
#
# Dealing with unicode surrogates (0xD800-0xDFFF) is quite special. If
# possible we should make sure that the server does escape-decode
# them. This makes sense for SockJS servers that support UCS-2
# (SockJS-node), but can't really work for servers supporting unicode
# properly (Python).
#
# The browser must escape quite a list of chars; this is due to
# browsers mangling outgoing chars on transports like XHR.
escapable_by_client = re.compile(u"[\\\"\x00-\x1f\x7f-\x9f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u2000-\u20ff\ufeff\ufff0-\uffff\x00-\x1f\ufffe\uffff\u0300-\u0333\u033d-\u0346\u034a-\u034c\u0350-\u0352\u0357-\u0358\u035c-\u0362\u0374\u037e\u0387\u0591-\u05af\u05c4\u0610-\u0617\u0653-\u0654\u0657-\u065b\u065d-\u065e\u06df-\u06e2\u06eb-\u06ec\u0730\u0732-\u0733\u0735-\u0736\u073a\u073d\u073f-\u0741\u0743\u0745\u0747\u07eb-\u07f1\u0951\u0958-\u095f\u09dc-\u09dd\u09df\u0a33\u0a36\u0a59-\u0a5b\u0a5e\u0b5c-\u0b5d\u0e38-\u0e39\u0f43\u0f4d\u0f52\u0f57\u0f5c\u0f69\u0f72-\u0f76\u0f78\u0f80-\u0f83\u0f93\u0f9d\u0fa2\u0fa7\u0fac\u0fb9\u1939-\u193a\u1a17\u1b6b\u1cda-\u1cdb\u1dc0-\u1dcf\u1dfc\u1dfe\u1f71\u1f73\u1f75\u1f77\u1f79\u1f7b\u1f7d\u1fbb\u1fbe\u1fc9\u1fcb\u1fd3\u1fdb\u1fe3\u1feb\u1fee-\u1fef\u1ff9\u1ffb\u1ffd\u2000-\u2001\u20d0-\u20d1\u20d4-\u20d7\u20e7-\u20e9\u2126\u212a-\u212b\u2329-\u232a\u2adc\u302b-\u302c\uaab2-\uaab3\uf900-\ufa0d\ufa10\ufa12\ufa15-\ufa1e\ufa20\ufa22\ufa25-\ufa26\ufa2a-\ufa2d\ufa30-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufb4e]")
#
# The server is able to send many more chars verbatim. But it can't
# send Unicode surrogates over Websockets, and various \u2xxxx chars
# get mangled. Additionally, if the server is capable of handling
# UCS-2 (i.e. 16 bit character size), it should be able to deal with
# Unicode surrogates 0xD800-0xDFFF:
# http://en.wikipedia.org/wiki/Mapping_of_Unicode_characters#Surrogates
escapable_by_server = re.compile(u"[\x00-\x1f\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufff0-\uffff]")
client_killer_string_esc = '"' + ''.join([
r'\u%04x' % (i) for i in range(65536)
if escapable_by_client.match(unichr(i))]) + '"'
server_killer_string_esc = '"' + ''.join([
r'\u%04x'% (i) for i in range(255, 65536)
if escapable_by_server.match(unichr(i))]) + '"'
class JSONEncoding(Test):
def test_xhr_server_encodes(self):
# Make sure that the server encodes at least all the characters
# it's supposed to encode.
trans_url = base_url + '/000/' + str(uuid.uuid4())
r = POST(trans_url + '/xhr')
self.assertEqual(r.body, 'o\n')
self.assertEqual(r.status, 200)
payload = '["' + json.loads(server_killer_string_esc) + '"]'
r = POST(trans_url + '/xhr_send', body=payload)
self.assertEqual(r.status, 204)
r = POST(trans_url + '/xhr')
self.assertEqual(r.status, 200)
# skip framing, quotes and parenthesis
recv = r.body.strip()[2:-1]
# The received string is indeed what we sent previously, i.e. escaped.
self.assertEqual(recv, server_killer_string_esc)
def test_xhr_server_decodes(self):
# Make sure that the server decodes the chars we're
# custom-encoding.
trans_url = base_url + '/000/' + str(uuid.uuid4())
r = POST(trans_url + '/xhr')
self.assertEqual(r.body, 'o\n')
self.assertEqual(r.status, 200)
payload = '[' + client_killer_string_esc + ']' # Sending escaped
r = POST(trans_url + '/xhr_send', body=payload)
self.assertEqual(r.status, 204)
r = POST(trans_url + '/xhr')
self.assertEqual(r.status, 200)
# skip framing, quotes and parenthesis
recv = r.body.strip()[2:-1]
# The received string is indeed what we sent previously. We don't
# really need to know exactly what got escaped and what didn't.
a = json.loads(recv)
b = json.loads(client_killer_string_esc)
self.assertEqual(a, b)
# Handling close
# ==============
#
# Dealing with session closure is quite a complicated part of the
# protocol. The exact details here don't matter that much to the
# client side, but it's good to have a common behaviour on the server
# side.
#
# This is less about defining the protocol and more about sanity
# checking implementations.
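#
# For example (an illustrative sketch matching the assertions below): the first
# request on a closing session sees the open frame 'o\n' followed by a close
# frame such as 'c[3000,"Go away!"]\n', and any further request on the same
# session id receives that close frame again instead of "Another connection
# still open".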
class HandlingClose(Test):
# When the server is closing a session, it should unlink the current
# request. That means that if a new request appears, it should receive
# an application close message rather than an "Another connection
# still open" message.
def test_close_frame(self):
url = close_base_url + '/000/' + str(uuid.uuid4())
r1 = POST_async(url + '/xhr_streaming')
r1.read() # prelude
self.assertEqual(r1.read(), 'o\n')
self.assertEqual(r1.read(), 'c[3000,"Go away!"]\n')
r2 = POST_async(url + '/xhr_streaming')
r2.read() # prelude
self.assertEqual(r2.read(), 'c[3000,"Go away!"]\n')
# HTTP streaming requests should be automatically closed after
# close.
self.assertEqual(r1.read(), None)
self.assertEqual(r2.read(), None)
def test_close_request(self):
url = base_url + '/000/' + str(uuid.uuid4())
r1 = POST_async(url + '/xhr_streaming')
r1.read() # prelude
self.assertEqual(r1.read(), 'o\n')
r2 = POST_async(url + '/xhr_streaming')
r2.read() # prelude
self.assertEqual(r2.read(), 'c[2010,"Another connection still open"]\n')
# HTTP streaming requests should be automatically closed after
# getting the close frame.
self.assertEqual(r2.read(), None)
# When a polling request is closed by a network error - not by the
# server - the session should be automatically closed. After a network
# error we're in an undefined state: some messages may have been lost
# and there is not much we can do about it.
def test_abort_xhr_streaming(self):
url = base_url + '/000/' + str(uuid.uuid4())
r1 = POST_async(url + '/xhr_streaming')
r1.read() # prelude
self.assertEqual(r1.read(), 'o\n')
# Can't do second polling request now.
r2 = POST_async(url + '/xhr_streaming')
r2.read() # prelude
self.assertEqual(r2.read(), 'c[2010,"Another connection still open"]\n')
self.assertEqual(r2.read(), None)
r1.close()
        # A polling request made now, after we aborted the previous one,
        # should trigger a connection closure. Implementations may close
        # the session and forget the related state. Alternatively
        # they may return a 1002 close message.
r3 = POST_async(url + '/xhr_streaming')
r3.read() # prelude
self.assertTrue(r3.read() in ['o\n', 'c[1002,"Connection interrupted"]\n'])
r3.close()
# The same for polling transports
def test_abort_xhr_polling(self):
url = base_url + '/000/' + str(uuid.uuid4())
r1 = POST(url + '/xhr')
self.assertEqual(r1.body, 'o\n')
r1 = POST_async(url + '/xhr', load=False)
# Can't do second polling request now.
r2 = POST(url + '/xhr')
self.assertEqual(r2.body, 'c[2010,"Another connection still open"]\n')
r1.close()
        # A polling request made now, after we aborted the previous one,
        # should trigger a connection closure. Implementations may close
        # the session and forget the related state. Alternatively
        # they may return a 1002 close message.
r3 = POST(url + '/xhr')
self.assertTrue(r3.body in ['o\n', 'c[1002,"Connection interrupted"]\n'])
# Footnote
# ========
# Make this script runnable.
if __name__ == '__main__':
nose.main()
########NEW FILE########
__FILENAME__ = utils
import urlparse
import httplib_fork as httplib
from ws4py.client.threadedclient import WebSocketClient
import Queue
import logging
class HttpResponse:
def __init__(self, method, url,
headers={}, body=None, async=False, load=True):
headers = headers.copy()
u = urlparse.urlparse(url)
kwargs = {'timeout': None if async else 1.0}
if u.scheme == 'http':
conn = httplib.HTTPConnection(u.netloc, **kwargs)
elif u.scheme == 'https':
conn = httplib.HTTPSConnection(u.netloc, **kwargs)
else:
assert False, "Unsupported scheme " + u.scheme
assert u.fragment == ''
path = u.path + ('?' + u.query if u.query else '')
self.conn = conn
if not body:
            if method == 'POST':
# The spec says: "Applications SHOULD use this field
# to indicate the transfer-length of the message-body,
# unless this is prohibited by the rules in section
# 4.4."
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
# While httplib sets it only if there is body.
headers['Content-Length'] = 0
conn.request(method, path, headers=headers)
else:
if isinstance(body, unicode):
body = body.encode('utf-8')
conn.request(method, path, headers=headers, body=body)
if load:
if not async:
self._load()
else:
self._async_load()
@property
def status(self):
if self.res.status == 500 and hasattr(self, 'body'):
logging.error(self.body)
return self.res.status
def __getitem__(self, key):
return self.headers.get(key.lower())
def _load(self):
self.res = self.conn.getresponse()
self.headers = dict( (k.lower(), v) for k, v in self.res.getheaders() )
self.body = self.res.read()
self.close()
def close(self):
if self.conn:
self.conn.close()
self.conn = None
def _async_load(self):
self.res = self.conn.getresponse()
self.headers = dict( (k.lower(), v) for k, v in self.res.getheaders() )
def read(self):
data = self.res.read(10240)
if data:
return data
else:
self.close()
return None
def GET(url, **kwargs):
try:
return HttpResponse('GET', url, **kwargs)
except Exception as e:
logging.error(url)
raise e
def GET_async(url, **kwargs):
try:
return HttpResponse('GET', url, async=True, **kwargs)
except Exception as e:
logging.error(url)
raise e
def POST(url, **kwargs):
try:
return HttpResponse('POST', url, **kwargs)
except Exception as e:
logging.error(url)
raise e
def POST_async(url, **kwargs):
try:
return HttpResponse('POST', url, async=True, **kwargs)
except Exception as e:
logging.error(url)
raise e
def OPTIONS(url, **kwargs):
try:
return HttpResponse('OPTIONS', url, **kwargs)
except Exception as e:
logging.error(url)
raise e
class WebSocket8Client(object):
class ConnectionClosedException(Exception): pass
def __init__(self, url):
queue = Queue.Queue()
self.queue = queue
class IntWebSocketClient(WebSocketClient):
def received_message(self, m):
queue.put(unicode(str(m), 'utf-8'))
def read_from_connection(self, amount):
r = super(IntWebSocketClient, self).read_from_connection(amount)
if not r:
queue.put(Ellipsis)
return r
self.client = IntWebSocketClient(url)
self.client.connect()
def close(self):
if self.client:
self.client.running = False
self.client.close()
self.client._th.join()
self.client = None
def send(self, data):
self.client.send(data)
def recv(self):
try:
r = self.queue.get(timeout=1.0)
if r is Ellipsis:
raise self.ConnectionClosedException()
return r
except:
self.close()
raise
########NEW FILE########
| [
"[email protected]"
] | |
f1318351ae4716d2341351aa7ba537219924a05b | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/nlp/mass/src/language_model/masked_language_model.py | 52aed8d53ed7b0a0eae8a67d7231364bbf913a00 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 4,698 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Masked language model."""
import numpy as np
from .base import LanguageModel
class MaskedLanguageModel(LanguageModel):
"""
    Apply the mask operation to a sentence.
    If k is assigned, mask a fragment of length k.
    Otherwise, use mask_ratio.
Args:
k (int): Length of fragment.
mask_ratio (float): Mask ratio.
"""
def __init__(self, k: int = None, mask_ratio=0.5,
mask_all_prob=None):
super(MaskedLanguageModel, self).__init__()
self.mask_ratio = mask_ratio
self._k = k
self._threshold = mask_all_prob
def emit(self, sentence: np.ndarray, vocabulary):
"""
Mask mono source sentence.
        A sample used to train the model is processed with the following steps:
encoder input (source): [x1, x2, x3, x4, x5, x6, x7, x8, </eos>]
masked encoder input: [x1, x2, _, _, _, x6, x7, x8, </eos>]
decoder input: [ _, x3, x4]
| | |
V V V
decoder output: [ x3, x4, x5]
Notes:
            A simple rule is made that the source sentence starts without <BOS>
            but ends with <EOS>.
Args:
vocabulary (Dictionary): Vocabulary.
sentence (np.ndarray): Raw sentence instance.
Returns:
dict, an example.
"""
encoder_input = sentence.copy()
seq_len = encoder_input.shape[0]
# If v=0, then u must equal to 0. [u, v)
u, v = self._get_masked_interval(len(encoder_input),
self._k, self._threshold)
if u == 0:
_len = v - u if v - u != 0 else seq_len
decoder_input = np.array([vocabulary.mask_index] * _len, dtype=np.int32)
decoder_input[1:] = encoder_input[:_len - 1].copy()
else:
decoder_input = np.array([vocabulary.mask_index] * (v - u), dtype=np.int32)
decoder_input[1:] = encoder_input[u:v - 1].copy()
if v == 0:
decoder_output = encoder_input.copy()
encoder_input[:] = vocabulary.mask_index
else:
decoder_output = encoder_input[u:v].copy()
encoder_input[np.arange(start=u, stop=v)] = vocabulary.mask_index
if u != v and u > 0:
padding = np.array([vocabulary.padding_index] * u, dtype=np.int32)
decoder_input = np.concatenate((padding, decoder_input))
decoder_output = np.concatenate((padding, decoder_output))
assert decoder_input.shape[0] == decoder_output.shape[0], "seq len must equal."
return {
"sentence_length": seq_len,
"tgt_sen_length": decoder_output.shape[0],
"encoder_input": encoder_input, # end with </eos>
"decoder_input": decoder_input,
"decoder_output": decoder_output # end with </eos>
}
def _get_masked_interval(self, length, fix_length=None,
threshold_to_mask_all=None):
"""
Generate a sequence length according to length and mask_ratio.
Args:
length (int): Sequence length.
Returns:
Tuple[int, int], [start position, end position].
"""
        # Cannot be larger than the sequence length.
# Mask_length belongs to [0, length].
if fix_length is not None:
interval_length = min(length, fix_length)
else:
interval_length = min(length, round(self.mask_ratio * length))
_magic = np.random.random()
if threshold_to_mask_all is not None and _magic <= threshold_to_mask_all:
return 0, length
        # If no sequence is to be masked, then return 0, 0.
if interval_length == 0:
return 0, 0
# Otherwise, return start position and interval length.
start_pos = np.random.randint(low=0, high=length - interval_length + 1)
return start_pos, start_pos + interval_length
| [
"[email protected]"
] | |
5c3dda335336b3b644e37fe7f8f4f46f4fd0ee86 | 60ce73bf2f86940438e5b7fecaaccad086888dc5 | /working_scrapers/Illinois_dekalb.py | d04843c1e230207cd3080ec2535d4860593519dd | [] | no_license | matthewgomies/jailcrawl | 22baf5f0e6dc66fec1b1b362c26c8cd2469dcb0d | 9a9ca7e1328ae549860ebeea9b149a785f152f39 | refs/heads/master | 2023-02-16T06:39:42.107493 | 2021-01-15T16:37:57 | 2021-01-15T16:37:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,161 | py | #!/usr/bin/python
'''
This is a template script
MG
'''
from urllib.request import urlopen, Request
import pandas as pd
import os
import time
import numpy as np
from datetime import datetime
import datetime as dt
import sys
from io import StringIO
from joblib import Parallel, delayed
import requests
from jailscrape.common import save_to_s3, get_browser, get_logger, record_error, save_pages_array
from jailscrape import crawlers
# jailscrape.common is a file that is part of the project which keeps
# most common boilerplate code out of this file
from selenium.webdriver.common.keys import Keys
import watchtower
from bs4 import BeautifulSoup
import re
import math
# NOTE: These are imports. They ideally don't change very often.
# It's OK to have a large, maximal set here and to bulk-edit files to add to these.
# MG - Extra imports
import selenium as sm
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
ROW_INDEX = 171 # Change this for each scraper. This references the row
# of the main jailcrawl spreadsheet. This index will be used to look up
# the URL as well as state/county info
THIS_STATE = 'illinois' # Change the current state/county information.
THIS_COUNTY = 'dekalb'
def main(roster_row):
try:
logger = get_logger(roster_row) # Get a standard logger
# Here are standard variable values/how to initialize them.
# These aren't initialized here since in the save_single_page
# case, they can be done in the called function
browser = get_browser() # Get a standard browser
urlAddress = roster_row['Working Link'] # Set the main URL from the spreadsheet
page_index = 0 # Set an initial value of "page_index", which we will use to separate output pages
logger.info('Set working link to _%s_', urlAddress) # Log the chosen URL
####################################
# Begin core specific scraping code
if roster_row['State'].lower() != THIS_STATE or roster_row['County'].lower() != THIS_COUNTY:
raise Exception("Expected county definition info from _%s, %s_, but found info: _%s_" % (THIS_COUNTY, THIS_STATE, roster_row))
#Given the urlAddress passed to the function we will navigate to the page
browser.get(urlAddress)
time.sleep(np.random.uniform(7,10,1))
#Extract the HTML#
store_source = browser.page_source
## Code to save the first page and log appropriately
save_to_s3(store_source, page_index, roster_row)
logger.info('Saved page _%s_', page_index)
#Finding the last page
soup = BeautifulSoup(store_source, 'lxml')
page=0
for link in soup.findAll("div", {"class":"loca-search-head text-center"}):
page=str(link.text)
page=re.sub(' Results for "_"', "", page)
page=int(page)/10
page=math.ceil(page)
#Crawling through all the pages
string = str(1)
for i in range(2,page+1):
if i>30 :
print("Exceeds 300 inmates")
elif i==2:
elem = browser.find_element_by_xpath('/html/body/div/div/div/div[2]/div[3]/div[12]/ul/li[3]/a')
elem.click()
time.sleep(np.random.uniform(3,5,1))
store_source = browser.page_source
string=str(i)
## Code to save the page and log appropriately
page_index=int(string)-1
save_to_s3(store_source, page_index, roster_row)
logger.info('Saved page _%s_', page_index)
elif i==3:
elem = browser.find_element_by_xpath('/html/body/div/div/div/div[2]/div[3]/div[12]/ul/li[4]/a')
elem.click()
time.sleep(np.random.uniform(3,5,1))
store_source = browser.page_source
string=str(i)
## Code to save the page and log appropriately
page_index=int(string)-1
save_to_s3(store_source, page_index, roster_row)
logger.info('Saved page _%s_', page_index)
elif i==4:
elem = browser.find_element_by_xpath('/html/body/div/div/div/div[2]/div[3]/div[12]/ul/li[5]/a')
elem.click()
time.sleep(np.random.uniform(3,5,1))
store_source = browser.page_source
string=str(i)
## Code to save the page and log appropriately
page_index=int(string)-1
save_to_s3(store_source, page_index, roster_row)
logger.info('Saved page _%s_', page_index)
elif i>=5:
elem = browser.find_element_by_xpath('/html/body/div/div/div/div[2]/div[3]/div[12]/ul/li[6]/a')
elem.click()
time.sleep(np.random.uniform(3,5,1))
store_source = browser.page_source
string=str(i)
## Code to save the page and log appropriately
page_index=int(string)-1
save_to_s3(store_source, page_index, roster_row)
logger.info('Saved page _%s_', page_index)
# End core specific scraping code
####################################
#Close the browser
logger.info('complete!')
except Exception as errorMessage:
try:
browser.close()
record_error(message=str(errorMessage), roster_row=roster_row, browser=browser)
except:
record_error(message=str(errorMessage), roster_row=roster_row)
# Record error in S3 for a general error
logger.error('Error: %s', errorMessage)
# Log error
sys.exit(1)
if __name__ == "__main__":
#This will load in the current jail roster list
#Select the index of the roster this script is for:
#Write the name of the county and state
roster = pd.read_csv('/opt/jail_roster_final_rmDuplicates.csv',encoding = "utf-8")
main(roster[roster['index'] == ROW_INDEX].iloc[0])
| [
"[email protected]"
] | |
6f409ce181ccfacc565feea9433e652a11fe88ae | c6939d3e5d5628673d44d29ef38b0511556a83aa | /new_shangmi/shangmi/apis_v1.py | a76dd4e4cbce2576231baab2a614b4a0b49d8b0d | [] | no_license | General-Coder/shangmiteam | a536867a7e03f33eec3d2c55c0f55a1cb7ae1b85 | a628e38a545ffc36caa4c05d2fb5b73398a26ac1 | refs/heads/master | 2020-04-11T06:03:28.468625 | 2018-12-11T09:33:14 | 2018-12-11T09:33:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,462 | py | import json
import requests
from django.conf import settings
from django.forms import model_to_dict
from django.http import JsonResponse, HttpResponse
from django.views.generic import View
from .utils import *
from .models import *
from django.core.cache import caches
from .getqr import *
import uuid
user_cache = caches['user']
class LoginAPI(View):
def post(self, request):
params = request.POST
code = params.get('code')
avatar = params.get('avatar')
# gender = params.get('gender')
nick_name = params.get('name')
mini_type = params.get('mini_type')
token = params.get("token")
user_id = user_cache.get(token)
if user_id:
user_cache.set(token, user_id, settings.LOGIN_TIMEOUT)
return JsonResponse({'code': 0, 'data': {'token': token, "uid": user_id}})
if mini_type == 'background':
appid = 'wx4a8c99d5d8b43556'
secret = '014ad578b31357e53b61b9ab69db0761'
elif mini_type == 'customer':
appid = 'wx8b50ab8fa813a49e'
secret = 'b32f63c36ea123710173c4c9d4b15e8b'
else:
appid = 'wxebd828458f8b2b38'
secret = 'a40cb9c5ecb1f4f5c0f31b75829fed03'
url = settings.SMALL_WEIXIN_OPENID_URL
params = {"appid": appid,
"secret": secret,
"js_code": code,
"grant_type": 'authorization_code'
}
response = requests.get(url, params=params)
data = json.loads(response.content.decode())
if 'openid' in data:
openid = data.get('openid')
user = ShangmiUser.objects.get_or_create(openid=openid)[0]
# token = generate_validate_token(str(user.id))
token = uuid.uuid4().hex
user_cache.set(token, user.id, settings.LOGIN_TIMEOUT)
user.nick_name = nick_name
user.icon = avatar
user.source = mini_type
user.save()
return HttpResponse(json.dumps({'code': 0, 'data': {'token': token, "uid": user.id}}),
content_type='application/json')
else:
return HttpResponse(json.dumps({'code': 1, 'msg': 'failed'}),
content_type='application/json')
class ActivesAPI(View):
def get(self, req):
actives = Active.objects.filter(
is_active=True
)
fast = actives.filter(is_fast=True)
unfast = actives.filter(is_fast=False)
# fast_data = [model_to_dict(i) for i in fast]
unfast_data = [model_to_dict(i) for i in unfast]
fast_data = []
for i in fast:
tmp = model_to_dict(i)
if i.need_num == 0:
tmp["percent"] = "0%"
else:
tmp["percent"] = str((i.complete_num / i.need_num) * 100) + "%"
fast_data.append(tmp)
unfast_data = []
for i in unfast:
tmp = model_to_dict(i)
if i.need_num == 0:
tmp["percent"] = "0%"
else:
tmp["percent"] = str((i.complete_num / i.need_num) * 100) + "%"
unfast_data.append(tmp)
result = {
"code": 1,
"msg": "ok",
"data": {
"fast": fast_data,
"unfast": unfast_data
}
}
return JsonResponse(result)
class AdvAPI(View):
def get(self,req):
advs = Advertise.objects.filter(
is_used=True
)
res = [model_to_dict(i) for i in advs]
data = {
"code":1,
"msg": "ok",
"data": res
}
return JsonResponse(data)
class IndexAPI(View):
# @login_req
def get(self, req):
user = ShangmiUser.objects.get(pk=int(user_cache.get(req.GET.get("token"))))
actives = UserActiveLog.objects.filter(user=user)
        # pending review (not yet approved)
doing_count = actives.filter(status=0).count()
        # approved
finish_count = actives.filter(status=1).count()
        # user balance
try:
money = Balance.objects.get(user=user).money
except:
money = 0
data = {
"code": 0,
"data": {
'money': money,
'doing_count': doing_count,
'finish_count': finish_count
}
}
return JsonResponse(data)
# detail of the activities a user has joined
class UserActiveLogAPI(View):
def get(self, req):
user = ShangmiUser.objects.get(
pk=int(user_cache.get(
req.GET.get("token")
)
)
)
logs = UserActiveLog.objects.filter(
user=user,
status=1
).order_by("-create_time")
data_logs = []
for i in logs:
tmp = model_to_dict(i)
tmp['create_time'] = i.create_time.strftime("%Y年%m月%d日 %H:%M")
tmp["status"] = i.get_status_display()
tmp["active_msg"] = model_to_dict(i.active)
tmp["type"] = i.get_type_display()
data_logs.append(tmp)
return JsonResponse({"code": 0, "data": data_logs})
# payment details
class UserPayLogAPI(View):
def get(self, req):
user = ShangmiUser.objects.get(
pk=int(user_cache.get(
req.GET.get("token")
)
)
)
logs = UserPayLog.objects.filter(user=user, status=1).order_by("-create_time")
datas = []
for i in logs:
tmp = model_to_dict(i)
tmp['create_time'] = i.create_time.strftime("%Y年%m月%d日 %H:%M:%S")
tmp["store_name"] = i.store.name
tmp["money"] = i.money / 100
tmp["integral"] = i.integral / 100
datas.append(tmp)
data = {
"code": 0,
"data": datas
}
return JsonResponse(data)
# task details
class TaskDetailAPI(View):
def get(self, req):
user = ShangmiUser.objects.get(
pk=int(user_cache.get(
req.GET.get("token")
)
)
)
datas = UserActiveLog.objects.filter(user=user).order_by("-create_time")
details = []
for i in datas:
tmp = model_to_dict(i)
tmp['create_time'] = i.create_time.strftime("%Y年%m月%d日 %H:%M")
tmp["status"] = i.get_status_display()
tmp["active_msg"] = model_to_dict(i.active)
tmp["type"] = i.get_type_display()
details.append(tmp)
data = {
"code": 0,
"data": details
}
return JsonResponse(data)
class ActiveAPI(View):
def get(self, req):
id = int(req.GET.get("id"))
active = Active.objects.get(pk=id)
data = {
"code": 0,
"data": model_to_dict(active)
}
return JsonResponse(data)
class ShareGetMoneyAPI(View):
def post(self, req):
token = req.POST.get("token")
share_uid = req.POST.get("uid")
        user = user_cache.get(token)
class JoinActiveAPI(View):
def post(self, req):
user = ShangmiUser.objects.get(pk=int(user_cache.get(
req.POST.get("token")
)))
uid = req.POST.get("uid")
id = req.POST.get("id")
active = Active.objects.get(id=id)
if active.is_active == False:
data = {
"code": 3,
"data": "活动已结束"
}
return JsonResponse(data)
        # first check whether this user has already joined
if UserActiveLog.objects.filter(user_id=user.id).exists():
data = {
"code": 2,
"data": "您已参加,想赚更多可分享"
}
return JsonResponse(data)
log = UserActiveLog.objects.create(
active_id=id,
user_id=user.id,
integral=active.give_money,
type="join",
status=1
)
active.complete_num += 1
active.save()
        # update the user's balance record
user_balance = Balance.objects.get_or_create(user_id=user.id)[0]
user_balance.money += active.give_money
user_balance.save()
if int(uid) != -1 and int(uid) != user.id:
UserActiveLog.objects.create(
active_id=id,
user_id=uid,
integral=active.share_give_money,
type="share",
status=1
)
            # update the sharer's point balance
share_user_balance = Balance.objects.get(user_id=uid)
share_user_balance.money += active.share_give_money
share_user_balance.save()
data = {
"code": 0,
"data": "参与成功,积分已发放到个人中心"
}
return JsonResponse(data)
class QrcodeAPI(View):
def get(self, request):
params = request.GET
active_id = int(params.get('active_id'))
wx_mini_path = 'pages/join/join?uid=-1&aid=%s' % active_id
image_data = get_qrcode(wx_mini_path)
return HttpResponse(image_data,content_type="image/png")
class StoreAPI(View):
def get(self, req):
user = ShangmiUser.objects.get(
pk=int(user_cache.get(
req.GET.get("token")
)
)
)
balance = Balance.objects.get(user_id=user.id)
store_id = int(req.GET.get("sid"))
store = Store.objects.get(id=store_id)
if store.is_active == False:
data = {
"code": 2,
"data": "该店暂不参与"
}
return JsonResponse(data)
else:
store_dict = model_to_dict(store)
store_dict["boss_name"] = store.boss.nick_name
store_dict["boss_icon"] = store.boss.icon
store_dict["user_balance"] = balance.money / 100
return JsonResponse({"code": 0, "data": store_dict}) | [
"[email protected]"
] | |
e3421447a8225cc4e8464a1815d43de78d1715f1 | 30a1b285ff4aab39eebe342c5dbca255a69b454c | /full-problems/maxDiff.py | 347a657be99ca517cd6ae0e9e6234e8672f61c47 | [
"Apache-2.0"
] | permissive | vikas-t/practice-problems | cd5852ea112421a2a39db31ae9092c6a148b2af8 | ea654d1cad5374c824c52da9d3815a9546eb43fa | refs/heads/master | 2021-10-27T14:08:42.724019 | 2019-04-17T18:26:23 | 2019-04-17T18:26:23 | 170,156,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | #!/usr/bin/python3
# https://practice.geeksforgeeks.org/problems/maximum-difference/0
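# Approach: scan left to right, keeping the index of the smallest element seen
# strictly before the current position, and take the maximum of
# arr[i] - arr[min_i] over all i > min_i. For the sample array below the
# answer is 14 - 3 = 11.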
def sol(arr, n):
d = -1
min_i = 0
min_till_here = 0
for i in range(1, n):
if arr[i] < arr[min_till_here]:
min_till_here = i
if min_till_here != min_i and min_till_here < i:
min_i = min_till_here
d = max(d, arr[i]-arr[min_i])
return d
arr = [5, 15, 3, 4, 5, 14]
print(sol(arr, len(arr))) | [
"[email protected]"
] | |
b298869f7dc5f7a2e2768feabbc8a3758fdcedd7 | 5c2e0fe391f7c720d0a6c117a64f4c8e89fece93 | /research/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py | d2681127b2706faec7433bb8adbcfb619375bd4a | [
"Apache-2.0"
] | permissive | lyltencent/tf_models_v15 | e3bed9dfee42685118b0f3d21bb9de37d58cf500 | 0081dbe36831342051c09a2f94ef9ffa95da0e79 | refs/heads/master | 2022-10-20T20:00:26.594259 | 2020-09-19T05:37:22 | 2020-09-19T05:37:22 | 161,750,047 | 0 | 1 | Apache-2.0 | 2021-03-31T21:04:01 | 2018-12-14T07:47:33 | Python | UTF-8 | Python | false | false | 12,050 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception V2 Faster R-CNN implementation.
See "Rethinking the Inception Architecture for Computer Vision"
https://arxiv.org/abs/1512.00567
"""
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from nets import inception_v2
slim = tf.contrib.slim
def _batch_norm_arg_scope(list_ops,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
batch_norm_scale=False,
train_batch_norm=False):
"""Slim arg scope for InceptionV2 batch norm."""
if use_batch_norm:
batch_norm_params = {
'is_training': train_batch_norm,
'scale': batch_norm_scale,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon
}
normalizer_fn = slim.batch_norm
else:
normalizer_fn = None
batch_norm_params = None
return slim.arg_scope(list_ops,
normalizer_fn=normalizer_fn,
normalizer_params=batch_norm_params)
class FasterRCNNInceptionV2FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN Inception V2 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0,
depth_multiplier=1.0,
min_depth=16):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._depth_multiplier = depth_multiplier
self._min_depth = min_depth
super(FasterRCNNInceptionV2FeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN Inception V2 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
with tf.variable_scope('InceptionV2',
reuse=self._reuse_weights) as scope:
with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
batch_norm_scale=True,
train_batch_norm=self._train_batch_norm):
_, activations = inception_v2.inception_v2_base(
preprocessed_inputs,
final_endpoint='Mixed_4e',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
return activations['Mixed_4e']
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name (unused).
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
net = proposal_feature_maps
depth = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
data_format = 'NHWC'
concat_dim = 3 if data_format == 'NHWC' else 1
with tf.variable_scope('InceptionV2', reuse=self._reuse_weights):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
padding='SAME',
data_format=data_format):
with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],
batch_norm_scale=True,
train_batch_norm=self._train_batch_norm):
with tf.variable_scope('Mixed_5a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
scope='MaxPool_1a_3x3')
net = tf.concat([branch_0, branch_1, branch_2], concat_dim)
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1],
scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat([branch_0, branch_1, branch_2, branch_3],
concat_dim)
with tf.variable_scope('Mixed_5c'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1],
scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
proposal_classifier_features = tf.concat(
[branch_0, branch_1, branch_2, branch_3], concat_dim)
return proposal_classifier_features
| [
"[email protected]"
] | |
b6c8582e27830c87d8baddbf1ebed41b4789d50e | ca55dcaa64ea9db4068e13091321cfebecc0ff41 | /codeUp/codeUp100/1022.py | ce1b4c02ce9f4e7fb71c474a37ec6ab7fc90b694 | [] | no_license | gomtinQQ/algorithm-python | 8fb8343594b945099ae2a4dfa794ecb47e54ab0b | 751562922b66e335f621d366bb73dacdc7125140 | refs/heads/master | 2022-12-07T23:05:44.535593 | 2020-08-21T12:29:58 | 2020-08-21T12:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | '''
1022 : [Basics - Input/Output] Read one sentence and print it as-is (explanation)
Practice reading a sentence that contains whitespace characters and printing it exactly as entered.
'''
str = input()
print(str) | [
"[email protected]"
] | |
83f702f40210def83db43b117c01fb32c0afec26 | f0f4a0f24b3a7cc8bf0366cf329923e9bd5b00c7 | /activity/activity_DepositDigestIngestAssets.py | 515d8398361d40ebc252d28d7bed3993d5a6e601 | [
"MIT"
] | permissive | elifesciences/elife-bot | 45c79993d13bacb37f59ba57462179dd7c6f1e2e | 2324e26943f805a0602ea3251ff0f6a5db27f1a0 | refs/heads/develop | 2023-08-17T15:25:42.170870 | 2023-08-14T16:47:02 | 2023-08-14T16:47:02 | 7,503,542 | 21 | 10 | MIT | 2023-09-07T19:50:30 | 2013-01-08T15:09:54 | Python | UTF-8 | Python | false | false | 4,579 | py | import os
import json
from S3utility.s3_notification_info import parse_activity_data
from provider.storage_provider import storage_context
from provider import digest_provider, download_helper
import provider.utils as utils
from activity.objects import Activity
"""
DepositDigestIngestAssets.py activity
"""
class activity_DepositDigestIngestAssets(Activity):
def __init__(self, settings, logger, client=None, token=None, activity_task=None):
super(activity_DepositDigestIngestAssets, self).__init__(
settings, logger, client, token, activity_task
)
self.name = "DepositDigestIngestAssets"
self.pretty_name = "Deposit Digest Ingest Assets"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 5
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 5
self.description = "Deposit Assets for a Digest (Pre-Ingest)"
# Track some values
self.input_file = None
self.digest = None
self.dest_resource = None
# Local directory settings
self.directories = {
"TEMP_DIR": os.path.join(self.get_tmp_dir(), "tmp_dir"),
"INPUT_DIR": os.path.join(self.get_tmp_dir(), "input_dir"),
}
# Track the success of some steps
self.build_status = None
def do_activity(self, data=None):
"do the work"
if self.logger:
self.logger.info("data: %s" % json.dumps(data, sort_keys=True, indent=4))
# Create output directories
self.make_activity_directories()
# parse the data with the digest_provider
real_filename, bucket_name, bucket_folder = parse_activity_data(data)
# Download from S3
self.input_file = download_helper.download_file_from_s3(
self.settings,
real_filename,
bucket_name,
bucket_folder,
self.directories.get("INPUT_DIR"),
)
# Parse input and build digest
digest_config = digest_provider.digest_config(
self.settings.digest_config_section, self.settings.digest_config_file
)
self.build_status, self.digest = digest_provider.build_digest(
self.input_file,
self.directories.get("TEMP_DIR"),
self.logger,
digest_config,
)
if not self.build_status:
self.logger.info(
"Failed to build the Digest in Deposit Digest Ingest Assets for %s",
real_filename,
)
return self.ACTIVITY_PERMANENT_FAILURE
# check if there is an image and if not return True
if not digest_provider.has_image(self.digest):
self.logger.info(
"Digest for file %s has no images to deposit", real_filename
)
return self.ACTIVITY_SUCCESS
# bucket name
cdn_bucket_name = (
self.settings.publishing_buckets_prefix + self.settings.digest_cdn_bucket
)
# deposit the image file to S3
self.deposit_digest_image(self.digest, cdn_bucket_name)
return self.ACTIVITY_SUCCESS
def image_dest_resource(self, digest, cdn_bucket_name):
"concatenate the S3 bucket object path we copy the file to"
msid = utils.msid_from_doi(digest.doi)
article_id = utils.pad_msid(msid)
# file name from the digest image file
file_name = digest.image.file.split(os.sep)[-1]
new_file_name = digest_provider.new_file_name(file_name, msid)
storage_provider = self.settings.storage_provider + "://"
dest_resource = (
storage_provider + cdn_bucket_name + "/" + article_id + "/" + new_file_name
)
return dest_resource
def deposit_digest_image(self, digest, cdn_bucket_name):
"deposit the image file from the digest to the bucket"
self.dest_resource = self.image_dest_resource(digest, cdn_bucket_name)
storage = storage_context(self.settings)
self.logger.info("Depositing digest image to S3 key %s", self.dest_resource)
# set the bucket object resource from the local file
metadata = {"ContentType": utils.content_type_from_file_name(digest.image.file)}
storage.set_resource_from_filename(
self.dest_resource, digest.image.file, metadata
)
self.logger.info("Deposited digest image %s to S3", digest.image.file)
return True
| [
"[email protected]"
] | |
bd6a7d150cf3eb9fac42f5a543f377ad8356ad67 | 27691e5ef8e49fb29189b01dd76a1dc3720e7ae8 | /AC/ABC-TDD/180/c.py | 76f7581f37b0ab7ec2b1fda1f0887f7b32dc1463 | [] | no_license | oshou/procon | 61e5f5bc819e0fe5ab29749fc2f894fe6f3b1d07 | 3d000c64b5917c65b51ed7da5b90cb79892d5909 | refs/heads/master | 2023-05-10T23:56:50.861468 | 2021-09-23T06:07:29 | 2021-09-23T06:07:29 | 116,886,484 | 1 | 0 | null | 2023-05-05T02:28:41 | 2018-01-10T00:21:38 | Go | UTF-8 | Python | false | false | 257 | py | n = int(input())
ans = []
for i in range(1, n+1):
if i*i > n:
break
if n % i == 0:
ans.append(i)
tmp = n//i
if i != tmp:
ans.append(n//i)
ans = sorted(ans)
counts = len(ans)
for num in ans:
print(num)
| [
"[email protected]"
] | |
315ab7aa2ef9d0579f0d045b6dfb17919ba8530a | c741f04141784a2571d2d27d95e0d994e4584ab1 | /learning/py3/0-1/21-模块-包-4.py | d70f489fbe8852df7919744087de49fb955d0899 | [] | no_license | haodonghui/python | bbdece136620bc6f787b4942d6e1760ed808afd4 | 365062ba54297c81093b7f378742e76d438658b7 | refs/heads/master | 2022-02-03T23:52:37.288503 | 2022-01-27T05:23:25 | 2022-01-27T05:23:25 | 191,729,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | from package1 import *
p.prt(4, 'import * from a package')
'''
4.
Imagine what happens if we use "from sound.effects import *".
Python would go into the file system, find all the submodules inside the package, and import them one by one.
Windows is a case-insensitive system.
On such platforms nobody can guarantee whether a file called ECHO.py is imported as the module echo, Echo, or even ECHO.
To solve this problem, the package author has to provide an explicit index of the package.
The import statement follows this rule:
if the package definition file __init__.py defines a list variable named __all__,
then "from package import *" imports exactly the names in that list as the package contents.
As the package author, don't forget to keep __all__ up to date after you update the package. You may say "then I just won't use import * at all" - fine, no problem, you're the boss.
'''
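# A minimal sketch of what package1/__init__.py is assumed to contain for the
# demo below (illustrative only; the real file is learning/py3/0-1/package1/__init__.py,
# and p.py / p2.py are assumed to each define a prt(index, msg) helper):
#
#     __all__ = ['p', 'p2']   # names exported by "from package1 import *"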
def package_example():
    p.prt(4,
          'learning/py3/0-1/package1/__init__.py has __all__ = [\'p\']; with "from package1 import *" at the top, only the p module under package1 is imported')
    p2.prt(4,
          'learning/py3/0-1/package1/__init__.py has __all__ = [\'p\', \'p2\']; with "from package1 import *" at the top, both the p and p2 modules under package1 are imported')
package_example()
| [
"[email protected]"
] | |
c64d4a116649878ddb94e95e66e3c58c114e7155 | fb39e15da72e41cf1903aa3670833e35b668edad | /main.py | 37961bed2279b1b12f56a1ef46d4b9588b8717ea | [] | no_license | jackfrostwillbeking/script_test | e9b8c91babc2c9d6ed111a77b5156f3624683a1e | f47370f05632e1a76cbcacd24737ec370d2faf58 | refs/heads/master | 2021-04-06T17:11:36.479236 | 2018-03-09T10:21:19 | 2018-03-09T10:21:19 | 124,380,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | import subprocess
import sys
res = subprocess.call(["/bin/bash","-c","./test_script.sh"])
sys.exit(0)
| [
"[email protected]"
] | |
a7012e26515d2e214c34f1a948756e9af8cff489 | 5837fd85b18b56d23612de1e36d79b5a06827542 | /sniterator.py | d5e3435d3bd0924383507459b0e3f279464d9c66 | [
"MIT"
] | permissive | ChristopherWilks/snaptron | 75e33c4f25a65f3093555a7bf235ab69865f7086 | 75903c30d54708b19d91772142013687c74d88d8 | refs/heads/master | 2023-02-19T01:38:57.343293 | 2023-02-11T21:47:52 | 2023-02-11T21:47:52 | 45,953,724 | 26 | 7 | NOASSERTION | 2022-06-17T21:10:44 | 2015-11-11T02:03:37 | Python | UTF-8 | Python | false | false | 3,341 | py | #!/usr/bin/env python2.7
# This file is part of Snaptron.
#
# Snaptron is free software: you can redistribute it and/or modify
# it under the terms of the
#
# The MIT License
#
# Copyright (c) 2016- by Christopher Wilks <[email protected]>
# and Ben Langmead <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import subprocess
import shlex
class SnaptronServerIterator():
def __init__(self,cmds,stdout=subprocess.PIPE,shell=False,bufsize=-1,direct_output=False):
self.cmds = cmds
self.stdout = stdout
#performance trick, pipe output from subprocess directly to this process's output
#to avoid the cost of python line processing
if direct_output:
self.stdout = sys.stdout
self.shell = shell
self.bufsize = bufsize
#used to run them in parallel, but that's a bad idea because:
#1) results will come back in random order
#2) we need to control the number of potential processes spun up by any given query (so for now we'll keep this at 1)
if direct_output:
for cmd in self.cmds:
extern_proc = subprocess.Popen(cmd, shell=self.shell, bufsize=self.bufsize)
extern_proc.wait()
else:
#TODO: stop this running in parallel for the above cited reasons, but will need to handle
#the sequential nature in the next() method
self.extern_procs = [subprocess.Popen(cmd, stdout=self.stdout, shell=self.shell, bufsize=self.bufsize) for cmd in self.cmds]
self.idx = 0
def __iter__(self):
return self
#this is only used if the self.stdout isn't directed to the current process's sys.stdout
#i.e. direct_output is False
def next(self):
line = self.extern_procs[self.idx].stdout.readline()
if line == '':
exitc=self.extern_procs[self.idx].wait()
if exitc != 0:
raise RuntimeError("%s returned non-0 exit code\n" % (self.cmds[self.idx]))
self.idx+=1
if self.idx >= len(self.extern_procs):
raise StopIteration
line = self.extern_procs[self.idx].stdout.readline()
return line
| [
"[email protected]"
] | |
9549acb29d9a0c5bf134052cccc04c0ca9a590e6 | f5f538edf999d5a7eb265b90efa4599a81367489 | /ptranking/metric/adhoc_metric.py | c2e41ba50be9154df1b89366b12914dfe3e440f0 | [
"MIT"
] | permissive | ii-metric/ptranking | ad4db16e5a995b11103b04af46aed099e525af82 | fd4fe1373fd2dfd7c6342eb666f36e34b71e8298 | refs/heads/master | 2023-03-24T03:18:16.414348 | 2021-03-19T06:06:43 | 2021-03-19T06:06:43 | 328,522,824 | 0 | 1 | MIT | 2021-03-19T06:06:44 | 2021-01-11T02:02:01 | Python | UTF-8 | Python | false | false | 18,252 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
The widely used IR evaluation metrics, such as AP (average precision), nDCG and ERR
Note: commonly the metric-computation is not conducted on gpu
"""
import torch
import numpy as np
from ptranking.data.data_utils import LABEL_TYPE
""" Precision """
def torch_precision_at_k(batch_sys_sorted_labels, k=None, gpu=False):
''' Precision at k
:param sys_sorted_labels: [batch_size, ranking_size] system's predicted ltr_adhoc of labels in a descending order
    :param k: cutoff value
    :return: [batch_size, 1]
'''
max_cutoff = batch_sys_sorted_labels.size(1)
used_cutoff = min(max_cutoff, k)
batch_sys_sorted_labels = batch_sys_sorted_labels[:, 0:used_cutoff]
batch_bi_sys_sorted_labels = torch.clamp(batch_sys_sorted_labels, min=0, max=1) # binary
batch_sys_cumsum_reles = torch.cumsum(batch_bi_sys_sorted_labels, dim=1)
batch_ranks = (torch.arange(used_cutoff).type(torch.cuda.FloatTensor).expand_as(batch_sys_cumsum_reles) + 1.0) \
if gpu else (torch.arange(used_cutoff).expand_as(batch_sys_cumsum_reles) + 1.0)
batch_sys_rankwise_precision = batch_sys_cumsum_reles / batch_ranks
batch_sys_p_at_k = batch_sys_rankwise_precision[:, used_cutoff-1:used_cutoff]
return batch_sys_p_at_k
def torch_precision_at_ks(batch_sys_sorted_labels, ks=None, gpu=False):
''' Precision at ks
:param sys_sorted_labels: [batch_size, ranking_size] system's predicted ltr_adhoc of labels in a descending order
:param ks: cutoff values
:return: [batch_size, len(ks)]
'''
valid_max_cutoff = batch_sys_sorted_labels.size(1)
need_padding = True if valid_max_cutoff < max(ks) else False
used_ks = [k for k in ks if k <= valid_max_cutoff] if need_padding else ks
max_cutoff = max(used_ks)
inds = torch.from_numpy(np.asarray(used_ks) - 1)
batch_sys_sorted_labels = batch_sys_sorted_labels[:, 0:max_cutoff]
batch_bi_sys_sorted_labels = torch.clamp(batch_sys_sorted_labels, min=0, max=1) # binary
batch_sys_cumsum_reles = torch.cumsum(batch_bi_sys_sorted_labels, dim=1)
batch_ranks = (torch.arange(max_cutoff).type(torch.cuda.FloatTensor).expand_as(batch_sys_cumsum_reles) + 1.0) if gpu \
else (torch.arange(max_cutoff).expand_as(batch_sys_cumsum_reles) + 1.0)
batch_sys_rankwise_precision = batch_sys_cumsum_reles / batch_ranks
batch_sys_p_at_ks = batch_sys_rankwise_precision[:, inds]
if need_padding:
padded_p_at_ks = torch.zeros(batch_sys_sorted_labels.size(0), len(ks))
padded_p_at_ks[:, 0:len(used_ks)] = batch_sys_p_at_ks
return padded_p_at_ks
else:
return batch_sys_p_at_ks
""" Average Precision """
def torch_ap_at_k(batch_sys_sorted_labels, batch_ideal_sorted_labels, k=None, gpu=False):
'''
    AP (average precision) at a single cutoff value k
    :param ideal_sorted_labels: [batch_size, ranking_size] the ideal ltr_adhoc of labels
    :param sys_sorted_labels: [batch_size, ranking_size] system's predicted ltr_adhoc of labels in a descending order
    :param k: cutoff value
    :return: [batch_size, 1]
'''
max_cutoff = batch_sys_sorted_labels.size(1)
used_cutoff = min(max_cutoff, k)
batch_sys_sorted_labels = batch_sys_sorted_labels[:, 0:used_cutoff]
batch_bi_sys_sorted_labels = torch.clamp(batch_sys_sorted_labels, min=0, max=1) # binary
batch_sys_cumsum_reles = torch.cumsum(batch_bi_sys_sorted_labels, dim=1)
batch_ranks = (torch.arange(used_cutoff).type(torch.cuda.FloatTensor).expand_as(batch_sys_cumsum_reles) + 1.0) if gpu \
else (torch.arange(used_cutoff).expand_as(batch_sys_cumsum_reles) + 1.0)
batch_sys_rankwise_precision = batch_sys_cumsum_reles / batch_ranks # rank-wise precision
batch_sys_cumsum_precision = torch.cumsum(batch_sys_rankwise_precision * batch_bi_sys_sorted_labels, dim=1) # exclude precisions of which the corresponding documents are not relevant
batch_std_cumsum_reles = torch.cumsum(batch_ideal_sorted_labels, dim=1)
batch_sys_rankwise_ap = batch_sys_cumsum_precision / batch_std_cumsum_reles[:, 0:used_cutoff]
batch_sys_ap_at_k = batch_sys_rankwise_ap[:, used_cutoff-1:used_cutoff]
return batch_sys_ap_at_k
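# A small worked example of torch_ap_at_k (an illustrative sketch with assumed values):
#   sys_sorted_labels   = [[1., 0., 1.]]   # relevant, non-relevant, relevant
#   ideal_sorted_labels = [[1., 1., 0.]]
#   rank-wise precision = [1, 1/2, 2/3]; only the relevant positions contribute,
#   so AP@3 = (1 + 2/3) / 2 = 0.8333...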
def torch_ap_at_ks(batch_sys_sorted_labels, batch_ideal_sorted_labels, ks=None, gpu=False):
'''
AP(average precision) at ks (i.e., different cutoff values)
:param ideal_sorted_labels: [batch_size, ranking_size] the ideal ltr_adhoc of labels
:param sys_sorted_labels: [batch_size, ranking_size] system's predicted ltr_adhoc of labels in a descending order
:param ks:
:return: [batch_size, len(ks)]
'''
valid_max_cutoff = batch_sys_sorted_labels.size(1)
need_padding = True if valid_max_cutoff < max(ks) else False
used_ks = [k for k in ks if k <= valid_max_cutoff] if need_padding else ks
max_cutoff = max(used_ks)
inds = torch.from_numpy(np.asarray(used_ks) - 1)
batch_sys_sorted_labels = batch_sys_sorted_labels[:, 0:max_cutoff]
batch_bi_sys_sorted_labels = torch.clamp(batch_sys_sorted_labels, min=0, max=1) # binary
batch_sys_cumsum_reles = torch.cumsum(batch_bi_sys_sorted_labels, dim=1)
batch_ranks = (torch.arange(max_cutoff).type(torch.cuda.FloatTensor).expand_as(batch_sys_cumsum_reles) + 1.0) if gpu \
else (torch.arange(max_cutoff).expand_as(batch_sys_cumsum_reles) + 1.0)
batch_sys_rankwise_precision = batch_sys_cumsum_reles / batch_ranks # rank-wise precision
batch_sys_cumsum_precision = torch.cumsum(batch_sys_rankwise_precision * batch_bi_sys_sorted_labels, dim=1) # exclude precisions of which the corresponding documents are not relevant
batch_std_cumsum_reles = torch.cumsum(batch_ideal_sorted_labels, dim=1)
batch_sys_rankwise_ap = batch_sys_cumsum_precision / batch_std_cumsum_reles[:, 0:max_cutoff]
batch_sys_ap_at_ks = batch_sys_rankwise_ap[:, inds]
if need_padding:
padded_ap_at_ks = torch.zeros(batch_sys_sorted_labels.size(0), len(ks))
padded_ap_at_ks[:, 0:len(used_ks)] = batch_sys_ap_at_ks
return padded_ap_at_ks
else:
return batch_sys_ap_at_ks
""" NERR """
def torch_rankwise_err(batch_sorted_labels, max_label=None, k=10, point=True, gpu=False):
assert batch_sorted_labels.size(1) >= k
assert max_label is not None # it is either query-level or corpus-level
batch_labels = batch_sorted_labels[:, 0:k]
batch_satis_probs = (torch.pow(2.0, batch_labels) - 1.0) / torch.pow(2.0, max_label)
batch_unsatis_probs = torch.ones_like(batch_labels) - batch_satis_probs
batch_cum_unsatis_probs = torch.cumprod(batch_unsatis_probs, dim=1)
batch_ranks = torch.arange(k).type(torch.cuda.FloatTensor).expand_as(batch_labels) + 1.0 if gpu \
else torch.arange(k).expand_as(batch_labels) + 1.0
batch_expt_ranks = 1.0 / batch_ranks
batch_cascad_unsatis_probs = torch.ones_like(batch_expt_ranks)
batch_cascad_unsatis_probs[:, 1:k] = batch_cum_unsatis_probs[:, 0:k-1]
batch_expt_satis_ranks = batch_expt_ranks * batch_satis_probs * batch_cascad_unsatis_probs # w.r.t. all rank positions
if point: # a specific position
batch_err_at_k = torch.sum(batch_expt_satis_ranks, dim=1, keepdim=True)
return batch_err_at_k
else:
batch_rankwise_err = torch.cumsum(batch_expt_satis_ranks, dim=1)
return batch_rankwise_err
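# A small worked example of torch_rankwise_err (an illustrative sketch with assumed
# values): for sorted labels [[3., 0.]] with max_label=3 and k=2,
#   satisfaction probs = [(2^3 - 1) / 2^3, 0] = [0.875, 0.0]
#   ERR@2 = 1/1 * 0.875 + 1/2 * 0.0 * (1 - 0.875) = 0.875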
def torch_nerr_at_k(batch_sys_sorted_labels, batch_ideal_sorted_labels, k=None, gpu=False, label_type=LABEL_TYPE.MultiLabel):
valid_max_cutoff = batch_sys_sorted_labels.size(1)
cutoff = min(valid_max_cutoff, k)
if LABEL_TYPE.MultiLabel == label_type:
max_label = torch.max(batch_ideal_sorted_labels)
batch_sys_err_at_k = torch_rankwise_err(batch_sys_sorted_labels, max_label=max_label, k=cutoff, point=True, gpu=gpu)
batch_ideal_err_at_k = torch_rankwise_err(batch_ideal_sorted_labels, max_label=max_label, k=cutoff, point=True, gpu=gpu)
batch_nerr_at_k = batch_sys_err_at_k / batch_ideal_err_at_k
return batch_nerr_at_k
else:
raise NotImplementedError
def torch_nerr_at_ks(batch_sys_sorted_labels, batch_ideal_sorted_labels, ks=None, gpu=False, label_type=LABEL_TYPE.MultiLabel):
'''
:param sys_sorted_labels: [batch_size, ranking_size] the standard labels sorted in descending order according to predicted relevance scores
:param ks:
:return: [batch_size, len(ks)]
'''
valid_max_cutoff = batch_sys_sorted_labels.size(1)
need_padding = True if valid_max_cutoff < max(ks) else False
used_ks = [k for k in ks if k <= valid_max_cutoff] if need_padding else ks
max_label = torch.max(batch_ideal_sorted_labels)
max_cutoff = max(used_ks)
inds = torch.from_numpy(np.asarray(used_ks) - 1)
if LABEL_TYPE.MultiLabel == label_type:
batch_sys_rankwise_err = torch_rankwise_err(batch_sys_sorted_labels, max_label=max_label, k=max_cutoff, point=False, gpu=gpu)
batch_ideal_rankwise_err = torch_rankwise_err(batch_ideal_sorted_labels, max_label=max_label, k=max_cutoff, point=False, gpu=gpu)
batch_rankwise_nerr = batch_sys_rankwise_err/batch_ideal_rankwise_err
batch_nerr_at_ks = batch_rankwise_nerr[:, inds]
if need_padding:
padded_nerr_at_ks = torch.zeros(batch_sys_sorted_labels.size(0), len(ks))
padded_nerr_at_ks[:, 0:len(used_ks)] = batch_nerr_at_ks
return padded_nerr_at_ks
else:
return batch_nerr_at_ks
else:
raise NotImplementedError
""" nDCG """
def torch_dcg_at_k(batch_sorted_labels, cutoff=None, label_type=LABEL_TYPE.MultiLabel, gpu=False):
'''
ICML-nDCG, which places stronger emphasis on retrieving relevant documents
:param batch_sorted_labels: [batch_size, ranking_size] a batch of ranked labels (either standard or predicted by a system)
:param cutoff: the cutoff position
:param label_type: either the case of multi-level relevance or the case of listwise int-value, e.g., MQ2007-list
:return: [batch_size, 1] cumulative gains for each rank position
'''
if cutoff is None: # using whole list
cutoff = batch_sorted_labels.size(1)
if LABEL_TYPE.MultiLabel == label_type: #the common case with multi-level labels
batch_numerators = torch.pow(2.0, batch_sorted_labels[:, 0:cutoff]) - 1.0
elif LABEL_TYPE.Permutation == label_type: # the case like listwise ltr_adhoc, where the relevance is labeled as (n-rank_position)
batch_numerators = batch_sorted_labels[:, 0:cutoff]
else:
raise NotImplementedError
batch_discounts = torch.log2(torch.arange(cutoff).type(torch.cuda.FloatTensor).expand_as(batch_numerators) + 2.0) if gpu \
else torch.log2(torch.arange(cutoff).expand_as(batch_numerators) + 2.0)
batch_dcg_at_k = torch.sum(batch_numerators/batch_discounts, dim=1, keepdim=True)
return batch_dcg_at_k
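# A small worked example of torch_dcg_at_k (an illustrative sketch with assumed values):
# for multi-level labels [[3., 2., 0.]] and cutoff=3,
#   gains     = 2^[3, 2, 0] - 1          = [7, 3, 0]
#   discounts = log2(rank + 1)           = log2([2, 3, 4]) = [1.0, 1.585, 2.0]
#   DCG@3     = 7/1.0 + 3/1.585 + 0/2.0  ~= 8.89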
def torch_dcg_at_ks(batch_sorted_labels, max_cutoff, label_type=LABEL_TYPE.MultiLabel, gpu=False):
'''
:param batch_sorted_labels: [batch_size, ranking_size] ranked labels (either standard or predicted by a system)
:param max_cutoff: the maximum cutoff value
:param label_type: either the case of multi-level relevance or the case of listwise int-value, e.g., MQ2007-list
:return: [batch_size, max_cutoff] cumulative gains for each rank position
'''
if LABEL_TYPE.MultiLabel == label_type: # the common case with multi-level labels
batch_numerators = torch.pow(2.0, batch_sorted_labels[:, 0:max_cutoff]) - 1.0
elif LABEL_TYPE.Permutation == label_type: # the case like listwise ltr_adhoc, where the relevance is labeled as (n-rank_position)
batch_numerators = batch_sorted_labels[:, 0:max_cutoff]
else:
raise NotImplementedError
batch_discounts = torch.log2(torch.arange(max_cutoff).type(torch.cuda.FloatTensor).expand_as(batch_numerators) + 2.0) if gpu\
else torch.log2(torch.arange(max_cutoff).expand_as(batch_numerators) + 2.0)
batch_dcg_at_ks = torch.cumsum(batch_numerators/batch_discounts, dim=1) # dcg w.r.t. each position
return batch_dcg_at_ks
def torch_nDCG_at_k(batch_sys_sorted_labels, batch_ideal_sorted_labels, k=None, gpu=False, label_type=LABEL_TYPE.MultiLabel):
batch_sys_dcg_at_k = torch_dcg_at_k(batch_sys_sorted_labels, cutoff=k, label_type=label_type, gpu=gpu) # only using the cumulative gain at the final rank position
batch_ideal_dcg_at_k = torch_dcg_at_k(batch_ideal_sorted_labels, cutoff=k, label_type=label_type, gpu=gpu)
batch_ndcg_at_k = batch_sys_dcg_at_k / batch_ideal_dcg_at_k
return batch_ndcg_at_k
def torch_nDCG_at_ks(batch_sys_sorted_labels, batch_ideal_sorted_labels, ks=None, gpu=False, label_type=LABEL_TYPE.MultiLabel):
valid_max_cutoff = batch_sys_sorted_labels.size(1)
used_ks = [k for k in ks if k<=valid_max_cutoff] if valid_max_cutoff < max(ks) else ks
inds = torch.from_numpy(np.asarray(used_ks) - 1)
batch_sys_dcgs = torch_dcg_at_ks(batch_sys_sorted_labels, max_cutoff=max(used_ks), label_type=label_type, gpu=gpu)
batch_sys_dcg_at_ks = batch_sys_dcgs[:, inds] # get cumulative gains at specified rank positions
batch_ideal_dcgs = torch_dcg_at_ks(batch_ideal_sorted_labels, max_cutoff=max(used_ks), label_type=label_type, gpu=gpu)
batch_ideal_dcg_at_ks = batch_ideal_dcgs[:, inds]
batch_ndcg_at_ks = batch_sys_dcg_at_ks / batch_ideal_dcg_at_ks
if valid_max_cutoff < max(ks):
padded_ndcg_at_ks = torch.zeros(batch_sys_sorted_labels.size(0), len(ks))
padded_ndcg_at_ks[:, 0:len(used_ks)] = batch_ndcg_at_ks
return padded_ndcg_at_ks
else:
return batch_ndcg_at_ks
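# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assumes the multi-level relevance case: when the system ranking already equals the
# ideal ordering, nDCG at every cutoff should be 1.0.
def _demo_torch_ndcg_at_ks():
    sys_sorted_labels = torch.tensor([[3.0, 2.0, 2.0, 1.0, 0.0]])  # one ranking, ideally ordered
    ideal_sorted_labels, _ = torch.sort(sys_sorted_labels, dim=1, descending=True)
    return torch_nDCG_at_ks(sys_sorted_labels, ideal_sorted_labels, ks=[1, 3, 5])  # -> tensor([[1., 1., 1.]])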
""" Kendall'tau Coefficient """
def torch_kendall_tau(sys_ranking, natural_ascending_as_reference = True):
'''
$\tau = 1.0 - \frac{2S(\pi, \delta)}{N(N-1)/2}$, cf. 2006-Automatic Evaluation of Information Ordering: Kendall’s Tau
The tie issue is not considered within this version.
    The current implementation simply counts the number of inversions and normalizes it by N(N-1)/2. The underlying assumption is that the reference ranking is the ideal one, i.e., labels are ordered in descending order.
    :param sys_ranking: the system's ranking, whose entries can be predicted values, labels, etc.
:return:
'''
assert 1 == len(sys_ranking.size()) # one-dimension vector
ranking_size = sys_ranking.size(0)
pair_diffs = sys_ranking.view(-1, 1) - sys_ranking.view(1, -1)
if natural_ascending_as_reference:
bi_pair_diffs = torch.clamp(pair_diffs, min=0, max=1)
bi_pair_diffs_triu1 = torch.triu(bi_pair_diffs, diagonal=1)
#print('bi_pair_diffs_triu1\n', bi_pair_diffs_triu1)
tau = 1.0 - 4 * torch.sum(bi_pair_diffs_triu1) / (ranking_size*(ranking_size-1))
else: # i.e., natural descending as the reference
bi_pair_diffs = torch.clamp(pair_diffs, min=-1, max=0)
bi_pair_diffs_triu1 = torch.triu(bi_pair_diffs, diagonal=1)
#print('bi_pair_diffs_triu1\n', bi_pair_diffs_triu1)
print('total discordant: ', 2*torch.sum(bi_pair_diffs_triu1))
tau = 1.0 + 4 * torch.sum(bi_pair_diffs_triu1) / (ranking_size*(ranking_size-1))
return tau
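# Illustrative usage sketch (added for clarity; not part of the original module): a strictly
# increasing ranking agrees perfectly with the natural ascending reference (tau = 1.0),
# while a strictly decreasing one is fully discordant (tau = -1.0).
def _demo_torch_kendall_tau():
    increasing = torch.arange(5).float()
    decreasing = torch.arange(4, -1, -1).float()
    return torch_kendall_tau(increasing), torch_kendall_tau(decreasing)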
def rele_gain(rele_level, gain_base=2.0):
gain = np.power(gain_base, rele_level) - 1.0
return gain
def np_metric_at_ks(ranker=None, test_Qs=None, ks=[1, 5, 10], label_type=LABEL_TYPE.MultiLabel, max_rele_level=None, gpu=False, device=None):
'''
    No check is performed here; it is assumed (e.g., that light_filtering() has been called)
    that each test instance Q includes at least k (k = max(ks)) documents and at least one relevant document.
    Otherwise errors will occur.
'''
cnt = 0
sum_ndcg_at_ks = torch.zeros(len(ks))
sum_err_at_ks = torch.zeros(len(ks))
sum_ap_at_ks = torch.zeros(len(ks))
sum_p_at_ks = torch.zeros(len(ks))
list_ndcg_at_ks_per_q = []
list_err_at_ks_per_q = []
list_ap_at_ks_per_q = []
list_p_at_ks_per_q = []
for entry in test_Qs:
tor_test_ranking, tor_test_std_label_vec = entry[1], torch.squeeze(entry[2], dim=0) # remove the size 1 of dim=0 from loader itself
if gpu:
tor_rele_pred = ranker.predict(tor_test_ranking.to(device))
tor_rele_pred = torch.squeeze(tor_rele_pred)
tor_rele_pred = tor_rele_pred.cpu()
else:
tor_rele_pred = ranker.predict(tor_test_ranking)
tor_rele_pred = torch.squeeze(tor_rele_pred)
_, tor_sorted_inds = torch.sort(tor_rele_pred, descending=True)
sys_sorted_labels = tor_test_std_label_vec[tor_sorted_inds]
ideal_sorted_labels, _ = torch.sort(tor_test_std_label_vec, descending=True)
        # the batch version defined above expects [batch_size, ranking_size] inputs and the batch_* keyword names
        ndcg_at_ks_per_query = torch_nDCG_at_ks(batch_sys_sorted_labels=sys_sorted_labels.view(1, -1),
                                                batch_ideal_sorted_labels=ideal_sorted_labels.view(1, -1),
                                                ks=ks, label_type=label_type).squeeze(0)
sum_ndcg_at_ks = torch.add(sum_ndcg_at_ks, ndcg_at_ks_per_query)
list_ndcg_at_ks_per_q.append(ndcg_at_ks_per_query.numpy())
err_at_ks_per_query = torch_nerr_at_ks(sys_sorted_labels, ideal_sorted_labels=ideal_sorted_labels, ks=ks, label_type=label_type)
sum_err_at_ks = torch.add(sum_err_at_ks, err_at_ks_per_query)
list_err_at_ks_per_q.append(err_at_ks_per_query.numpy())
ap_at_ks_per_query = torch_ap_at_ks(sys_sorted_labels=sys_sorted_labels, ideal_sorted_labels=ideal_sorted_labels, ks=ks)
sum_ap_at_ks = torch.add(sum_ap_at_ks, ap_at_ks_per_query)
list_ap_at_ks_per_q.append(ap_at_ks_per_query.numpy())
p_at_ks_per_query = torch_precision_at_ks(sys_sorted_labels=sys_sorted_labels, ks=ks)
sum_p_at_ks = torch.add(sum_p_at_ks, p_at_ks_per_query)
list_p_at_ks_per_q.append(p_at_ks_per_query.numpy())
cnt += 1
ndcg_at_ks = sum_ndcg_at_ks/cnt
err_at_ks = sum_err_at_ks/cnt
ap_at_ks = sum_ap_at_ks / cnt
p_at_ks = sum_p_at_ks/cnt
return ndcg_at_ks.numpy(), err_at_ks.numpy(), ap_at_ks.numpy(), p_at_ks.numpy(), list_ndcg_at_ks_per_q, list_err_at_ks_per_q, list_ap_at_ks_per_q, list_p_at_ks_per_q
def np_stable_softmax_e(histogram):
    histogram = np.asarray(histogram, dtype=np.float64)
    max_v = np.max(histogram, axis=0)  # subtract the max for higher stability when computing softmax() with exp()
    hist = histogram - max_v
    hist_exped = np.exp(hist)
    probs = np.divide(hist_exped, np.sum(hist_exped, axis=0))
    return probs
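# Illustrative usage sketch (added for clarity; not part of the original module): subtracting
# the maximum before exponentiating keeps exp() from overflowing for large scores.
def _demo_np_stable_softmax_e():
    probs = np_stable_softmax_e([1000.0, 1001.0, 1002.0])
    return probs, np.sum(probs)  # probs ~ [0.090, 0.245, 0.665], sum == 1.0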
def eval_cost_mat_group(sorted_std_labels, group_div_cost=np.e, margin_to_non_rele=100.0, rele_gain_base=4.0):
size_ranking = len(sorted_std_labels)
cost_mat = np.zeros(shape=(size_ranking, size_ranking), dtype=np.float64)
for i in range(size_ranking):
i_rele_level = sorted_std_labels[i]
for j in range(size_ranking):
if i==j:
cost_mat[i, j] = 0
else:
j_rele_level = sorted_std_labels[j]
if i_rele_level == j_rele_level:
cost_mat[i, j] = group_div_cost
else:
cost_mat[i, j] = np.abs(rele_gain(i_rele_level, gain_base=rele_gain_base) - rele_gain(j_rele_level, gain_base=rele_gain_base))
if 0 == i_rele_level or 0 == j_rele_level:
cost_mat[i, j] += margin_to_non_rele
return cost_mat
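# Illustrative usage sketch (added for clarity; not part of the original module): for labels
# [2, 2, 0], swapping the two equally relevant documents costs the flat group_div_cost, while
# swapping a relevant and a non-relevant document pays the gain difference plus margin_to_non_rele.
def _demo_eval_cost_mat_group():
    return eval_cost_mat_group(np.asarray([2.0, 2.0, 0.0]))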
| [
"[email protected]"
] | |
dc94995061a88c795f93deb5719820a9c7d233f6 | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /exercises/1901020002/1001S02E05_string.py | 2ef785d1bf8d2423628b42ff569e92038180dac4 | [] | no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 1,395 | py | sample_text = '''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambxiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
'''
#1.2 Replace 'better' with 'worse'
test = sample_text.replace('better','worse')
print('All "better" replaced with "worse":', test)
#1.3 Remove words containing 'ea'
words = test.split()
filtered = []
for word in words:
    if word.find('ea') < 0:
        filtered.append(word)
print('Words without "ea":', filtered)
#1.4 Swap case
swapcased = [i.swapcase() for i in filtered]
print('Case swapped:', swapcased)
#1.5 Sort in ascending order
print('Ascending:', sorted(swapcased))
print('Descending:', sorted(swapcased, reverse=True)) | [
"[email protected]"
] | |
945755e73c4c8fe1438bc352cd5a0861918ad25a | c14d9512c62fc479ba05ea5ed256828e8e1038c5 | /stripe/models/account.py | eaecab1ba97d6ff6408961f48b09a5193aa3c01d | [
"MIT"
] | permissive | jayvdb/saaskit-stripe | c44e6e387d4dd27f564f6959c134ec6aaff8f3c5 | bd292182b0bed47dff86a627231bdabafb99bf71 | refs/heads/master | 2021-09-07T17:25:14.710472 | 2018-01-24T15:17:41 | 2018-02-26T21:10:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,082 | py | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import json
from ..utils import UnixDateTimeField
from .charge import CURRENCY_CHOICES
ACCOUNT_TYPES = (
('custom', _('Custom')),
('standard', _('Standard')),
)
class Account(models.Model):
"""Stripe Account object.
This is an object representing your Stripe account. You can retrieve it to
see properties on the account like its current e-mail address or if the
account is enabled yet to make live charges.
Some properties, marked as 'managed accounts only', are only available to
    platforms that want to create and manage Stripe accounts.
"""
id = models.CharField(max_length=255, primary_key=True)
charges_enabled = models.BooleanField(
help_text=_(
'Whether or not the account can create live charges',
),
)
country = models.CharField( # todo: add CHOICES
max_length=255,
help_text=_('The country of the account')
)
currencies_supports = json.JSONField(
help_text=_(
'The currencies this account can submit when creating charges',
),
)
default_currency = models.CharField(
max_length=255, help_text=_(
'The currency this account has chosen to use as the default'),
choices=CURRENCY_CHOICES)
details_submitted = models.BooleanField(
help_text=_(
'Whether or not account details have been submitted yet. '
'Standalone accounts cannot receive transfers before this is true.',
),
)
transfers_enabled = models.BooleanField(
help_text=_(
'Whether or not Stripe will send automatic transfers for this '
'account. This is only false when Stripe is waiting for '
'additional information from the account holder.',
),
default=True,
)
display_name = models.CharField(
max_length=255,
help_text=_(
'The display name for this account. This is used on the Stripe '
'dashboard to help you differentiate between accounts.',
),
)
email = models.EmailField(help_text=_('The primary user’s email address'))
statement_descriptor = models.TextField(
help_text=_(
'The text that will appear on credit card statements',
),
)
timezone = models.CharField(
max_length=255,
help_text=_(
'The timezone used in the Stripe dashboard for this account. A '
'list of possible timezone values is maintained at the IANA '
'Timezone Database.',
),
)
business_name = models.CharField(
max_length=255,
help_text=_(
'The publicly visible name of the business',
),
)
business_logo = models.CharField(max_length=255, null=True)
business_url = models.URLField(
help_text=_('The publicly visible website of the business'),
null=True,
)
created = UnixDateTimeField()
metadata = json.JSONField(
help_text=_(
'A set of key/value pairs that you can attach to a charge object. '
'it can be useful for storing additional information about the '
'charge in a structured format.',
),
)
support_email = models.EmailField(null=True)
support_phone = models.CharField(
max_length=255,
help_text=_(
'The publicly visible support phone number for the business',
),
null=True,
)
payout_schedule = json.JSONField(null=True)
payout_statement_descriptor = models.CharField(max_length=255, null=True)
payouts_enabled = models.BooleanField()
bank_accounts = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'Bank accounts currently attached to this account.',
),
)
debit_negative_balances = models.BooleanField(
help_text=_(
'(Managed Accounts Only) '
'Whether or not Stripe will attempt to reclaim negative account '
'balances from this account’s bank account.',
),
)
decline_charge_on = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'Account-level settings to automatically decline certain types of '
'charges regardless of the bank’s decision.',
),
)
legal_entity = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'Information regarding the owner of this account, including '
'verification status.',
),
)
product_description = models.TextField(
help_text=_(
'(Managed Accounts Only) '
'An internal-only description of the product or service provided. '
'This is used by Stripe in the event the account gets flagged for '
'potential fraud.',
),
null=True,
)
tos_acceptance = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'Who accepted the Stripe terms of service, and when they accepted '
'it.',
),
)
transfer_schedule = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'When payments collected will be automatically paid out to the '
'account holder’s bank account',
),
)
type = models.CharField(max_length=255, choices=ACCOUNT_TYPES)
verification = json.JSONField(
help_text=_(
'(Managed Accounts Only) '
'That state of the account’s information requests, including what '
'information is needed and by when it must be provided.',
),
)
@classmethod
def from_stripe_object(cls, stripe_object):
_dict = stripe_object.to_dict()
_dict.pop('object')
_dict.pop('external_accounts') # todo: handle this
a = cls(**_dict)
a.save()
return a
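# Illustrative usage sketch (added for clarity; not part of the original module). It assumes
# the official `stripe` client is installed and configured -- this module does not import it --
# and the API key below is only a placeholder.
def _demo_account_from_stripe_object():  # pragma: no cover
    import stripe

    stripe.api_key = "sk_test_placeholder"
    remote_account = stripe.Account.retrieve()  # the account associated with the API key
    return Account.from_stripe_object(remote_account)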
| [
"[email protected]"
] | |
58a513a543317eae70adb8d81445905b24667182 | feccf7588777becba68921c0bfade3e21f5210ce | /tests/www/views/test_views_base.py | 10eb3d5ea4b1ed3d2c841e64340bc37eb739d51a | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | francescomucio/airflow | f17ed9abd8b41d8a2227deca052508edf12f1cbf | c199b1a10563a11cf24436e38cb167ae82c01601 | refs/heads/master | 2023-04-14T17:44:53.438246 | 2023-04-06T06:44:23 | 2023-04-06T06:44:23 | 217,327,641 | 0 | 0 | Apache-2.0 | 2020-09-09T13:26:47 | 2019-10-24T15:06:52 | Python | UTF-8 | Python | false | false | 14,809 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import json
import pytest
from airflow.jobs.base_job import BaseJob
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.www import app as application
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.config import conf_vars
from tests.test_utils.www import check_content_in_response, check_content_not_in_response
def test_index_redirect(admin_client):
resp = admin_client.get("/")
assert resp.status_code == 302
assert "/home" in resp.headers.get("Location")
resp = admin_client.get("/", follow_redirects=True)
check_content_in_response("DAGs", resp)
def test_homepage_query_count(admin_client):
with assert_queries_count(17):
resp = admin_client.get("/home")
check_content_in_response("DAGs", resp)
def test_doc_urls(admin_client, monkeypatch):
# Mocking this way is tying the test closer to the implementation much more than I'd like. :shrug:
from airflow.www.views import AirflowBaseView
monkeypatch.setitem(AirflowBaseView.extra_args, "get_docs_url", lambda _: "!!DOCS_URL!!")
resp = admin_client.get("/", follow_redirects=True)
check_content_in_response("!!DOCS_URL!!", resp)
check_content_in_response("/api/v1/ui", resp)
@pytest.fixture()
def heartbeat_healthy():
# case-1: healthy scheduler status
last_heartbeat = timezone.utcnow()
job = BaseJob(
job_type="SchedulerJob",
state="running",
latest_heartbeat=last_heartbeat,
)
with create_session() as session:
session.add(job)
yield "healthy", last_heartbeat.isoformat()
with create_session() as session:
session.query(BaseJob).filter(
BaseJob.job_type == "SchedulerJob",
BaseJob.state == "running",
BaseJob.latest_heartbeat == last_heartbeat,
).delete()
@pytest.fixture()
def heartbeat_too_slow():
# case-2: unhealthy scheduler status - scenario 1 (SchedulerJob is running too slowly)
last_heartbeat = timezone.utcnow() - datetime.timedelta(minutes=1)
job = BaseJob(
job_type="SchedulerJob",
state="running",
latest_heartbeat=last_heartbeat,
)
with create_session() as session:
session.query(BaseJob).filter(
BaseJob.job_type == "SchedulerJob",
).update({"latest_heartbeat": last_heartbeat - datetime.timedelta(seconds=1)})
session.add(job)
yield "unhealthy", last_heartbeat.isoformat()
with create_session() as session:
session.query(BaseJob).filter(
BaseJob.job_type == "SchedulerJob",
BaseJob.state == "running",
BaseJob.latest_heartbeat == last_heartbeat,
).delete()
@pytest.fixture()
def heartbeat_not_running():
# case-3: unhealthy scheduler status - scenario 2 (no running SchedulerJob)
with create_session() as session:
session.query(BaseJob).filter(
BaseJob.job_type == "SchedulerJob",
BaseJob.state == "running",
).delete()
yield "unhealthy", None
@pytest.mark.parametrize(
"heartbeat",
["heartbeat_healthy", "heartbeat_too_slow", "heartbeat_not_running"],
)
def test_health(request, admin_client, heartbeat):
# Load the corresponding fixture by name.
scheduler_status, last_scheduler_heartbeat = request.getfixturevalue(heartbeat)
resp = admin_client.get("health", follow_redirects=True)
resp_json = json.loads(resp.data.decode("utf-8"))
assert "healthy" == resp_json["metadatabase"]["status"]
assert scheduler_status == resp_json["scheduler"]["status"]
assert last_scheduler_heartbeat == resp_json["scheduler"]["latest_scheduler_heartbeat"]
def test_users_list(admin_client):
resp = admin_client.get("users/list", follow_redirects=True)
check_content_in_response("List Users", resp)
@pytest.mark.parametrize(
"path, body_content",
[("roles/list", "List Roles"), ("roles/show/1", "Show Role")],
)
def test_roles_read(admin_client, path, body_content):
resp = admin_client.get(path, follow_redirects=True)
check_content_in_response(body_content, resp)
def test_roles_read_unauthorized(viewer_client):
resp = viewer_client.get("roles/list", follow_redirects=True)
check_content_in_response("Access is Denied", resp)
@pytest.fixture(scope="module")
def delete_role_if_exists(app):
def func(role_name):
if app.appbuilder.sm.find_role(role_name):
app.appbuilder.sm.delete_role(role_name)
return func
@pytest.fixture()
def non_exist_role_name(delete_role_if_exists):
role_name = "test_roles_create_role"
delete_role_if_exists(role_name)
yield role_name
delete_role_if_exists(role_name)
@pytest.fixture()
def exist_role_name(app, delete_role_if_exists):
role_name = "test_roles_create_role_new"
app.appbuilder.sm.add_role(role_name)
yield role_name
delete_role_if_exists(role_name)
@pytest.fixture()
def exist_role(app, exist_role_name):
return app.appbuilder.sm.find_role(exist_role_name)
def test_roles_create(app, admin_client, non_exist_role_name):
admin_client.post("roles/add", data={"name": non_exist_role_name}, follow_redirects=True)
assert app.appbuilder.sm.find_role(non_exist_role_name) is not None
def test_roles_create_unauthorized(app, viewer_client, non_exist_role_name):
resp = viewer_client.post("roles/add", data={"name": non_exist_role_name}, follow_redirects=True)
check_content_in_response("Access is Denied", resp)
assert app.appbuilder.sm.find_role(non_exist_role_name) is None
def test_roles_edit(app, admin_client, non_exist_role_name, exist_role):
admin_client.post(
f"roles/edit/{exist_role.id}", data={"name": non_exist_role_name}, follow_redirects=True
)
updated_role = app.appbuilder.sm.find_role(non_exist_role_name)
assert exist_role.id == updated_role.id
def test_roles_edit_unauthorized(app, viewer_client, non_exist_role_name, exist_role_name, exist_role):
resp = viewer_client.post(
f"roles/edit/{exist_role.id}", data={"name": non_exist_role_name}, follow_redirects=True
)
check_content_in_response("Access is Denied", resp)
assert app.appbuilder.sm.find_role(exist_role_name)
assert app.appbuilder.sm.find_role(non_exist_role_name) is None
def test_roles_delete(app, admin_client, exist_role_name, exist_role):
admin_client.post(f"roles/delete/{exist_role.id}", follow_redirects=True)
assert app.appbuilder.sm.find_role(exist_role_name) is None
def test_roles_delete_unauthorized(app, viewer_client, exist_role, exist_role_name):
resp = viewer_client.post(f"roles/delete/{exist_role.id}", follow_redirects=True)
check_content_in_response("Access is Denied", resp)
assert app.appbuilder.sm.find_role(exist_role_name)
@pytest.mark.parametrize(
"url, client, content",
[
("userstatschartview/chart/", "admin_client", "User Statistics"),
("userstatschartview/chart/", "viewer_client", "Access is Denied"),
("actions/list", "admin_client", "List Actions"),
("actions/list", "viewer_client", "Access is Denied"),
("resources/list/", "admin_client", "List Resources"),
("resources/list/", "viewer_client", "Access is Denied"),
("permissions/list/", "admin_client", "List Permissions"),
("permissions/list/", "viewer_client", "Access is Denied"),
("resetpassword/form?pk=1", "admin_client", "Reset Password Form"),
("resetpassword/form?pk=1", "viewer_client", "Access is Denied"),
("users/list", "admin_client", "List Users"),
("users/list", "viewer_client", "Access is Denied"),
],
ids=[
"userstatschertview-admin",
"userstatschertview-viewer",
"actions-admin",
"actions-viewer",
"resources-admin",
"resources-viewer",
"permissions-admin",
"permissions-viewer",
"resetpassword-admin",
"resetpassword-viewer",
"users-admin",
"users-viewer",
],
)
def test_views_get(request, url, client, content):
resp = request.getfixturevalue(client).get(url, follow_redirects=True)
check_content_in_response(content, resp)
def _check_task_stats_json(resp):
return set(list(resp.json.items())[0][1][0].keys()) == {"state", "count"}
@pytest.mark.parametrize(
"url, check_response",
[
("blocked", None),
("dag_stats", None),
("task_stats", _check_task_stats_json),
],
)
def test_views_post(admin_client, url, check_response):
resp = admin_client.post(url, follow_redirects=True)
assert resp.status_code == 200
if check_response:
assert check_response(resp)
@pytest.mark.parametrize(
"url, client, content, username",
[
("resetmypassword/form", "viewer_client", "Password Changed", "test_viewer"),
("resetpassword/form?pk={}", "admin_client", "Password Changed", "test_admin"),
("resetpassword/form?pk={}", "viewer_client", "Access is Denied", "test_viewer"),
],
ids=["my-viewer", "pk-admin", "pk-viewer"],
)
def test_resetmypasswordview_edit(app, request, url, client, content, username):
user = app.appbuilder.sm.find_user(username)
resp = request.getfixturevalue(client).post(
url.format(user.id), data={"password": "blah", "conf_password": "blah"}, follow_redirects=True
)
check_content_in_response(content, resp)
def test_resetmypasswordview_read(viewer_client):
# Tests with viewer as all roles should have access.
resp = viewer_client.get("resetmypassword/form", follow_redirects=True)
check_content_in_response("Reset Password Form", resp)
def test_get_myuserinfo(admin_client):
resp = admin_client.get("users/userinfo/", follow_redirects=True)
check_content_in_response("Your user information", resp)
def test_edit_myuserinfo(admin_client):
resp = admin_client.post(
"userinfoeditview/form",
data={"first_name": "new_first_name", "last_name": "new_last_name"},
follow_redirects=True,
)
check_content_in_response("User information changed", resp)
@pytest.mark.parametrize(
"url",
["users/add", "users/edit/1", "users/delete/1"],
ids=["add-user", "edit-user", "delete-user"],
)
def test_views_post_access_denied(viewer_client, url):
resp = viewer_client.get(url, follow_redirects=True)
check_content_in_response("Access is Denied", resp)
@pytest.fixture()
def non_exist_username(app):
username = "fake_username"
user = app.appbuilder.sm.find_user(username)
if user is not None:
app.appbuilder.sm.del_register_user(user)
yield username
user = app.appbuilder.sm.find_user(username)
if user is not None:
app.appbuilder.sm.del_register_user(user)
def test_create_user(app, admin_client, non_exist_username):
resp = admin_client.post(
"users/add",
data={
"first_name": "fake_first_name",
"last_name": "fake_last_name",
"username": non_exist_username,
"email": "[email protected]",
"roles": [1],
"password": "test",
"conf_password": "test",
},
follow_redirects=True,
)
check_content_in_response("Added Row", resp)
assert app.appbuilder.sm.find_user(non_exist_username)
@pytest.fixture()
def exist_username(app, exist_role):
username = "test_edit_user_user"
app.appbuilder.sm.add_user(
username,
"first_name",
"last_name",
"[email protected]",
exist_role,
password="password",
)
yield username
if app.appbuilder.sm.find_user(username):
app.appbuilder.sm.del_register_user(username)
def test_edit_user(app, admin_client, exist_username):
user = app.appbuilder.sm.find_user(exist_username)
resp = admin_client.post(
f"users/edit/{user.id}",
data={"first_name": "new_first_name"},
follow_redirects=True,
)
check_content_in_response("new_first_name", resp)
def test_delete_user(app, admin_client, exist_username):
user = app.appbuilder.sm.find_user(exist_username)
resp = admin_client.post(
f"users/delete/{user.id}",
follow_redirects=True,
)
check_content_in_response("Deleted Row", resp)
@conf_vars({("webserver", "show_recent_stats_for_completed_runs"): "False"})
def test_task_stats_only_noncompleted(admin_client):
resp = admin_client.post("task_stats", follow_redirects=True)
assert resp.status_code == 200
@conf_vars({("webserver", "instance_name"): "Site Title Test"})
def test_page_instance_name(admin_client):
resp = admin_client.get("home", follow_redirects=True)
check_content_in_response("Site Title Test", resp)
def test_page_instance_name_xss_prevention(admin_client):
xss_string = "<script>alert('Give me your credit card number')</script>"
with conf_vars({("webserver", "instance_name"): xss_string}):
resp = admin_client.get("home", follow_redirects=True)
escaped_xss_string = "<script>alert('Give me your credit card number')</script>"
check_content_in_response(escaped_xss_string, resp)
check_content_not_in_response(xss_string, resp)
instance_name_with_markup_conf = {
("webserver", "instance_name"): "<b>Bold Site Title Test</b>",
("webserver", "instance_name_has_markup"): "True",
}
@conf_vars(instance_name_with_markup_conf)
def test_page_instance_name_with_markup(admin_client):
resp = admin_client.get("home", follow_redirects=True)
check_content_in_response("<b>Bold Site Title Test</b>", resp)
check_content_not_in_response("<b>Bold Site Title Test</b>", resp)
@conf_vars(instance_name_with_markup_conf)
def test_page_instance_name_with_markup_title():
appbuilder = application.create_app(testing=True).appbuilder
assert appbuilder.app_name == "Bold Site Title Test"
| [
"[email protected]"
] | |
034d42940af343c1638afe358b2506823e840bf4 | 1be4f95b722397f255e58b21a182171eb24b6fe5 | /datalad_neuroimaging/extractors/tests/test_dicom.py | 338f2fa4c994f2dd11ced3bf44f4f0f768516770 | [
"MIT"
] | permissive | yarikoptic/datalad-neuroimaging | 5f9a7b0993ac56bbeaba95427541b2c75ed711ea | 7ee146d6c7c864aafc8b540d0ccd9b3a1b5b7210 | refs/heads/master | 2022-11-11T02:57:46.228562 | 2018-04-10T14:05:21 | 2018-04-10T14:05:21 | 128,942,708 | 0 | 0 | null | 2018-04-10T14:04:46 | 2018-04-10T14:04:46 | null | UTF-8 | Python | false | false | 3,032 | py | # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test audio extractor"""
from datalad.tests.utils import SkipTest
try:
from datalad_neuroimaging.extractors.dicom import MetadataExtractor as DicomExtractor
except ImportError:
raise SkipTest
from shutil import copy
from os.path import dirname
from os.path import join as opj
from datalad.api import Dataset
from datalad.tests.utils import with_tempfile
from datalad.tests.utils import ok_clean_git
from datalad.tests.utils import assert_status
from datalad.tests.utils import assert_result_count
from datalad.tests.utils import eq_
from datalad.tests.utils import assert_dict_equal
from datalad.tests.utils import assert_in
from datalad.tests.utils import assert_not_in
@with_tempfile(mkdir=True)
def test_dicom(path):
ds = Dataset(path).create()
ds.config.add('datalad.metadata.nativetype', 'dicom', where='dataset')
copy(
opj(dirname(dirname(dirname(__file__))), 'tests', 'data', 'dicom.dcm'),
path)
ds.add('.')
ok_clean_git(ds.path)
res = ds.aggregate_metadata()
assert_status('ok', res)
# query for the file metadata
res = ds.metadata('dicom.dcm')
assert_result_count(res, 1)
# from this extractor
meta = res[0]['metadata']['dicom']
assert_in('@context', meta)
# no point in testing ALL keys, but we got plenty
assert(len(meta.keys()) > 70)
eq_(meta['SeriesDate'], '20070205')
# now ask for the dataset metadata, which should have both the unique props
# and a list of imageseries (one in this case, but a list)
res = ds.metadata(reporton='datasets')
assert_result_count(res, 1)
dsmeta = res[0]['metadata']['dicom']
# same context
assert_dict_equal(meta['@context'], dsmeta['@context'])
meta.pop('@context')
eq_(dsmeta['Series'], [meta])
# for this artificial case pretty much the same info also comes out as
# unique props, but wrapped in lists
ucp = res[0]['metadata']["datalad_unique_content_properties"]['dicom']
assert_dict_equal(
{k: [v]
for k, v in dsmeta['Series'][0].items()
if k not in DicomExtractor._unique_exclude and k in ucp},
{k: v
for k, v in ucp.items()
if k not in DicomExtractor._unique_exclude})
# buuuut, if we switch of file-based metadata storage
ds.config.add('datalad.metadata.aggregate-content-dicom', 'false', where='dataset')
ds.aggregate_metadata()
res = ds.metadata(reporton='datasets')
# the auto-uniquified bits are gone but the Series description stays
assert_not_in("datalad_unique_content_properties", res[0]['metadata'])
eq_(dsmeta['Series'], [meta])
| [
"[email protected]"
] | |
a2eb7128900a56f43e0ece19dedc06e35f192da8 | c2d3b7855b055cb8b0563a3812fb0dbfc670bc09 | /lessons_src/03_CFL_Condition.py | 7ecf7fe0fd959b7819bcdc7829ed929d41253a87 | [
"CC-BY-3.0",
"BSD-3-Clause",
"CC-BY-4.0"
] | permissive | tnakaicode/python-cfd | 85fab343c4c99f32777e45163b89f4d952d83e96 | 174176bdcb1c31e021fefd8fd54e2b3dd898dc62 | refs/heads/master | 2023-08-08T16:53:34.455088 | 2020-05-07T17:14:54 | 2020-05-07T17:14:54 | 261,902,096 | 0 | 0 | NOASSERTION | 2023-07-06T21:27:39 | 2020-05-06T23:30:09 | Jupyter Notebook | UTF-8 | Python | false | false | 5,978 | py | #!/usr/bin/env python
# coding: utf-8
# Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) Lorena A. Barba, Gilbert F. Forsyth 2017. Thanks to NSF for support via CAREER award #1149784.
# [@LorenaABarba](https://twitter.com/LorenaABarba)
# 12 steps to Navier–Stokes
# =====
# ***
# Did you experiment in Steps [1](./01_Step_1.ipynb) and [2](./02_Step_2.ipynb) using different parameter choices? If you did, you probably ran into some unexpected behavior. Did your solution ever blow up? (In my experience, CFD students *love* to make things blow up.)
#
# You are probably wondering why changing the discretization parameters affects your solution in such a drastic way. This notebook complements our [interactive CFD lessons](https://github.com/barbagroup/CFDPython) by discussing the CFL condition. And learn more by watching Prof. Barba's YouTube lectures (links below).
# Convergence and the CFL Condition
# ----
# ***
# For the first few steps, we've been using the same general initial and boundary conditions. With the parameters we initially suggested, the grid has 41 points and the timestep is 0.25 seconds. Now, we're going to experiment with increasing the size of our grid. The code below is identical to the code we used in [Step 1](./01_Step_1.ipynb), but here it has been bundled up in a function so that we can easily examine what happens as we adjust just one variable: **the grid size**.
# In[1]:
import numpy # numpy is a library for array operations akin to MATLAB
from matplotlib import pyplot # matplotlib is 2D plotting library
# get_ipython().run_line_magic('matplotlib', 'inline')
def linearconv(nx):
dx = 2 / (nx - 1)
nt = 20 # nt is the number of timesteps we want to calculate
dt = .025 # dt is the amount of time each timestep covers (delta t)
c = 1
# defining a numpy array which is nx elements long with every value equal to 1.
u = numpy.ones(nx)
# setting u = 2 between 0.5 and 1 as per our I.C.s
u[int(.5 / dx):int(1 / dx + 1)] = 2
# initializing our placeholder array, un, to hold the values we calculate for the n+1 timestep
un = numpy.ones(nx)
for n in range(nt): # iterate through time
un = u.copy() # copy the existing values of u into un
for i in range(1, nx):
u[i] = un[i] - c * dt / dx * (un[i] - un[i - 1])
pyplot.plot(numpy.linspace(0, 2, nx), u)
pyplot.show()
# Now let's examine the results of our linear convection problem with an increasingly fine mesh.
# In[2]:
linearconv(41) # convection using 41 grid points
# This is the same result as our Step 1 calculation, reproduced here for reference.
# In[3]:
linearconv(61)
# Here, there is still numerical diffusion present, but it is less severe.
# In[4]:
linearconv(71)
# Here the same pattern is present -- the wave is more square than in the previous runs.
# In[5]:
linearconv(85)
# This doesn't look anything like our original hat function.
# ### What happened?
# To answer that question, we have to think a little bit about what we're actually implementing in code.
#
# In each iteration of our time loop, we use the existing data about our wave to estimate the speed of the wave in the subsequent time step. Initially, the increase in the number of grid points returned more accurate answers. There was less numerical diffusion and the square wave looked much more like a square wave than it did in our first example.
#
# Each iteration of our time loop covers a time-step of length $\Delta t$, which we have been defining as 0.025
#
# During this iteration, we evaluate the speed of the wave at each of the $x$ points we've created. In the last plot, something has clearly gone wrong.
#
# What has happened is that over the time period $\Delta t$, the wave is travelling a distance which is greater than `dx`. The length `dx` of each grid box is related to the number of total points `nx`, so stability can be enforced if the $\Delta t$ step size is calculated with respect to the size of `dx`.
#
# $$\sigma = \frac{u \Delta t}{\Delta x} \leq \sigma_{\max}$$
#
# where $u$ is the speed of the wave; $\sigma$ is called the **Courant number** and the value of $\sigma_{\max}$ that will ensure stability depends on the discretization used.
#
# In a new version of our code, we'll use the CFL number to calculate the appropriate time-step `dt` depending on the size of `dx`.
#
#
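# A quick numeric check of the condition above (added for illustration, not part of the
# original lesson): with the fixed timestep dt = 0.025 and wave speed c = 1 used so far,
# the Courant number sigma = c * dt / dx crosses 1 near nx = 85, which is exactly where
# the solution blew up.

for nx_check in (41, 61, 71, 85):
    dx_check = 2 / (nx_check - 1)
    print(nx_check, 1 * 0.025 / dx_check)  # sigma = 0.5, 0.75, 0.875, 1.05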
# In[6]:
import numpy
from matplotlib import pyplot
def linearconv(nx):
dx = 2 / (nx - 1)
nt = 20 # nt is the number of timesteps we want to calculate
c = 1
sigma = .5
dt = sigma * dx
u = numpy.ones(nx)
u[int(.5 / dx):int(1 / dx + 1)] = 2
un = numpy.ones(nx)
for n in range(nt): # iterate through time
un = u.copy() # copy the existing values of u into un
for i in range(1, nx):
u[i] = un[i] - c * dt / dx * (un[i] - un[i - 1])
pyplot.plot(numpy.linspace(0, 2, nx), u)
# In[7]:
linearconv(41)
# In[8]:
linearconv(61)
# In[9]:
linearconv(81)
# In[10]:
linearconv(101)
# In[11]:
linearconv(121)
# Notice that as the number of points `nx` increases, the wave convects a shorter and shorter distance. The number of time iterations we advance the solution by is held constant at `nt = 20`, but depending on the value of `nx` and the corresponding values of `dx` and `dt`, a shorter time window is being examined overall.
# Learn More
# -----
# ***
# It's possible to do rigorous analysis of the stability of numerical schemes, in some cases. Watch Prof. Barba's presentation of this topic in **Video Lecture 9** on YouTube.
# In[12]:
from IPython.display import YouTubeVideo
YouTubeVideo('Yw1YPBupZxU')
# In[13]:
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
| [
"[email protected]"
] | |
16d01ee4642643a3fa9a06a6f2fb3e7d14bc6433 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/jupyter_client/jsonutil.py | 9903f70ecee4d8e753d94367e32ed64f5e0d57aa | [
"MIT"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 5,944 | py | """Utilities to manipulate JSON objects."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import math
import numbers
import re
import types
import warnings
from binascii import b2a_base64
from collections.abc import Iterable
from datetime import datetime
from typing import Optional
from typing import Union
from dateutil.parser import parse as _dateutil_parse # type: ignore
from dateutil.tz import tzlocal # type: ignore
next_attr_name = "__next__" # Not sure what downstream library uses this, but left it to be safe
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
# timestamp formats
ISO8601 = "%Y-%m-%dT%H:%M:%S.%f"
ISO8601_PAT = re.compile(
r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?(Z|([\+\-]\d{2}:?\d{2}))?$"
)
# holy crap, strptime is not threadsafe.
# Calling it once at import seems to help.
datetime.strptime("1", "%d")
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
def _ensure_tzinfo(dt: datetime) -> datetime:
"""Ensure a datetime object has tzinfo
If no tzinfo is present, add tzlocal
"""
if not dt.tzinfo:
# No more naïve datetime objects!
warnings.warn(
"Interpreting naive datetime as local %s. Please add timezone info to timestamps." % dt,
DeprecationWarning,
stacklevel=4,
)
dt = dt.replace(tzinfo=tzlocal())
return dt
def parse_date(s: Optional[str]) -> Optional[Union[str, datetime]]:
"""parse an ISO8601 date string
If it is None or not a valid ISO8601 timestamp,
it will be returned unmodified.
Otherwise, it will return a datetime object.
"""
if s is None:
return s
m = ISO8601_PAT.match(s)
if m:
dt = _dateutil_parse(s)
return _ensure_tzinfo(dt)
return s
def extract_dates(obj):
"""extract ISO8601 dates from unpacked JSON"""
if isinstance(obj, dict):
new_obj = {} # don't clobber
for k, v in obj.items():
new_obj[k] = extract_dates(v)
obj = new_obj
elif isinstance(obj, (list, tuple)):
obj = [extract_dates(o) for o in obj]
elif isinstance(obj, str):
obj = parse_date(obj)
return obj
def squash_dates(obj):
"""squash datetime objects into ISO8601 strings"""
if isinstance(obj, dict):
obj = dict(obj) # don't clobber
for k, v in obj.items():
obj[k] = squash_dates(v)
elif isinstance(obj, (list, tuple)):
obj = [squash_dates(o) for o in obj]
elif isinstance(obj, datetime):
obj = obj.isoformat()
return obj
def date_default(obj):
"""DEPRECATED: Use jupyter_client.jsonutil.json_default"""
warnings.warn(
"date_default is deprecated since jupyter_client 7.0.0."
" Use jupyter_client.jsonutil.json_default.",
stacklevel=2,
)
return json_default(obj)
def json_default(obj):
"""default function for packing objects in JSON."""
if isinstance(obj, datetime):
obj = _ensure_tzinfo(obj)
return obj.isoformat().replace('+00:00', 'Z')
if isinstance(obj, bytes):
return b2a_base64(obj).decode('ascii')
if isinstance(obj, Iterable):
return list(obj)
if isinstance(obj, numbers.Integral):
return int(obj)
if isinstance(obj, numbers.Real):
return float(obj)
raise TypeError("%r is not JSON serializable" % obj)
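# Illustrative usage sketch (added for clarity; not part of the original module): json_default
# lets the standard json encoder handle datetimes and bytes, and extract_dates turns ISO8601
# strings back into datetime objects after decoding.
def _demo_json_roundtrip():
    import json

    payload = {"when": datetime(2021, 1, 1, tzinfo=tzlocal()), "blob": b"\x00\x01"}
    encoded = json.dumps(payload, default=json_default)
    decoded = extract_dates(json.loads(encoded))
    return decoded  # decoded["when"] is a timezone-aware datetime again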
# Copy of the old ipykernel's json_clean
# This is temporary, it should be removed when we deprecate support for
# non-valid JSON messages
def json_clean(obj):
# types that are 'atomic' and ok in json as-is.
atomic_ok = (str, type(None))
# containers that we need to convert into lists
container_to_list = (tuple, set, types.GeneratorType)
# Since bools are a subtype of Integrals, which are a subtype of Reals,
# we have to check them in that order.
if isinstance(obj, bool):
return obj
if isinstance(obj, numbers.Integral):
# cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598)
return int(obj)
if isinstance(obj, numbers.Real):
# cast out-of-range floats to their reprs
if math.isnan(obj) or math.isinf(obj):
return repr(obj)
return float(obj)
if isinstance(obj, atomic_ok):
return obj
if isinstance(obj, bytes):
        # unambiguous binary data is base64-encoded
# (this probably should have happened upstream)
return b2a_base64(obj).decode('ascii')
if isinstance(obj, container_to_list) or (
hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)
):
obj = list(obj)
if isinstance(obj, list):
return [json_clean(x) for x in obj]
if isinstance(obj, dict):
# First, validate that the dict won't lose data in conversion due to
# key collisions after stringification. This can happen with keys like
# True and 'true' or 1 and '1', which collide in JSON.
nkeys = len(obj)
nkeys_collapsed = len(set(map(str, obj)))
if nkeys != nkeys_collapsed:
raise ValueError(
'dict cannot be safely converted to JSON: '
'key collision would lead to dropped values'
)
# If all OK, proceed by making the new dict that will be json-safe
out = {}
for k, v in obj.items():
out[str(k)] = json_clean(v)
return out
if isinstance(obj, datetime):
return obj.strftime(ISO8601)
# we don't understand it, it's probably an unserializable object
raise ValueError("Can't clean for JSON: %r" % obj)
| [
"[email protected]"
] | |
93248952101638dd63e2d980f3ce2641a5a9dad7 | ebb1e564c8a11e5af453f3749dcba1b66e2f3931 | /test/quantization/fx/test_model_report_fx.py | d123a8752ca72a05c6e3064859c2dee6efe72fd5 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | jjsjann123/pytorch | 06c3aee8dd3565664ac2e2fda0306432cf62dd7c | de9b3fb3e5eb54660190cbd20b6592fc5cbda547 | refs/heads/master | 2023-08-25T00:22:02.568347 | 2022-07-27T18:50:20 | 2022-07-27T22:38:28 | 152,169,545 | 0 | 1 | NOASSERTION | 2022-08-11T07:55:44 | 2018-10-09T01:33:17 | C++ | UTF-8 | Python | false | false | 79,542 | py | # -*- coding: utf-8 -*-
# Owner(s): ["oncall: quantization"]
import torch
import torch.nn as nn
import torch.ao.quantization.quantize_fx as quantize_fx
import torch.nn.functional as F
from torch.ao.quantization import QConfig, QConfigMapping
from torch.ao.quantization.fx._model_report.detector import (
DynamicStaticDetector,
InputWeightEqualizationDetector,
PerChannelDetector,
OutlierDetector,
)
from torch.ao.quantization.fx._model_report.model_report_observer import ModelReportObserver
from torch.ao.quantization.fx._model_report.model_report_visualizer import ModelReportVisualizer
from torch.ao.quantization.fx._model_report.model_report import ModelReport
from torch.ao.quantization.observer import HistogramObserver, default_per_channel_weight_observer
from torch.nn.intrinsic.modules.fused import ConvReLU2d, LinearReLU
from torch.testing._internal.common_quantization import (
ConvModel,
QuantizationTestCase,
SingleLayerLinearModel,
TwoLayerLinearModel,
skipIfNoFBGEMM,
skipIfNoQNNPACK,
override_quantized_engine,
)
"""
Partition of input domain:
Model contains: conv or linear, both conv and linear
Model contains: ConvTransposeNd (not supported for per_channel)
Model is: post training quantization model, quantization aware training model
Model is: composed with nn.Sequential, composed in class structure
QConfig utilizes per_channel weight observer, backend uses non per_channel weight observer
QConfig_dict uses only one default qconfig, Qconfig dict uses > 1 unique qconfigs
Partition on output domain:
There are possible changes / suggestions, there are no changes / suggestions
"""
# Default output for string if no optimizations are possible
DEFAULT_NO_OPTIMS_ANSWER_STRING = (
"Further Optimizations for backend {}: \nNo further per_channel optimizations possible."
)
# Example Sequential Model with multiple Conv and Linear with nesting involved
NESTED_CONV_LINEAR_EXAMPLE = torch.nn.Sequential(
torch.nn.Conv2d(3, 3, 2, 1),
torch.nn.Sequential(torch.nn.Linear(9, 27), torch.nn.ReLU()),
torch.nn.Linear(27, 27),
torch.nn.ReLU(),
torch.nn.Conv2d(3, 3, 2, 1),
)
# Example Sequential Model with Conv sub-class example
LAZY_CONV_LINEAR_EXAMPLE = torch.nn.Sequential(
torch.nn.LazyConv2d(3, 3, 2, 1),
torch.nn.Sequential(torch.nn.Linear(5, 27), torch.nn.ReLU()),
torch.nn.ReLU(),
torch.nn.Linear(27, 27),
torch.nn.ReLU(),
torch.nn.LazyConv2d(3, 3, 2, 1),
)
# Example Sequential Model with Fusion directly built into model
FUSION_CONV_LINEAR_EXAMPLE = torch.nn.Sequential(
ConvReLU2d(torch.nn.Conv2d(3, 3, 2, 1), torch.nn.ReLU()),
torch.nn.Sequential(LinearReLU(torch.nn.Linear(9, 27), torch.nn.ReLU())),
LinearReLU(torch.nn.Linear(27, 27), torch.nn.ReLU()),
torch.nn.Conv2d(3, 3, 2, 1),
)
# Test class
# example model to use for tests
class ThreeOps(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 3)
self.bn = nn.BatchNorm2d(3)
self.relu = nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.bn(x)
x = self.relu(x)
return x
def get_example_inputs(self):
return (torch.randn(1, 3, 3, 3),)
class TwoThreeOps(nn.Module):
def __init__(self):
super().__init__()
self.block1 = ThreeOps()
self.block2 = ThreeOps()
def forward(self, x):
x = self.block1(x)
y = self.block2(x)
z = x + y
z = F.relu(z)
return z
def get_example_inputs(self):
return (torch.randn(1, 3, 3, 3),)
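# Illustrative end-to-end sketch (added for clarity; it mirrors what the tests below exercise
# rather than prescribing an API): prepare a model with FX, run one calibration pass, then ask
# the per-channel detector for its report string and per-module info.
def _demo_per_channel_detector_flow():
    q_config_mapping = QConfigMapping().set_global(
        torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine)
    )
    example_input = torch.randn(1, 3, 10, 10)
    prepared = quantize_fx.prepare_fx(ConvModel(), q_config_mapping, example_input)
    prepared(example_input)  # single calibration pass
    detector = PerChannelDetector(torch.backends.quantized.engine)
    return detector.generate_detector_report(prepared)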
class TestFxModelReportDetector(QuantizationTestCase):
"""Prepares and callibrate the model"""
def _prepare_model_and_run_input(self, model, q_config_mapping, input):
model_prep = torch.ao.quantization.quantize_fx.prepare_fx(model, q_config_mapping, input) # prep model
        model_prep(input).sum()  # calibrate the model
return model_prep
"""Case includes:
one conv or linear
        post training quantization
composed as module
qconfig uses per_channel weight observer
Only 1 qconfig in qconfig dict
Output has no changes / suggestions
"""
@skipIfNoFBGEMM
def test_simple_conv(self):
with override_quantized_engine('fbgemm'):
torch.backends.quantized.engine = "fbgemm"
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
input = torch.randn(1, 3, 10, 10)
prepared_model = self._prepare_model_and_run_input(ConvModel(), q_config_mapping, input)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# no optims possible and there should be nothing in per_channel_status
self.assertEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
            # there should only be one conv in this model
self.assertEqual(per_channel_info["conv"]["backend"], torch.backends.quantized.engine)
self.assertEqual(len(per_channel_info), 1)
self.assertEqual(list(per_channel_info)[0], "conv")
self.assertEqual(
per_channel_info["conv"]["per_channel_quantization_supported"],
True,
)
self.assertEqual(per_channel_info["conv"]["per_channel_quantization_used"], True)
"""Case includes:
Multiple conv or linear
post training quantization
composed as module
qconfig doesn't use per_channel weight observer
Only 1 qconfig in qconfig dict
Output has possible changes / suggestions
"""
@skipIfNoQNNPACK
def test_multi_linear_model_without_per_channel(self):
with override_quantized_engine('qnnpack'):
torch.backends.quantized.engine = "qnnpack"
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
prepared_model = self._prepare_model_and_run_input(
TwoLayerLinearModel(),
q_config_mapping,
TwoLayerLinearModel().get_example_inputs()[0],
)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# there should be optims possible
self.assertNotEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# pick a random key to look at
rand_key: str = list(per_channel_info.keys())[0]
self.assertEqual(per_channel_info[rand_key]["backend"], torch.backends.quantized.engine)
self.assertEqual(len(per_channel_info), 2)
# for each linear layer, should be supported but not used
for linear_key in per_channel_info.keys():
module_entry = per_channel_info[linear_key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
self.assertEqual(module_entry["per_channel_quantization_used"], False)
"""Case includes:
Multiple conv or linear
post training quantization
composed as Module
qconfig doesn't use per_channel weight observer
More than 1 qconfig in qconfig dict
Output has possible changes / suggestions
"""
@skipIfNoQNNPACK
def test_multiple_q_config_options(self):
with override_quantized_engine('qnnpack'):
torch.backends.quantized.engine = "qnnpack"
# qconfig with support for per_channel quantization
per_channel_qconfig = QConfig(
activation=HistogramObserver.with_args(reduce_range=True),
weight=default_per_channel_weight_observer,
)
# we need to design the model
class ConvLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 2, 1)
self.fc1 = torch.nn.Linear(9, 27)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(27, 27)
self.conv2 = torch.nn.Conv2d(3, 3, 2, 1)
def forward(self, x):
x = self.conv1(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.conv2(x)
return x
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(
torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine)
).set_object_type(torch.nn.Conv2d, per_channel_qconfig)
prepared_model = self._prepare_model_and_run_input(
ConvLinearModel(),
q_config_mapping,
torch.randn(1, 3, 10, 10),
)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# the only suggestions should be to linear layers
# there should be optims possible
self.assertNotEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# to ensure it got into the nested layer
self.assertEqual(len(per_channel_info), 4)
# for each layer, should be supported but not used
for key in per_channel_info.keys():
module_entry = per_channel_info[key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
# if linear False, if conv2d true cuz it uses different config
if "fc" in key:
self.assertEqual(module_entry["per_channel_quantization_used"], False)
elif "conv" in key:
self.assertEqual(module_entry["per_channel_quantization_used"], True)
else:
raise ValueError("Should only contain conv and linear layers as key values")
"""Case includes:
Multiple conv or linear
post training quantization
composed as sequential
qconfig doesn't use per_channel weight observer
Only 1 qconfig in qconfig dict
Output has possible changes / suggestions
"""
@skipIfNoQNNPACK
def test_sequential_model_format(self):
with override_quantized_engine('qnnpack'):
torch.backends.quantized.engine = "qnnpack"
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
prepared_model = self._prepare_model_and_run_input(
NESTED_CONV_LINEAR_EXAMPLE,
q_config_mapping,
torch.randn(1, 3, 10, 10),
)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# there should be optims possible
self.assertNotEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# to ensure it got into the nested layer
self.assertEqual(len(per_channel_info), 4)
# for each layer, should be supported but not used
for key in per_channel_info.keys():
module_entry = per_channel_info[key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
self.assertEqual(module_entry["per_channel_quantization_used"], False)
"""Case includes:
Multiple conv or linear
post training quantization
composed as sequential
qconfig doesn't use per_channel weight observer
Only 1 qconfig in qconfig dict
Output has possible changes / suggestions
"""
@skipIfNoQNNPACK
def test_conv_sub_class_considered(self):
with override_quantized_engine('qnnpack'):
torch.backends.quantized.engine = "qnnpack"
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
prepared_model = self._prepare_model_and_run_input(
LAZY_CONV_LINEAR_EXAMPLE,
q_config_mapping,
torch.randn(1, 3, 10, 10),
)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# there should be optims possible
self.assertNotEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# to ensure it got into the nested layer and it considered the lazyConv2d
self.assertEqual(len(per_channel_info), 4)
# for each layer, should be supported but not used
for key in per_channel_info.keys():
module_entry = per_channel_info[key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
self.assertEqual(module_entry["per_channel_quantization_used"], False)
"""Case includes:
Multiple conv or linear
post training quantization
composed as sequential
qconfig uses per_channel weight observer
Only 1 qconfig in qconfig dict
Output has no possible changes / suggestions
"""
@skipIfNoFBGEMM
def test_fusion_layer_in_sequential(self):
with override_quantized_engine('fbgemm'):
torch.backends.quantized.engine = "fbgemm"
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
prepared_model = self._prepare_model_and_run_input(
FUSION_CONV_LINEAR_EXAMPLE,
q_config_mapping,
torch.randn(1, 3, 10, 10),
)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# no optims possible and there should be nothing in per_channel_status
self.assertEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# to ensure it got into the nested layer and it considered all the nested fusion components
self.assertEqual(len(per_channel_info), 4)
# for each layer, should be supported but not used
for key in per_channel_info.keys():
module_entry = per_channel_info[key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
self.assertEqual(module_entry["per_channel_quantization_used"], True)
"""Case includes:
Multiple conv or linear
        quantization aware training
composed as model
qconfig does not use per_channel weight observer
Only 1 qconfig in qconfig dict
Output has possible changes / suggestions
"""
@skipIfNoQNNPACK
def test_qat_aware_model_example(self):
# first we want a QAT model
class QATConvLinearReluModel(torch.nn.Module):
def __init__(self):
super(QATConvLinearReluModel, self).__init__()
# QuantStub converts tensors from floating point to quantized
self.quant = torch.quantization.QuantStub()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.bn = torch.nn.BatchNorm2d(1)
self.relu = torch.nn.ReLU()
# DeQuantStub converts tensors from quantized to floating point
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.dequant(x)
return x
with override_quantized_engine('qnnpack'):
# create a model instance
model_fp32 = QATConvLinearReluModel()
model_fp32.qconfig = torch.quantization.get_default_qat_qconfig("qnnpack")
# model must be in eval mode for fusion
model_fp32.eval()
model_fp32_fused = torch.quantization.fuse_modules(model_fp32, [["conv", "bn", "relu"]])
# model must be set to train mode for QAT logic to work
model_fp32_fused.train()
# prepare the model for QAT, different than for post training quantization
model_fp32_prepared = torch.quantization.prepare_qat(model_fp32_fused)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(model_fp32_prepared)
# there should be optims possible
self.assertNotEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# make sure it was able to find the single conv in the fused model
self.assertEqual(len(per_channel_info), 1)
# for the one conv, it should still give advice to use different qconfig
for key in per_channel_info.keys():
module_entry = per_channel_info[key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
self.assertEqual(module_entry["per_channel_quantization_used"], False)
"""
Partition on Domain / Things to Test
- All zero tensor
- Multiple tensor dimensions
- All of the outward facing functions
- Epoch min max are correctly updating
- Batch range is correctly averaging as expected
- Reset for each epoch is correctly resetting the values
Partition on Output
- the calculation of the ratio is performed correctly
"""
class TestFxModelReportObserver(QuantizationTestCase):
class NestedModifiedSingleLayerLinear(torch.nn.Module):
def __init__(self):
super().__init__()
self.obs1 = ModelReportObserver()
self.mod1 = SingleLayerLinearModel()
self.obs2 = ModelReportObserver()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.obs1(x)
x = self.mod1(x)
x = self.obs2(x)
x = self.fc1(x)
x = self.relu(x)
return x
def run_model_and_common_checks(self, model, ex_input, num_epochs, batch_size):
# split up data into batches
split_up_data = torch.split(ex_input, batch_size)
for epoch in range(num_epochs):
# reset all model report obs
model.apply(
lambda module: module.reset_batch_and_epoch_values()
if isinstance(module, ModelReportObserver)
else None
)
# quick check that a reset occurred
self.assertEqual(
getattr(model, "obs1").average_batch_activation_range,
torch.tensor(float(0)),
)
self.assertEqual(getattr(model, "obs1").epoch_activation_min, torch.tensor(float("inf")))
self.assertEqual(getattr(model, "obs1").epoch_activation_max, torch.tensor(float("-inf")))
# loop through the batches and run through
for index, batch in enumerate(split_up_data):
num_tracked_so_far = getattr(model, "obs1").num_batches_tracked
self.assertEqual(num_tracked_so_far, index)
# get general info about the batch and the model to use later
batch_min, batch_max = torch.aminmax(batch)
current_average_range = getattr(model, "obs1").average_batch_activation_range
current_epoch_min = getattr(model, "obs1").epoch_activation_min
current_epoch_max = getattr(model, "obs1").epoch_activation_max
# run input through
model(ex_input)
# check that average batch activation range updated correctly
correct_updated_value = (current_average_range * num_tracked_so_far + (batch_max - batch_min)) / (
num_tracked_so_far + 1
)
self.assertEqual(
getattr(model, "obs1").average_batch_activation_range,
correct_updated_value,
)
if current_epoch_max - current_epoch_min > 0:
self.assertEqual(
getattr(model, "obs1").get_batch_to_epoch_ratio(),
correct_updated_value / (current_epoch_max - current_epoch_min),
)
"""Case includes:
all zero tensor
dim size = 2
run for 1 epoch
run for 10 batch
tests input data observer
"""
def test_zero_tensor_errors(self):
# initialize the model
model = self.NestedModifiedSingleLayerLinear()
# generate the desired input
ex_input = torch.zeros((10, 1, 5))
# run it through the model and do general tests
self.run_model_and_common_checks(model, ex_input, 1, 1)
# make sure final values are all 0
self.assertEqual(getattr(model, "obs1").epoch_activation_min, 0)
self.assertEqual(getattr(model, "obs1").epoch_activation_max, 0)
self.assertEqual(getattr(model, "obs1").average_batch_activation_range, 0)
# we should get an error if we try to calculate the ratio
with self.assertRaises(ValueError):
ratio_val = getattr(model, "obs1").get_batch_to_epoch_ratio()
"""Case includes:
non-zero tensor
dim size = 2
run for 1 epoch
run for 1 batch
tests input data observer
"""
def test_single_batch_of_ones(self):
# initialize the model
model = self.NestedModifiedSingleLayerLinear()
# generate the desired input
ex_input = torch.ones((1, 1, 5))
# run it through the model and do general tests
self.run_model_and_common_checks(model, ex_input, 1, 1)
# make sure final values are all 0 except for the min and max (which are 1)
self.assertEqual(getattr(model, "obs1").epoch_activation_min, 1)
self.assertEqual(getattr(model, "obs1").epoch_activation_max, 1)
self.assertEqual(getattr(model, "obs1").average_batch_activation_range, 0)
# we should get an error if we try to calculate the ratio
with self.assertRaises(ValueError):
ratio_val = getattr(model, "obs1").get_batch_to_epoch_ratio()
"""Case includes:
non-zero tensor
dim size = 2
run for 10 epoch
run for 15 batch
tests non input data observer
"""
def test_observer_after_relu(self):
# model specific to this test
class NestedModifiedObserverAfterRelu(torch.nn.Module):
def __init__(self):
super().__init__()
self.obs1 = ModelReportObserver()
self.mod1 = SingleLayerLinearModel()
self.obs2 = ModelReportObserver()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.obs1(x)
x = self.mod1(x)
x = self.fc1(x)
x = self.relu(x)
x = self.obs2(x)
return x
# initialize the model
model = NestedModifiedObserverAfterRelu()
# generate the desired input
ex_input = torch.randn((15, 1, 5))
# run it through the model and do general tests
self.run_model_and_common_checks(model, ex_input, 10, 15)
"""Case includes:
non-zero tensor
dim size = 2
run for multiple epoch
run for multiple batch
tests input data observer
"""
def test_random_epochs_and_batches(self):
# set up a basic model
class TinyNestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.obs1 = ModelReportObserver()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
self.obs2 = ModelReportObserver()
def forward(self, x):
x = self.obs1(x)
x = self.fc1(x)
x = self.relu(x)
x = self.obs2(x)
return x
class LargerIncludeNestModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.obs1 = ModelReportObserver()
self.nested = TinyNestModule()
self.fc1 = SingleLayerLinearModel()
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.obs1(x)
x = self.nested(x)
x = self.fc1(x)
x = self.relu(x)
return x
class ModifiedThreeOps(torch.nn.Module):
def __init__(self, batch_norm_dim):
super(ModifiedThreeOps, self).__init__()
self.obs1 = ModelReportObserver()
self.linear = torch.nn.Linear(7, 3, 2)
self.obs2 = ModelReportObserver()
if batch_norm_dim == 2:
self.bn = torch.nn.BatchNorm2d(2)
elif batch_norm_dim == 3:
self.bn = torch.nn.BatchNorm3d(4)
else:
raise ValueError("Dim should only be 2 or 3")
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.obs1(x)
x = self.linear(x)
x = self.obs2(x)
x = self.bn(x)
x = self.relu(x)
return x
class HighDimensionNet(torch.nn.Module):
def __init__(self):
super(HighDimensionNet, self).__init__()
self.obs1 = ModelReportObserver()
self.fc1 = torch.nn.Linear(3, 7)
self.block1 = ModifiedThreeOps(3)
self.fc2 = torch.nn.Linear(3, 7)
self.block2 = ModifiedThreeOps(3)
self.fc3 = torch.nn.Linear(3, 7)
def forward(self, x):
x = self.obs1(x)
x = self.fc1(x)
x = self.block1(x)
x = self.fc2(x)
y = self.block2(x)
y = self.fc3(y)
z = x + y
z = F.relu(z)
return z
# the purpose of this test is to give the observers a variety of data examples
# initialize the model
models = [
self.NestedModifiedSingleLayerLinear(),
LargerIncludeNestModel(),
ModifiedThreeOps(2),
HighDimensionNet(),
]
# get some number of epochs and batches
num_epochs = 10
num_batches = 15
input_shapes = [(1, 5), (1, 5), (2, 3, 7), (4, 1, 8, 3)]
# generate the desired inputs
inputs = []
for shape in input_shapes:
ex_input = torch.randn((num_batches, *shape))
inputs.append(ex_input)
# run it through the model and do general tests
for index, model in enumerate(models):
self.run_model_and_common_checks(model, inputs[index], num_epochs, num_batches)
"""
Partition on domain / things to test
There is only a single test case for now.
This will be more thoroughly tested with the implementation of the full end to end tool coming soon.
"""
class TestFxModelReportDetectDynamicStatic(QuantizationTestCase):
@skipIfNoFBGEMM
def test_nested_detection_case(self):
class SingleLinear(torch.nn.Module):
def __init__(self):
super(SingleLinear, self).__init__()
self.linear = torch.nn.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
class TwoBlockNet(torch.nn.Module):
def __init__(self):
super(TwoBlockNet, self).__init__()
self.block1 = SingleLinear()
self.block2 = SingleLinear()
def forward(self, x):
x = self.block1(x)
y = self.block2(x)
z = x + y
z = F.relu(z)
return z
with override_quantized_engine('fbgemm'):
# create model, example input, and qconfig mapping
torch.backends.quantized.engine = "fbgemm"
model = TwoBlockNet()
example_input = torch.randint(-10, 0, (1, 3, 3, 3))
example_input = example_input.to(torch.float)
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig("fbgemm"))
# prep model and select observer
model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
obs_ctr = ModelReportObserver
# find layer to attach to and store
linear_fqn = "block2.linear" # fqn of target linear
target_linear = None
for node in model_prep.graph.nodes:
if node.target == linear_fqn:
target_linear = node
break
# insert into both module and graph pre and post
# set up to insert before target_linear (pre_observer)
with model_prep.graph.inserting_before(target_linear):
obs_to_insert = obs_ctr()
pre_obs_fqn = linear_fqn + ".model_report_pre_observer"
model_prep.add_submodule(pre_obs_fqn, obs_to_insert)
model_prep.graph.create_node(op="call_module", target=pre_obs_fqn, args=target_linear.args)
# set up and insert after the target_linear (post_observer)
with model_prep.graph.inserting_after(target_linear):
obs_to_insert = obs_ctr()
post_obs_fqn = linear_fqn + ".model_report_post_observer"
model_prep.add_submodule(post_obs_fqn, obs_to_insert)
model_prep.graph.create_node(op="call_module", target=post_obs_fqn, args=(target_linear,))
# need to recompile module after submodule added and pass input through
model_prep.recompile()
num_iterations = 10
for i in range(num_iterations):
if i % 2 == 0:
example_input = torch.randint(-10, 0, (1, 3, 3, 3)).to(torch.float)
else:
example_input = torch.randint(0, 10, (1, 3, 3, 3)).to(torch.float)
model_prep(example_input)
# run it through the dynamic vs static detector
dynamic_vs_static_detector = DynamicStaticDetector()
dynam_vs_stat_str, dynam_vs_stat_dict = dynamic_vs_static_detector.generate_detector_report(model_prep)
# one of the stats should be stationary, and the other non-stationary
# as a result, dynamic should be recommended
data_dist_info = [
dynam_vs_stat_dict[linear_fqn][DynamicStaticDetector.PRE_OBS_DATA_DIST_KEY],
dynam_vs_stat_dict[linear_fqn][DynamicStaticDetector.POST_OBS_DATA_DIST_KEY],
]
self.assertTrue("stationary" in data_dist_info)
self.assertTrue("non-stationary" in data_dist_info)
self.assertTrue(dynam_vs_stat_dict[linear_fqn]["dynamic_recommended"])
class TestFxModelReportClass(QuantizationTestCase):
@skipIfNoFBGEMM
def test_constructor(self):
"""
Tests the constructor of the ModelReport class.
Specifically looks at:
- The desired reports
- Ensures that the observers of interest are properly initialized
"""
with override_quantized_engine('fbgemm'):
# set the backend for this test
torch.backends.quantized.engine = "fbgemm"
backend = torch.backends.quantized.engine
# create a model
model = ThreeOps()
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
model_prep = quantize_fx.prepare_fx(model, q_config_mapping, model.get_example_inputs()[0])
# make an example set of detectors
test_detector_set = set([DynamicStaticDetector(), PerChannelDetector(backend)])
# initialize with an empty detector
model_report = ModelReport(model_prep, test_detector_set)
# make sure internal valid reports matches
detector_name_set = set([detector.get_detector_name() for detector in test_detector_set])
self.assertEqual(model_report.get_desired_reports_names(), detector_name_set)
# now attempt with no valid reports, should raise error
with self.assertRaises(ValueError):
model_report = ModelReport(model, set([]))
# number of expected obs of interest entries
num_expected_entries = len(test_detector_set)
self.assertEqual(len(model_report.get_observers_of_interest()), num_expected_entries)
for value in model_report.get_observers_of_interest().values():
self.assertEqual(len(value), 0)
@skipIfNoFBGEMM
def test_prepare_model_callibration(self):
"""
Tests model_report.prepare_detailed_calibration which prepares the model for calibration
Specifically looks at:
- Whether observers are properly inserted into regular nn.Module
- Whether the target and the arguments of the observers are proper
- Whether the internal representation of observers of interest is updated
"""
with override_quantized_engine('fbgemm'):
# create model report object
# create model
model = TwoThreeOps()
# make an example set of detectors
torch.backends.quantized.engine = "fbgemm"
backend = torch.backends.quantized.engine
test_detector_set = set([DynamicStaticDetector(), PerChannelDetector(backend)])
# initialize with an empty detector
# prepare the model
example_input = model.get_example_inputs()[0]
current_backend = torch.backends.quantized.engine
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
model_report = ModelReport(model_prep, test_detector_set)
# prepare the model for callibration
prepared_for_callibrate_model = model_report.prepare_detailed_calibration()
# see whether observers were properly inserted into the regular nn.Module
# there should be 4 observers present in this case
modules_observer_cnt = 0
for fqn, module in prepared_for_callibrate_model.named_modules():
if isinstance(module, ModelReportObserver):
modules_observer_cnt += 1
self.assertEqual(modules_observer_cnt, 4)
model_report_str_check = "model_report"
# also make sure arguments for observers in the graph are proper
for node in prepared_for_callibrate_model.graph.nodes:
# not all node targets are strings, so check
if isinstance(node.target, str) and model_report_str_check in node.target:
# if pre-observer has same args as the linear (next node)
if "pre_observer" in node.target:
self.assertEqual(node.args, node.next.args)
# if post-observer, args are the target linear (previous node)
if "post_observer" in node.target:
self.assertEqual(node.args, (node.prev,))
# ensure model_report observers of interest updated
# there should be two entries
self.assertEqual(len(model_report.get_observers_of_interest()), 2)
for detector in test_detector_set:
self.assertTrue(detector.get_detector_name() in model_report.get_observers_of_interest().keys())
# get number of entries for this detector
detector_obs_of_interest_fqns = model_report.get_observers_of_interest()[detector.get_detector_name()]
# assert that the per channel detector has 0 and the dynamic static has 4
if isinstance(detector, PerChannelDetector):
self.assertEqual(len(detector_obs_of_interest_fqns), 0)
elif isinstance(detector, DynamicStaticDetector):
self.assertEqual(len(detector_obs_of_interest_fqns), 4)
# ensure that we can prepare for callibration only once
with self.assertRaises(ValueError):
prepared_for_callibrate_model = model_report.prepare_detailed_calibration()
def get_module_and_graph_cnts(self, callibrated_fx_module):
r"""
Calculates the number of ModelReportObserver modules in the model as well as the number of model_report nodes in the graph.
Returns a tuple of two elements:
int: The number of ModelReportObservers found in the model
int: The number of model_report nodes found in the graph
"""
# get the number of observers stored as modules
modules_observer_cnt = 0
for fqn, module in callibrated_fx_module.named_modules():
if isinstance(module, ModelReportObserver):
modules_observer_cnt += 1
# get number of observers in the graph
model_report_str_check = "model_report"
graph_observer_cnt = 0
# also make sure arguments for observers in the graph are proper
for node in callibrated_fx_module.graph.nodes:
# not all node targets are strings, so check
if isinstance(node.target, str) and model_report_str_check in node.target:
# increment if we found a graph observer
graph_observer_cnt += 1
return (modules_observer_cnt, graph_observer_cnt)
@skipIfNoFBGEMM
def test_generate_report(self):
"""
Tests model_report.generate_model_report to ensure report generation
Specifically looks at:
- Whether correct number of reports are being generated
- Whether observers are being properly removed if specified
- Whether correct blocking from generating report twice if obs removed
"""
with override_quantized_engine('fbgemm'):
# set the backend for this test
torch.backends.quantized.engine = "fbgemm"
# check whether the correct number of reports are being generated
filled_detector_set = set([DynamicStaticDetector(), PerChannelDetector(torch.backends.quantized.engine)])
single_detector_set = set([DynamicStaticDetector()])
# create our models
model_full = TwoThreeOps()
model_single = TwoThreeOps()
# prepare and callibrate two different instances of same model
# prepare the model
example_input = model_full.get_example_inputs()[0]
current_backend = torch.backends.quantized.engine
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
model_prep_full = quantize_fx.prepare_fx(model_full, q_config_mapping, example_input)
model_prep_single = quantize_fx.prepare_fx(model_single, q_config_mapping, example_input)
# initialize one with filled detector
model_report_full = ModelReport(model_prep_full, filled_detector_set)
# initialize another with a single detector set
model_report_single = ModelReport(model_prep_single, single_detector_set)
# prepare the models for callibration
prepared_for_callibrate_model_full = model_report_full.prepare_detailed_calibration()
prepared_for_callibrate_model_single = model_report_single.prepare_detailed_calibration()
# now callibrate the two models
num_iterations = 10
for i in range(num_iterations):
example_input = torch.tensor(torch.randint(100, (1, 3, 3, 3)), dtype=torch.float)
prepared_for_callibrate_model_full(example_input)
prepared_for_callibrate_model_single(example_input)
# now generate the reports
model_full_report = model_report_full.generate_model_report(True)
model_single_report = model_report_single.generate_model_report(False)
# check that sizes are appropriate
self.assertEqual(len(model_full_report), len(filled_detector_set))
self.assertEqual(len(model_single_report), len(single_detector_set))
# make sure observers are being properly removed for full report since we put flag in
modules_observer_cnt, graph_observer_cnt = self.get_module_and_graph_cnts(prepared_for_callibrate_model_full)
self.assertEqual(modules_observer_cnt, 0) # assert no more observer modules
self.assertEqual(graph_observer_cnt, 0) # assert no more observer nodes in graph
# make sure observers aren't being removed for single report since not specified
modules_observer_cnt, graph_observer_cnt = self.get_module_and_graph_cnts(prepared_for_callibrate_model_single)
self.assertNotEqual(modules_observer_cnt, 0)
self.assertNotEqual(graph_observer_cnt, 0)
# make sure an error is raised when we try to rerun report generation for the full report but not for the single report
with self.assertRaises(Exception):
model_full_report = model_report_full.generate_model_report(
prepared_for_callibrate_model_full, False
)
# make sure we don't run into error for single report
model_single_report = model_report_single.generate_model_report(False)
@skipIfNoFBGEMM
def test_generate_visualizer(self):
"""
Tests that the ModelReport class can properly create the ModelReportVisualizer instance
Checks that:
- Correct number of modules are represented
- Modules are sorted
- Correct number of features for each module
"""
with override_quantized_engine('fbgemm'):
# set the backend for this test
torch.backends.quantized.engine = "fbgemm"
# test with multiple detectors
detector_set = set()
detector_set.add(OutlierDetector(reference_percentile=0.95))
detector_set.add(InputWeightEqualizationDetector(0.5))
model = TwoThreeOps()
# get test model and calibrate
prepared_for_callibrate_model, mod_report = _get_prepped_for_calibration_model_helper(
model, detector_set, model.get_example_inputs()[0]
)
# now we actually callibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# try to visualize without generating report, should throw error
with self.assertRaises(Exception):
mod_rep_visualizaiton = mod_report.generate_visualizer()
# now get the report by running it through ModelReport instance
generated_report = mod_report.generate_model_report(remove_inserted_observers=False)
# now we get the visualizer should not error
mod_rep_visualizer: ModelReportVisualizer = mod_report.generate_visualizer()
# since we tested with outlier detector, which looks at every base level module
# should be six entries in the ordered dict
mod_fqns_to_features = mod_rep_visualizer.generated_reports
self.assertEqual(len(mod_fqns_to_features), 6)
# the outlier detector has 9 features per module
# input-weight has 12 features per module
# there is 1 common data point, so there should be 12 + 9 - 1 = 20 unique features for the common modules
# all linears will be common
for module_fqn in mod_fqns_to_features:
if ".linear" in module_fqn:
linear_info = mod_fqns_to_features[module_fqn]
self.assertEqual(len(linear_info), 20)
class TestFxDetectInputWeightEqualization(QuantizationTestCase):
class SimpleConv(torch.nn.Module):
def __init__(self, con_dims):
super().__init__()
self.relu = torch.nn.ReLU()
self.conv = torch.nn.Conv2d(con_dims[0], con_dims[1], kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
class TwoBlockComplexNet(torch.nn.Module):
def __init__(self):
super().__init__()
self.block1 = TestFxDetectInputWeightEqualization.SimpleConv((3, 32))
self.block2 = TestFxDetectInputWeightEqualization.SimpleConv((3, 3))
self.conv = torch.nn.Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1), padding=(1, 1), bias=False)
self.linear = torch.nn.Linear(768, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.block1(x)
x = self.conv(x)
y = self.block2(x)
y = y.repeat(1, 1, 2, 2)
z = x + y
z = z.flatten(start_dim=1)
z = self.linear(z)
z = self.relu(z)
return z
def get_fusion_modules(self):
return [['conv', 'relu']]
def get_example_inputs(self):
return (torch.randn((1, 3, 28, 28)),)
class ReluOnly(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.relu(x)
return x
def get_example_inputs(self):
return (torch.arange(27).reshape((1, 3, 3, 3)),)
def _get_prepped_for_calibration_model(self, model, detector_set, fused=False):
r"""Returns a model that has been prepared for callibration and corresponding model_report"""
# pass in necessary inputs to helper
example_input = model.get_example_inputs()[0]
return _get_prepped_for_calibration_model_helper(model, detector_set, example_input, fused)
@skipIfNoFBGEMM
def test_input_weight_equalization_determine_points(self):
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
detector_set = set([InputWeightEqualizationDetector(0.5)])
# get test model and calibrate
non_fused = self._get_prepped_for_calibration_model(self.TwoBlockComplexNet(), detector_set)
fused = self._get_prepped_for_calibration_model(self.TwoBlockComplexNet(), detector_set, fused=True)
# reporter should still give same counts even for fused model
for prepared_for_callibrate_model, mod_report in [non_fused, fused]:
# supported modules to check
mods_to_check = set([nn.Linear, nn.Conv2d])
# get the set of all nodes in the graph their fqns
node_fqns = set([node.target for node in prepared_for_callibrate_model.graph.nodes])
# there should be 4 node fqns that have the observer inserted
correct_number_of_obs_inserted = 4
number_of_obs_found = 0
obs_name_to_find = InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME
for node in prepared_for_callibrate_model.graph.nodes:
# if the obs name is inside the target, we found an observer
if obs_name_to_find in str(node.target):
number_of_obs_found += 1
self.assertEqual(number_of_obs_found, correct_number_of_obs_inserted)
# assert that each of the desired modules have the observers inserted
for fqn, module in prepared_for_callibrate_model.named_modules():
# check if module is a supported module
is_in_include_list = sum(list(map(lambda x: isinstance(module, x), mods_to_check))) > 0
if is_in_include_list:
# make sure it has the observer attribute
self.assertTrue(hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
else:
# if it's not a supported type, it shouldn't have observer attached
self.assertTrue(not hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
@skipIfNoFBGEMM
def test_input_weight_equalization_report_gen(self):
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
test_input_weight_detector = InputWeightEqualizationDetector(0.4)
detector_set = set([test_input_weight_detector])
model = self.TwoBlockComplexNet()
# prepare the model for callibration
prepared_for_callibrate_model, model_report = self._get_prepped_for_calibration_model(
model, detector_set
)
# now we actually callibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = model_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
input_weight_str, input_weight_dict = generated_report[test_input_weight_detector.get_detector_name()]
# we should have 4 layers looked at since there are 4 conv / linear layers
self.assertEqual(len(input_weight_dict), 4)
# we can validate that the max and min values of the detector were recorded properly for the first one
# this is because no data has been processed yet, so it should be values from original input
example_input = example_input.reshape((3, 28, 28)) # reshape input
for module_fqn in input_weight_dict:
# look for the first linear
if "block1.linear" in module_fqn:
block_1_lin_recs = input_weight_dict[module_fqn]
# get input range info and the channel axis
ch_axis = block_1_lin_recs[InputWeightEqualizationDetector.CHANNEL_KEY]
# ensure that the min and max values extracted match properly
example_min, example_max = torch.aminmax(example_input, dim=ch_axis)
dimension_min = torch.amin(example_min, dim=ch_axis)
dimension_max = torch.amax(example_max, dim=ch_axis)
# make sure per channel min and max are as expected
min_per_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
min_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MIN_KEY
max_per_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
max_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MAX_KEY
per_channel_min = block_1_lin_recs[min_per_key]
per_channel_max = block_1_lin_recs[max_per_key]
self.assertEqual(per_channel_min, dimension_min)
self.assertEqual(per_channel_max, dimension_max)
# make sure per channel min and max are as expected
min_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
min_key += InputWeightEqualizationDetector.GLOBAL_MIN_KEY
max_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
max_key += InputWeightEqualizationDetector.GLOBAL_MAX_KEY
# make sure the global min and max were correctly recorded and presented
global_min = block_1_lin_recs[min_key]
global_max = block_1_lin_recs[max_key]
self.assertEqual(global_min, min(dimension_min))
self.assertEqual(global_max, max(dimension_max))
input_ratio = torch.sqrt((per_channel_max - per_channel_min) / (global_max - global_min))
# ensure the comparison stat passed back is the ratio of the sqrt'd range ratios (weight over input)
# need to get the weight ratios first
# make sure per channel min and max are as expected
min_per_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
min_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MIN_KEY
max_per_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
max_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MAX_KEY
# get weight per channel and global info
per_channel_min = block_1_lin_recs[min_per_key]
per_channel_max = block_1_lin_recs[max_per_key]
# make sure per channel min and max are as expected
min_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
min_key += InputWeightEqualizationDetector.GLOBAL_MIN_KEY
max_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
max_key += InputWeightEqualizationDetector.GLOBAL_MAX_KEY
global_min = block_1_lin_recs[min_key]
global_max = block_1_lin_recs[max_key]
weight_ratio = torch.sqrt((per_channel_max - per_channel_min) / (global_max - global_min))
# also get comp stat for this specific layer
comp_stat = block_1_lin_recs[InputWeightEqualizationDetector.COMP_METRIC_KEY]
weight_to_input_ratio = weight_ratio / input_ratio
self.assertEqual(comp_stat, weight_to_input_ratio)
# only looking at the first example so can break
break
@skipIfNoFBGEMM
def test_input_weight_equalization_report_gen_empty(self):
# tests report gen on a model that doesn't have any layers
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
test_input_weight_detector = InputWeightEqualizationDetector(0.4)
detector_set = set([test_input_weight_detector])
model = self.ReluOnly()
# prepare the model for callibration
prepared_for_callibrate_model, model_report = self._get_prepped_for_calibration_model(model, detector_set)
# now we actually callibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = model_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
input_weight_str, input_weight_dict = generated_report[test_input_weight_detector.get_detector_name()]
# we should have 0 layers since there is only a Relu
self.assertEqual(len(input_weight_dict), 0)
# make sure that the string only has two lines, as should be if no suggestions
self.assertEqual(input_weight_str.count("\n"), 2)
class TestFxDetectOutliers(QuantizationTestCase):
class LargeBatchModel(torch.nn.Module):
def __init__(self, param_size):
super().__init__()
self.param_size = param_size
self.linear = torch.nn.Linear(param_size, param_size)
self.relu_1 = torch.nn.ReLU()
self.conv = torch.nn.Conv2d(param_size, param_size, 1)
self.relu_2 = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu_1(x)
x = self.conv(x)
x = self.relu_2(x)
return x
def get_example_inputs(self):
param_size = self.param_size
return (torch.randn((1, param_size, param_size, param_size)),)
def get_outlier_inputs(self):
param_size = self.param_size
random_vals = torch.randn((1, param_size, param_size, param_size))
# change one in some of them to be a massive value
random_vals[:, 0:param_size:2, 0, 3] = torch.tensor([3.28e8])
return (random_vals,)
def _get_prepped_for_calibration_model(self, model, detector_set, use_outlier_data=False):
r"""Returns a model that has been prepared for callibration and corresponding model_report"""
# call the general helper function to callibrate
example_input = model.get_example_inputs()[0]
# if we specifically want to test data with outliers replace input
if use_outlier_data:
example_input = model.get_outlier_inputs()[0]
return _get_prepped_for_calibration_model_helper(model, detector_set, example_input)
@skipIfNoFBGEMM
def test_outlier_detection_determine_points(self):
# use fbgemm and create our model instance
# then create model report instance with detector
# similar to the test for InputWeightEqualization but with key differences that made refactoring not viable
# not explicitly testing fusion because the fx workflow handles fusion automatically
with override_quantized_engine('fbgemm'):
detector_set = set([OutlierDetector(reference_percentile=0.95)])
# get test model and calibrate
prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
self.LargeBatchModel(param_size=128), detector_set
)
# supported modules to check
mods_to_check = set([nn.Linear, nn.Conv2d, nn.ReLU])
# there should be 4 node fqns that have the observer inserted
correct_number_of_obs_inserted = 4
number_of_obs_found = 0
obs_name_to_find = InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME
number_of_obs_found = sum(
[1 if obs_name_to_find in str(node.target) else 0 for node in prepared_for_callibrate_model.graph.nodes]
)
self.assertEqual(number_of_obs_found, correct_number_of_obs_inserted)
# assert that each of the desired modules have the observers inserted
for fqn, module in prepared_for_callibrate_model.named_modules():
# check if module is a supported module
is_in_include_list = isinstance(module, tuple(mods_to_check))
if is_in_include_list:
# make sure it has the observer attribute
self.assertTrue(hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
else:
# if it's not a supported type, it shouldn't have observer attached
self.assertTrue(not hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
@skipIfNoFBGEMM
def test_no_outlier_report_gen(self):
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
# test with multiple detectors
outlier_detector = OutlierDetector(reference_percentile=0.95)
dynamic_static_detector = DynamicStaticDetector(tolerance=0.5)
param_size: int = 4
detector_set = set([outlier_detector, dynamic_static_detector])
model = self.LargeBatchModel(param_size=param_size)
# get test model and calibrate
prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
model, detector_set
)
# now we actually callibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = mod_report.generate_model_report(True)
# check that sizes are appropriate only 2 detectors
self.assertEqual(len(generated_report), 2)
# get the specific report for input weight equalization
outlier_str, outlier_dict = generated_report[outlier_detector.get_detector_name()]
# we should have 4 layers looked at: the linear, the conv and the two relus
self.assertEqual(len(outlier_dict), 4)
# assert the following are true for all the modules
for module_fqn in outlier_dict:
# get the info for the specific module
module_dict = outlier_dict[module_fqn]
# there really should not be any outliers since we used a normal distribution to perform this calculation
outlier_info = module_dict[OutlierDetector.OUTLIER_KEY]
self.assertEqual(sum(outlier_info), 0)
# ensure that the number of ratios and batches counted is the same as the number of params
self.assertEqual(len(module_dict[OutlierDetector.COMP_METRIC_KEY]), param_size)
self.assertEqual(len(module_dict[OutlierDetector.NUM_BATCHES_KEY]), param_size)
@skipIfNoFBGEMM
def test_all_outlier_report_gen(self):
# make the percentile 0 and the ratio 1, and then see that everything is outlier according to it
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
# create detector of interest
outlier_detector = OutlierDetector(ratio_threshold=1, reference_percentile=0)
param_size: int = 16
detector_set = set([outlier_detector])
model = self.LargeBatchModel(param_size=param_size)
# get test model and calibrate
prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
model, detector_set
)
# now we actually callibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = mod_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
outlier_str, outlier_dict = generated_report[outlier_detector.get_detector_name()]
# we should have 4 layers looked at: the linear, the conv and the two relus
self.assertEqual(len(outlier_dict), 4)
# assert the following are true for all the modules
for module_fqn in outlier_dict:
# get the info for the specific module
module_dict = outlier_dict[module_fqn]
# everything should be an outlier because we said that the max should be equal to the min for all of them
# however we will just require that most of them are, in case several channels have all-zero values
outlier_info = module_dict[OutlierDetector.OUTLIER_KEY]
assert sum(outlier_info) >= len(outlier_info) / 2
# ensure that the number of ratios and batches counted is the same as the number of params
self.assertEqual(len(module_dict[OutlierDetector.COMP_METRIC_KEY]), param_size)
self.assertEqual(len(module_dict[OutlierDetector.NUM_BATCHES_KEY]), param_size)
@skipIfNoFBGEMM
def test_multiple_run_consistent_spike_outlier_report_gen(self):
# consistently make one row really high across the batches being tested
# generate the report after just 1 run and after many runs (30), and make sure the minimum-batches threshold is exceeded
with override_quantized_engine('fbgemm'):
# detector of interest
outlier_detector = OutlierDetector(reference_percentile=0.95)
param_size: int = 8
detector_set = set([outlier_detector])
model = self.LargeBatchModel(param_size=param_size)
# get test model and calibrate
prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
model, detector_set, use_outlier_data=True
)
# now we actually callibrate the model
example_input = model.get_outlier_inputs()[0]
example_input = example_input.to(torch.float)
# now calibrate at least 30 times to get above the minimum-batches threshold
for i in range(30):
example_input = model.get_outlier_inputs()[0]
example_input = example_input.to(torch.float)
# make a few of the batches have a constant (all-zero) channel
if i % 14 == 0:
# make one channel constant
example_input[0][1] = torch.zeros_like(example_input[0][1])
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = mod_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
outlier_str, outlier_dict = generated_report[outlier_detector.get_detector_name()]
# we should have 4 layers looked at: the linear, the conv and the two relus
self.assertEqual(len(outlier_dict), 4)
# assert the following are true for all the modules
for module_fqn in outlier_dict:
# get the info for the specific module
module_dict = outlier_dict[module_fqn]
# because we ran 30 times, we should have at least a couple be significant
# could be less because some channels could possibly be all 0
sufficient_batches_info = module_dict[OutlierDetector.IS_SUFFICIENT_BATCHES_KEY]
assert sum(sufficient_batches_info) >= len(sufficient_batches_info) / 2
# half of them should be outliers, because we set a really high value every 2 channels
outlier_info = module_dict[OutlierDetector.OUTLIER_KEY]
self.assertEqual(sum(outlier_info), len(outlier_info) / 2)
# ensure that the number of ratios and batches counted is the same as the number of params
self.assertEqual(len(module_dict[OutlierDetector.COMP_METRIC_KEY]), param_size)
self.assertEqual(len(module_dict[OutlierDetector.NUM_BATCHES_KEY]), param_size)
# for the first one ensure the per channel max values are what we set
if module_fqn == "linear.0":
# check the recorded constant-channel counts; at least 2 should be there for the first module
counts_info = module_dict[OutlierDetector.CONSTANT_COUNTS_KEY]
assert sum(counts_info) >= 2
# half of the recorded max values should be what we set
matched_max = sum([val == 3.28e8 for val in module_dict[OutlierDetector.MAX_VALS_KEY]])
self.assertEqual(matched_max, param_size / 2)
class TestFxModelReportVisualizer(QuantizationTestCase):
def _callibrate_and_generate_visualizer(self, model, prepared_for_callibrate_model, mod_report):
r"""
Calibrates the passed-in model, generates the report, and returns the visualizer
"""
# now we actually callibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = mod_report.generate_model_report(remove_inserted_observers=False)
# now we get the visualizer should not error
mod_rep_visualizer: ModelReportVisualizer = mod_report.generate_visualizer()
return mod_rep_visualizer
@skipIfNoFBGEMM
def test_get_modules_and_features(self):
"""
Tests the get_all_unique_module_fqns and get_all_unique_feature_names methods of
ModelReportVisualizer
Checks whether returned sets are of proper size and filtered properly
"""
with override_quantized_engine('fbgemm'):
# set the backend for this test
torch.backends.quantized.engine = "fbgemm"
# test with multiple detectors
detector_set = set()
detector_set.add(OutlierDetector(reference_percentile=0.95))
detector_set.add(InputWeightEqualizationDetector(0.5))
model = TwoThreeOps()
# get test model and calibrate
prepared_for_callibrate_model, mod_report = _get_prepped_for_calibration_model_helper(
model, detector_set, model.get_example_inputs()[0]
)
mod_rep_visualizer: ModelReportVisualizer = self._callibrate_and_generate_visualizer(
model, prepared_for_callibrate_model, mod_report
)
# ensure the module fqns match the ones given by the get_all_unique_feature_names method
actual_model_fqns = set(mod_rep_visualizer.generated_reports.keys())
returned_model_fqns = mod_rep_visualizer.get_all_unique_module_fqns()
self.assertEqual(returned_model_fqns, actual_model_fqns)
# now ensure that features are all properly returned
# all the linears have all the features for two detectors
# can use those as check that method is working reliably
b_1_linear_features = mod_rep_visualizer.generated_reports["block1.linear"]
# first test all features
returned_all_feats = mod_rep_visualizer.get_all_unique_feature_names(False)
self.assertEqual(returned_all_feats, set(b_1_linear_features.keys()))
# now test plottable features
plottable_set = set()
for feature_name in b_1_linear_features:
if type(b_1_linear_features[feature_name]) == torch.Tensor:
plottable_set.add(feature_name)
returned_plottable_feats = mod_rep_visualizer.get_all_unique_feature_names()
self.assertEqual(returned_plottable_feats, plottable_set)
def _prep_visualizer_helper(self):
r"""
Returns a mod rep visualizer that we test in various ways
"""
# set backend for test
torch.backends.quantized.engine = "fbgemm"
# test with multiple detectors
detector_set = set()
detector_set.add(OutlierDetector(reference_percentile=0.95))
detector_set.add(InputWeightEqualizationDetector(0.5))
model = TwoThreeOps()
# get test model and calibrate
prepared_for_callibrate_model, mod_report = _get_prepped_for_calibration_model_helper(
model, detector_set, model.get_example_inputs()[0]
)
mod_rep_visualizer: ModelReportVisualizer = self._callibrate_and_generate_visualizer(
model, prepared_for_callibrate_model, mod_report
)
return mod_rep_visualizer
@skipIfNoFBGEMM
def test_generate_tables_match_with_report(self):
"""
Tests the generate_table_view()
ModelReportVisualizer
Checks whether the generated dict has proper information
Visual check that the tables look correct performed during testing
"""
with override_quantized_engine('fbgemm'):
# get the visualizer
mod_rep_visualizer = self._prep_visualizer_helper()
table_dict = mod_rep_visualizer.generate_filtered_tables()
# test primarily the dict since it has same info as str
tensor_headers, tensor_table = table_dict[ModelReportVisualizer.TABLE_TENSOR_KEY]
channel_headers, channel_table = table_dict[ModelReportVisualizer.TABLE_CHANNEL_KEY]
# these two together should be the same as the generated report info in terms of keys
tensor_info_modules = set(row[1] for row in tensor_table)
channel_info_modules = set(row[1] for row in channel_table)
combined_modules: Set = tensor_info_modules.union(channel_info_modules)
generated_report_keys: Set = set(mod_rep_visualizer.generated_reports.keys())
self.assertEqual(combined_modules, generated_report_keys)
@skipIfNoFBGEMM
def test_generate_tables_no_match(self):
"""
Tests the generate_table_view()
ModelReportVisualizer
Checks whether the generated dict has proper information
Visual check that the tables look correct performed during testing
"""
with override_quantized_engine('fbgemm'):
# get the visualizer
mod_rep_visualizer = self._prep_visualizer_helper()
# try a random filter and make sure that there are no rows for either table
empty_tables_dict = mod_rep_visualizer.generate_filtered_tables(module_fqn_filter="random not there module")
# test primarily the dict since it has same info as str
tensor_headers, tensor_table = empty_tables_dict[ModelReportVisualizer.TABLE_TENSOR_KEY]
channel_headers, channel_table = empty_tables_dict[ModelReportVisualizer.TABLE_CHANNEL_KEY]
tensor_info_modules = set(row[1] for row in tensor_table)
channel_info_modules = set(row[1] for row in channel_table)
combined_modules: Set = tensor_info_modules.union(channel_info_modules)
self.assertEqual(len(combined_modules), 0) # should be no matching modules
@skipIfNoFBGEMM
def test_generate_tables_single_feat_match(self):
"""
Tests the generate_table_view()
ModelReportVisualizer
Checks whether the generated dict has proper information
Visual check that the tables look correct performed during testing
"""
with override_quantized_engine('fbgemm'):
# get the visualizer
mod_rep_visualizer = self._prep_visualizer_helper()
# try a matching filter for feature and make sure only those features show up
# if we filter to a very specific feature name, should only have 1 additional column in each table row
single_feat_dict = mod_rep_visualizer.generate_filtered_tables(feature_filter=OutlierDetector.MAX_VALS_KEY)
# test primarily the dict since it has same info as str
tensor_headers, tensor_table = single_feat_dict[ModelReportVisualizer.TABLE_TENSOR_KEY]
channel_headers, channel_table = single_feat_dict[ModelReportVisualizer.TABLE_CHANNEL_KEY]
# get the number of features in each of these
tensor_info_features = len(tensor_headers)
channel_info_features = len(channel_headers) - ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS
# make sure that there are no tensor features, and that there is one channel level feature
self.assertEqual(tensor_info_features, 0)
self.assertEqual(channel_info_features, 1)
def _get_prepped_for_calibration_model_helper(model, detector_set, example_input, fused: bool = False):
r"""Returns a model that has been prepared for callibration and corresponding model_report"""
# set the backend for this test
torch.backends.quantized.engine = "fbgemm"
# create model instance and prepare it
example_input = example_input.to(torch.float)
q_config_mapping = torch.ao.quantization.get_default_qconfig_mapping()
# if they passed in the fusion parameter, make sure to test that
if fused:
model = torch.quantization.fuse_modules(model, model.get_fusion_modules())
model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
model_report = ModelReport(model_prep, detector_set)
# prepare the model for callibration
prepared_for_callibrate_model = model_report.prepare_detailed_calibration()
return (prepared_for_callibrate_model, model_report)
| [
"[email protected]"
] | |
24c1026d70712dc58f96d6e0a9023fab0f1cdfd6 | 5c531de5e4759c904e608b4fc653b2b041f79a0e | /Snap_monte_carlo_simulation.py | 7cb310205cf32290a10076f203756fb68c14d270 | [] | no_license | jianhui-ben/leetcode_python | 133c7e6e5c7316d00607ba2e327239e002de28b2 | fcc16124cc24a5993e27f5d97e78d8f290e68230 | refs/heads/master | 2022-06-05T22:32:18.034581 | 2022-05-17T02:27:11 | 2022-05-17T02:27:11 | 250,683,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | #N个 Turkers标数据,数据很模糊基本靠瞎猜,有M个选项可选。
# Question: what is the probability that the N labelers reach a majority consensus,
# i.e. that more than half of them pick the same one of the M options?
# First give the closed-form answer, then approximate it in code.
# The code is simple: a Monte Carlo simulation - run enough trials and use the
# empirical frequency to approximate the probability.
## p = (1/M)**(N//2)
import random
import collections


def prob(M, N):
    """Monte Carlo estimate (in percent) that more than half of N random guessers agree."""
    major = 0
    for _ in range(100000):
        choices = [None] * N
        for i in range(N):
            choices[i] = random.randint(1, M)
        # most_common(1)[0][1] is the vote count of the single most popular option
        if collections.Counter(choices).most_common(1)[0][1] > int(N // 2):
            major += 1
    return float(major) / 100000.0 * 100.0


def verify(M, N):
    """Closed-form guess (in percent): p = (1/M)**(N//2)."""
    return (1.0 / float(M)) ** int(N // 2) * 100.0
verify(7, 3)
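# Illustrative self-check (added as a sketch; the (M, N) pairs below are arbitrary choices,
# not from the original author): compare the Monte Carlo estimate with the closed-form guess.
if __name__ == "__main__":
    for m, n in [(2, 3), (7, 3), (4, 5)]:
        print("M=%d N=%d simulated=%.2f%% analytic=%.2f%%" % (m, n, prob(m, n), verify(m, n)))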
prob(7, 3) | [
"[email protected]"
] | |
588cb0f08c24dabc182e357f3b5efba012b7b98e | bcf42041a64fdefcaec54843900c3d8f833f2215 | /emc/kb/browser/dataout.py | cf3d77f951b9bb4e11b4e53d61cd34ea9ffe24d5 | [] | no_license | adam139/emc.kb | 487650837207e0f773c077310f001a524965ee4f | ff21383762dad96ac09d414e7d1e8104c51b91f9 | refs/heads/master | 2022-01-14T09:42:49.790659 | 2020-09-22T13:16:27 | 2020-09-22T13:16:27 | 49,318,509 | 0 | 3 | null | 2016-12-26T17:37:20 | 2016-01-09T09:34:12 | Python | UTF-8 | Python | false | false | 7,602 | py | #-*- coding: UTF-8 -*-
import csv
from cStringIO import StringIO
from zope import event
from zope.component import getMultiAdapter
from five import grok
from zope.interface import implements
from zope.interface import Interface
from Products.Five.browser import BrowserView
from Products.CMFCore.utils import getToolByName
from Products.statusmessages.interfaces import IStatusMessage
import datetime
from plone import api
from emc.policy.events import AddloginEvent,NormalUserloginEvent
from emc.policy import get_ip,fmt,list2str,getfullname_orid
from emc.kb import _
# todo code cp932
# need byte string
data_VALUES = [
u"主体".encode('utf-8'),  # subject
u"客体".encode('utf-8'),  # object
u"时间".encode('utf-8'),  # time
u"ip".encode('utf-8'),
u"级别".encode('utf-8'),  # level
u"描述".encode('utf-8'),  # description
u"结果".encode('utf-8')  # result
]
userlog_header = [
u"用户".encode('utf-8'),  # user
u"时间".encode('utf-8'),  # time
u"ip".encode('utf-8'),
u"级别".encode('utf-8'),  # level
u"描述".encode('utf-8'),  # description
u"结果".encode('utf-8')  # result
]
class AdminLogDataOut (grok.View):
"""AdminLog Data export as CSV files.
"""
grok.context(Interface)
grok.name('export_csv')
grok.require('zope2.View')
def searchview(self,viewname="admin_logs"):
searchview = getMultiAdapter((self.context, self.request),name=viewname)
return searchview
def render(self):
method = self.request.get('REQUEST_METHOD', 'GET')
# import pdb
# pdb.set_trace()
if (method != 'POST'):
return self.request.response.redirect(self.context.absolute_url())
if self.request.form.get('form.button.Cancel'):
return self.request.response.redirect(self.context.absolute_url())
searchview = self.searchview()
# datadic receive front ajax post data
datadic = self.request.form
start = int(datadic['start']) # batch search start position
size = int(datadic['size']) # batch search size
sortcolumn = datadic['sortcolumn']
sortdirection = datadic['sortdirection']
keyword = (datadic['searchabletext']).strip()
# origquery = searchview.getPathQuery()
origquery = {}
# default reverse,as is desc
origquery['sort_on'] = sortcolumn
# sql db sortt_order:asc,desc
origquery['sort_order'] = sortdirection
# fuzzy keyword search (SQL LIKE match)
if keyword != "":
origquery['SearchableText'] = '%'+keyword+'%'
else:
origquery['SearchableText'] = ""
#origquery provide batch search
origquery['size'] = size
origquery['start'] = start
#totalquery search all
totalquery = origquery.copy()
totalquery['size'] = 0
# searching with size = 0 returns the total number of records
totalnum = searchview.search_multicondition(totalquery)
origquery.update({"size":totalnum})
resultDicLists = searchview.search_multicondition(origquery)
del origquery
del totalquery
if totalnum == 0: return
#fire a log event
user = api.user.get_current()
ip = get_ip(self.request)
if user is None:
return
des = "从用户日志表导出了%s条日志" % totalnum
loginEvent = NormalUserloginEvent(userid = getfullname_orid(user),
datetime = datetime.datetime.now().strftime(fmt),
ip = ip,
type = 0,
description = des,
result = 1)
if loginEvent.available():
if loginEvent.is_normal_user():
event.notify(loginEvent)
else:
des = "从管理员日志表导出了%s条日志" % totalnum
loginEvent = AddloginEvent(adminid = getfullname_orid(user),
userid = "",
datetime = datetime.datetime.now().strftime(fmt),
ip = ip,
type = 0,
description = des,
result = 1)
event.notify(loginEvent)
return self.exportData(resultDicLists)
def exportData(self,recorders):
"""Export Data within CSV file."""
datafile = self._createCSV(self._getDataInfos(recorders))
return self._createRequest(datafile.getvalue(), "admin_log_export.log")
def _getDataInfos(self,recorders):
"""Generator filled with the recorders."""
from emc.kb.utils import kind
from emc.kb.utils import level as log_level
from emc.kb.utils import result as log_result
for i in recorders:
i = list(i)
i[4] = kind[i[4]]
i[5] = log_level[i[5]]
i[7] = log_result[i[7]]
yield i
def _createCSV(self, lines):
"""Write header and lines within the CSV file."""
datafile = StringIO()
datafile.write(u'\ufeff'.encode('utf-8'))
writor = csv.writer(datafile)
writor.writerow(data_VALUES)
map(writor.writerow, lines)
return datafile
def _createRequest(self, data, filename):
"""Create the request to be returned.
Add the right header and the CSV file.
"""
self.request.response.addHeader('Content-Disposition', "attachment; filename=%s" % filename)
self.request.response.addHeader('Content-Type', "text/csv;charset=utf-8")
self.request.response.addHeader("Content-Transfer-Encoding", "8bit")
self.request.response.addHeader('Content-Length', "%d" % len(data))
self.request.response.addHeader('Pragma', "no-cache")
self.request.response.addHeader('Cache-Control', "must-revalidate, post-check=0, pre-check=0, public")
self.request.response.addHeader('Expires', "0")
return data
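# Minimal standalone sketch (illustrative only; _example_build_csv is not referenced by the
# views in this module): the same pattern AdminLogDataOut._createCSV uses - write a UTF-8 BOM
# first so spreadsheet tools detect the encoding of the non-ASCII header row, then stream the
# header and data rows through csv.writer. It relies on the csv / StringIO imports at the top
# of this module; "header" and "rows" are caller-supplied sequences.
def _example_build_csv(header, rows, with_bom=True):
    buf = StringIO()
    if with_bom:
        buf.write(u'\ufeff'.encode('utf-8'))  # UTF-8 BOM
    writer = csv.writer(buf)
    writer.writerow(header)
    for row in rows:
        writer.writerow(row)
    return buf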
class UserLogDataOut (AdminLogDataOut):
"""UserLog Data export as CSV files.
"""
# grok.context(Interface)
grok.name('userlog_export_csv')
# grok.require('zope2.View')
def searchview(self,viewname="user_logs"):
searchview = getMultiAdapter((self.context, self.request),name=viewname)
return searchview
def _createCSV(self, lines):
"""Write header and lines within the CSV file."""
datafile = StringIO()
writor = csv.writer(datafile)
writor.writerow(userlog_header)
map(writor.writerow, lines)
return datafile
def exportData(self,recorders):
"""Export Data within CSV file."""
datafile = self._createCSV(self._getDataInfos(recorders))
return self._createRequest(datafile.getvalue(), "user_log_export.log")
def _getDataInfos(self,recorders):
"""Generator filled with the recorders."""
from emc.kb.utils import kind
from emc.kb.utils import level as log_level
from emc.kb.utils import result as log_result
for i in recorders:
i = list(i)
i[3] = kind[i[3]]
i[4] = log_level[i[4]]
i[6] = log_result[i[6]]
yield i | [
"[email protected]"
] | |
7d8d2a7d613ecbd9087ac3588eca08034858f9f9 | 1ee90596d52554cb4ef51883c79093897f5279a0 | /Sisteme/[C++]System Pet OFFICIAL/uipetsystem.py | 4772d43117166548d16ed66d4b2f02322ab6c6fd | [] | no_license | Reizonr1/metin2-adv | bf7ecb26352b13641cd69b982a48a6b20061979a | 5c2c096015ef3971a2f1121b54e33358d973c694 | refs/heads/master | 2022-04-05T20:50:38.176241 | 2020-03-03T18:20:58 | 2020-03-03T18:20:58 | 233,462,795 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 25,267 | py | import os
import ui
import player
import mouseModule
import net
import app
import snd
import item
import player
import chat
import grp
import uiScriptLocale
import localeInfo
import constInfo
import ime
import wndMgr
import petskill
import uipetfeed
import uiToolTip
import uipetsystem
import interfaceModule
AFFECT_DICT = {
item.APPLY_MAX_HP : localeInfo.TOOLTIP_MAX_HP,
item.APPLY_MAX_SP : localeInfo.TOOLTIP_MAX_SP,
item.APPLY_CON : localeInfo.TOOLTIP_CON,
item.APPLY_INT : localeInfo.TOOLTIP_INT,
item.APPLY_STR : localeInfo.TOOLTIP_STR,
item.APPLY_DEX : localeInfo.TOOLTIP_DEX,
item.APPLY_ATT_SPEED : localeInfo.TOOLTIP_ATT_SPEED,
item.APPLY_MOV_SPEED : localeInfo.TOOLTIP_MOV_SPEED,
item.APPLY_CAST_SPEED : localeInfo.TOOLTIP_CAST_SPEED,
item.APPLY_HP_REGEN : localeInfo.TOOLTIP_HP_REGEN,
item.APPLY_SP_REGEN : localeInfo.TOOLTIP_SP_REGEN,
item.APPLY_POISON_PCT : localeInfo.TOOLTIP_APPLY_POISON_PCT,
item.APPLY_STUN_PCT : localeInfo.TOOLTIP_APPLY_STUN_PCT,
item.APPLY_SLOW_PCT : localeInfo.TOOLTIP_APPLY_SLOW_PCT,
item.APPLY_CRITICAL_PCT : localeInfo.TOOLTIP_APPLY_CRITICAL_PCT,
item.APPLY_PENETRATE_PCT : localeInfo.TOOLTIP_APPLY_PENETRATE_PCT,
item.APPLY_ATTBONUS_WARRIOR : localeInfo.TOOLTIP_APPLY_ATTBONUS_WARRIOR,
item.APPLY_ATTBONUS_ASSASSIN : localeInfo.TOOLTIP_APPLY_ATTBONUS_ASSASSIN,
item.APPLY_ATTBONUS_SURA : localeInfo.TOOLTIP_APPLY_ATTBONUS_SURA,
item.APPLY_ATTBONUS_SHAMAN : localeInfo.TOOLTIP_APPLY_ATTBONUS_SHAMAN,
item.APPLY_ATTBONUS_MONSTER : localeInfo.TOOLTIP_APPLY_ATTBONUS_MONSTER,
item.APPLY_ATTBONUS_HUMAN : localeInfo.TOOLTIP_APPLY_ATTBONUS_HUMAN,
item.APPLY_ATTBONUS_ANIMAL : localeInfo.TOOLTIP_APPLY_ATTBONUS_ANIMAL,
item.APPLY_ATTBONUS_ORC : localeInfo.TOOLTIP_APPLY_ATTBONUS_ORC,
item.APPLY_ATTBONUS_MILGYO : localeInfo.TOOLTIP_APPLY_ATTBONUS_MILGYO,
item.APPLY_ATTBONUS_UNDEAD : localeInfo.TOOLTIP_APPLY_ATTBONUS_UNDEAD,
item.APPLY_ATTBONUS_DEVIL : localeInfo.TOOLTIP_APPLY_ATTBONUS_DEVIL,
item.APPLY_STEAL_HP : localeInfo.TOOLTIP_APPLY_STEAL_HP,
item.APPLY_STEAL_SP : localeInfo.TOOLTIP_APPLY_STEAL_SP,
item.APPLY_MANA_BURN_PCT : localeInfo.TOOLTIP_APPLY_MANA_BURN_PCT,
item.APPLY_DAMAGE_SP_RECOVER : localeInfo.TOOLTIP_APPLY_DAMAGE_SP_RECOVER,
item.APPLY_BLOCK : localeInfo.TOOLTIP_APPLY_BLOCK,
item.APPLY_DODGE : localeInfo.TOOLTIP_APPLY_DODGE,
item.APPLY_RESIST_SWORD : localeInfo.TOOLTIP_APPLY_RESIST_SWORD,
item.APPLY_RESIST_TWOHAND : localeInfo.TOOLTIP_APPLY_RESIST_TWOHAND,
item.APPLY_RESIST_DAGGER : localeInfo.TOOLTIP_APPLY_RESIST_DAGGER,
item.APPLY_RESIST_BELL : localeInfo.TOOLTIP_APPLY_RESIST_BELL,
item.APPLY_RESIST_FAN : localeInfo.TOOLTIP_APPLY_RESIST_FAN,
item.APPLY_RESIST_BOW : localeInfo.TOOLTIP_RESIST_BOW,
item.APPLY_RESIST_FIRE : localeInfo.TOOLTIP_RESIST_FIRE,
item.APPLY_RESIST_ELEC : localeInfo.TOOLTIP_RESIST_ELEC,
item.APPLY_RESIST_MAGIC : localeInfo.TOOLTIP_RESIST_MAGIC,
item.APPLY_RESIST_WIND : localeInfo.TOOLTIP_APPLY_RESIST_WIND,
item.APPLY_REFLECT_MELEE : localeInfo.TOOLTIP_APPLY_REFLECT_MELEE,
item.APPLY_REFLECT_CURSE : localeInfo.TOOLTIP_APPLY_REFLECT_CURSE,
item.APPLY_POISON_REDUCE : localeInfo.TOOLTIP_APPLY_POISON_REDUCE,
item.APPLY_KILL_SP_RECOVER : localeInfo.TOOLTIP_APPLY_KILL_SP_RECOVER,
item.APPLY_EXP_DOUBLE_BONUS : localeInfo.TOOLTIP_APPLY_EXP_DOUBLE_BONUS,
item.APPLY_GOLD_DOUBLE_BONUS : localeInfo.TOOLTIP_APPLY_GOLD_DOUBLE_BONUS,
item.APPLY_ITEM_DROP_BONUS : localeInfo.TOOLTIP_APPLY_ITEM_DROP_BONUS,
item.APPLY_POTION_BONUS : localeInfo.TOOLTIP_APPLY_POTION_BONUS,
item.APPLY_KILL_HP_RECOVER : localeInfo.TOOLTIP_APPLY_KILL_HP_RECOVER,
item.APPLY_IMMUNE_STUN : localeInfo.TOOLTIP_APPLY_IMMUNE_STUN,
item.APPLY_IMMUNE_SLOW : localeInfo.TOOLTIP_APPLY_IMMUNE_SLOW,
item.APPLY_IMMUNE_FALL : localeInfo.TOOLTIP_APPLY_IMMUNE_FALL,
item.APPLY_BOW_DISTANCE : localeInfo.TOOLTIP_BOW_DISTANCE,
item.APPLY_DEF_GRADE_BONUS : localeInfo.TOOLTIP_DEF_GRADE,
item.APPLY_ATT_GRADE_BONUS : localeInfo.TOOLTIP_ATT_GRADE,
item.APPLY_MAGIC_ATT_GRADE : localeInfo.TOOLTIP_MAGIC_ATT_GRADE,
item.APPLY_MAGIC_DEF_GRADE : localeInfo.TOOLTIP_MAGIC_DEF_GRADE,
item.APPLY_MAX_STAMINA : localeInfo.TOOLTIP_MAX_STAMINA,
item.APPLY_MALL_ATTBONUS : localeInfo.TOOLTIP_MALL_ATTBONUS,
item.APPLY_MALL_DEFBONUS : localeInfo.TOOLTIP_MALL_DEFBONUS,
item.APPLY_MALL_EXPBONUS : localeInfo.TOOLTIP_MALL_EXPBONUS,
item.APPLY_MALL_ITEMBONUS : localeInfo.TOOLTIP_MALL_ITEMBONUS,
item.APPLY_MALL_GOLDBONUS : localeInfo.TOOLTIP_MALL_GOLDBONUS,
item.APPLY_SKILL_DAMAGE_BONUS : localeInfo.TOOLTIP_SKILL_DAMAGE_BONUS,
item.APPLY_NORMAL_HIT_DAMAGE_BONUS : localeInfo.TOOLTIP_NORMAL_HIT_DAMAGE_BONUS,
item.APPLY_SKILL_DEFEND_BONUS : localeInfo.TOOLTIP_SKILL_DEFEND_BONUS,
item.APPLY_NORMAL_HIT_DEFEND_BONUS : localeInfo.TOOLTIP_NORMAL_HIT_DEFEND_BONUS,
item.APPLY_PC_BANG_EXP_BONUS : localeInfo.TOOLTIP_MALL_EXPBONUS_P_STATIC,
item.APPLY_PC_BANG_DROP_BONUS : localeInfo.TOOLTIP_MALL_ITEMBONUS_P_STATIC,
item.APPLY_RESIST_WARRIOR : localeInfo.TOOLTIP_APPLY_RESIST_WARRIOR,
item.APPLY_RESIST_ASSASSIN : localeInfo.TOOLTIP_APPLY_RESIST_ASSASSIN,
item.APPLY_RESIST_SURA : localeInfo.TOOLTIP_APPLY_RESIST_SURA,
item.APPLY_RESIST_SHAMAN : localeInfo.TOOLTIP_APPLY_RESIST_SHAMAN,
item.APPLY_MAX_HP_PCT : localeInfo.TOOLTIP_APPLY_MAX_HP_PCT,
item.APPLY_MAX_SP_PCT : localeInfo.TOOLTIP_APPLY_MAX_SP_PCT,
item.APPLY_ENERGY : localeInfo.TOOLTIP_ENERGY,
item.APPLY_COSTUME_ATTR_BONUS : localeInfo.TOOLTIP_COSTUME_ATTR_BONUS,
item.APPLY_MAGIC_ATTBONUS_PER : localeInfo.TOOLTIP_MAGIC_ATTBONUS_PER,
item.APPLY_MELEE_MAGIC_ATTBONUS_PER : localeInfo.TOOLTIP_MELEE_MAGIC_ATTBONUS_PER,
item.APPLY_RESIST_ICE : localeInfo.TOOLTIP_RESIST_ICE,
item.APPLY_RESIST_EARTH : localeInfo.TOOLTIP_RESIST_EARTH,
item.APPLY_RESIST_DARK : localeInfo.TOOLTIP_RESIST_DARK,
item.APPLY_ANTI_CRITICAL_PCT : localeInfo.TOOLTIP_ANTI_CRITICAL_PCT,
item.APPLY_ANTI_PENETRATE_PCT : localeInfo.TOOLTIP_ANTI_PENETRATE_PCT,
}
def checkdiv(n):
    x = str(n / 10.0)
    if len(x) > 3:
        return x[:3]
    return x
def pointop(n):
t = int(n)
if t / 10 < 1:
return "0."+n
else:
return n[0:len(n)-1]+"."+n[len(n)-1:]
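# Illustrative note: pet stats arrive as integers scaled by ten, and the two helpers above
# turn them into display strings, e.g.
#   checkdiv(35)   -> "3.5"   (divide by ten, keep at most three characters)
#   pointop("123") -> "12.3"  (insert the decimal point before the last digit)
#   pointop("5")   -> "0.5"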
def GetAffectString(affectType, affectValue):
if 0 == affectType:
return None
if 0 == affectValue:
return None
try:
return AFFECT_DICT[affectType](affectValue)
except TypeError:
return "UNKNOWN_VALUE[%s] %s" % (affectType, affectValue)
except KeyError:
return "UNKNOWN_TYPE[%s] %s" % (affectType, affectValue)
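# Illustrative usage (exact wording depends on the locale files): GetAffectString looks up
# the bonus type in AFFECT_DICT and formats the value with the matching localeInfo entry.
#   GetAffectString(item.APPLY_MAX_HP, 500)  # -> localized "Max HP +500"-style tooltip text
#   GetAffectString(0, 500)                  # -> None (affect type 0 means "no bonus")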
class PetSystemMain(ui.ScriptWindow):
class TextToolTip(ui.Window):
def __init__(self, y):
ui.Window.__init__(self, "TOP_MOST")
textLine = ui.TextLine()
textLine.SetParent(self)
textLine.SetHorizontalAlignLeft()
textLine.SetOutline()
textLine.Show()
self.y = y
self.textLine = textLine
def __del__(self):
ui.Window.__del__(self)
def SetText(self, text):
self.textLine.SetText(text)
def OnRender(self):
(mouseX, mouseY) = wndMgr.GetMousePosition()
self.textLine.SetPosition(mouseX, mouseY - 60 + self.y)
def __init__(self, vnum = 0):
ui.ScriptWindow.__init__(self)
self.vnum = vnum
self.__LoadWindow()
def __del__(self):
ui.ScriptWindow.__del__(self)
def Show(self):
ui.ScriptWindow.Show(self)
def Close(self):
self.Hide()
constInfo.PET_MAIN = 0
self.feedwind.Close()
def __LoadWindow(self):
try:
pyScrLoader = ui.PythonScriptLoader()
pyScrLoader.LoadScriptFile(self, "uiscript/PetInformationWindow.py")
except:
import exception
exception.Abort("PetInformationWindow.LoadWindow.LoadObject")
try:
self.feedwind = uipetfeed.PetFeedWindow()
self.board = self.GetChild("board")
self.boardclose = self.GetChild("CloseButton")
self.slotimgpet = self.GetChild("UpBringing_Pet_Slot")
self.evolname = self.GetChild("EvolName")
self.petname = self.GetChild("PetName")
self.expwind = self.GetChild("UpBringing_Pet_EXP_Gauge_Board")
self.tooltipexp = []
for i in range(0,4):
self.tooltipexp.append(self.TextToolTip(15*i))
self.tooltipexp[i].Hide()
self.petlifeg = self.GetChild("LifeGauge")
self.petlevel = self.GetChild("LevelValue")
self.petexpa = self.GetChild("UpBringing_Pet_EXPGauge_01")
self.petexpb = self.GetChild("UpBringing_Pet_EXPGauge_02")
self.petexpc = self.GetChild("UpBringing_Pet_EXPGauge_03")
self.petexpd = self.GetChild("UpBringing_Pet_EXPGauge_04")
self.petexpe = self.GetChild("UpBringing_Pet_EXPGauge_05")
self.petexppages = []
self.petexppages.append(self.petexpa)
self.petexppages.append(self.petexpb)
self.petexppages.append(self.petexpc)
self.petexppages.append(self.petexpd)
self.petexppages.append(self.petexpe)
for exp in self.petexppages:
exp.SetSize(0, 0)
#exp.Hide()
self.petages = self.GetChild("AgeValue")
self.petdur = self.GetChild("LifeTextValue")
#gaugehp
self.nutribtn = self.GetChild("FeedLifeTimeButton")
self.sviluppobtn = self.GetChild("FeedEvolButton")
self.itemexp = self.GetChild("FeedExpButton")
self.pethp = self.GetChild("HpValue")
self.petdef = self.GetChild("DefValue")
self.petsp = self.GetChild("SpValue")
self.petskill0 = self.GetChild("PetSkillSlot0")
#self.petskill0.SetPetSkillSlot(0, 2, 10)
#self.petskill0.SetPetSkillSlot(1, 11, 10)
#self.petskill0.SetPetSkillSlot(2, 5, 10)
self.petskill0.SetSlot(0, 2, 32, 32, petskill.GetEmptySkill())
self.petskill0.SetSlot(1, 2, 32, 32, petskill.GetEmptySkill())
self.petskill0.SetSlot(2, 2, 32, 32, petskill.GetEmptySkill())
#self.petskill0.SetCoverButton(0)
#self.petskill0.SetCoverButton(1)
#self.petskill0.SetCoverButton(2)
#self.petskill0.SetAlwaysRenderCoverButton(0, TRUE)
#self.petskill0.SetAlwaysRenderCoverButton(1, TRUE)
#self.petskill0.SetAlwaysRenderCoverButton(2, TRUE)
self.petskill0.SetSelectItemSlotEvent(ui.__mem_func__(self.UseSkill))
self.petskill0.SetUseSlotEvent(ui.__mem_func__(self.UseSkill))
self.petskill0.SetOverInItemEvent(ui.__mem_func__(self.PetSkillTooltipShow))
self.petskill0.SetOverOutItemEvent(ui.__mem_func__(self.PetSkillTooltipHide))
self.SetDefaultInfo()
self.arrytooltip = [ [-1,-1], [-1,-1], [-1,-1]]
PET_FILE_NAME = "%s/pet_skill.txt" % app.GetLocalePath()
PET_FILE_SKILL = "%s/pet_skill_bonus.txt" % app.GetLocalePath()
self.linespet = pack_open(PET_FILE_NAME, "r").readlines()
self.linespetskill = pack_open(PET_FILE_SKILL, "r").readlines()
self.SkillTooltip = uiToolTip.ToolTip(180)
#Event
self.boardclose.SetEvent(ui.__mem_func__(self.Close,))
self.nutribtn.SetToggleDownEvent(lambda arg=0,arg1=1: self.OpenFeedBox(arg,arg1))
self.nutribtn.SetToggleUpEvent(lambda arg=1,arg1=0: self.OpenFeedBox(arg,arg1))
self.itemexp.SetToggleDownEvent(lambda arg=0,arg1=3: self.OpenFeedBox(arg,arg1))
self.itemexp.SetToggleUpEvent(lambda arg=1,arg1=0: self.OpenFeedBox(arg,arg1))
self.sviluppobtn.SetToggleDownEvent(lambda arg=0: self.evolution(arg))
self.sviluppobtn.SetToggleUpEvent(lambda arg=1: self.evolution(arg))
except:
import exception
exception.Abort("PetInformationWindow.LoadWindow.BindObject")
def PetSkillTooltipShow(self, slot):
if self.arrytooltip[slot][0] > 0:
tokens = self.linespet[self.arrytooltip[slot][0]-1][:-1].split("\t")
tokens2 = self.linespetskill[self.arrytooltip[slot][0]-1][:-1].split("\t")
self.SkillTooltip.ClearToolTip()
self.SkillTooltip.AutoAppendTextLine(tokens[1], grp.GenerateColor(0.9490, 0.9058, 0.7568, 1.0))
self.SkillTooltip.AppendDescription(tokens[4], 26)
self.SkillTooltip.AppendSpace(5)
if self.arrytooltip[slot][0] != 10 and self.arrytooltip[slot][0] != 17 and self.arrytooltip[slot][0] != 18:
self.SkillTooltip.AutoAppendTextLine(GetAffectString(int(tokens2[1]), int(tokens2[self.arrytooltip[slot][1]+1])))
elif self.arrytooltip[slot][0] == 10:
self.SkillTooltip.AutoAppendTextLine("Hp Restored:" + str(tokens2[self.arrytooltip[slot][1]+1]))
elif self.arrytooltip[slot][0] == 17:
self.SkillTooltip.AutoAppendTextLine("Immortality Time:" + checkdiv(int(tokens2[self.arrytooltip[slot][1]+1])) + "s")
self.SkillTooltip.AutoAppendTextLine("Cooldown: "+tokens[5]+"s", grp.GenerateColor(1.0, 0.7843, 0.0, 1.0))
self.SkillTooltip.AlignHorizonalCenter()
self.SkillTooltip.ShowToolTip()
def PetSkillTooltipHide(self):
self.SkillTooltip.HideToolTip()
def evolution(self, mode):
if mode == 0:
net.SendChatPacket("/petvoincrease")
self.sviluppobtn.Enable()
#self.SkillTooltip.HideToolTip()
def SetDefaultInfo(self):
self.evolname.SetText("")
self.petname.SetText("")
self.petlevel.SetText("")
self.petages.SetText("")
self.petdur.SetText("")
self.pethp.SetText("")
self.petdef.SetText("")
self.petsp.SetText("")
self.SetDuration("0", "0")
self.slotimgpet.ClearSlot(0)
self.petskill0.ClearSlot(0)
self.petskill0.ClearSlot(1)
self.petskill0.ClearSlot(2)
self.petskill0.SetSlot(0, 2, 32, 32, petskill.GetEmptySkill())
self.petskill0.SetSlot(1, 2, 32, 32, petskill.GetEmptySkill())
self.petskill0.SetSlot(2, 2, 32, 32, petskill.GetEmptySkill())
self.SetExperience(0,0,0)
self.arrytooltip = [ [-1,-1], [-1,-1], [-1,-1]]
self.nutribtn.Disable()
self.sviluppobtn.Disable()
self.itemexp.Disable()
def OpenFeedBox(self, mode, btn):
if constInfo.FEEDWIND == btn or constInfo.FEEDWIND == 0:
if mode == 0:
self.feedwind.Show()
constInfo.FEEDWIND = btn
else:
self.feedwind.Close()
constInfo.FEEDWIND = 0
else:
self.nutribtn.Enable()
self.sviluppobtn.Enable()
self.itemexp.Enable()
self.feedwind.Close()
constInfo.FEEDWIND = 0
def SetImageSlot(self, vnum):
self.slotimgpet.SetItemSlot(0, int(vnum), 0)
self.slotimgpet.SetAlwaysRenderCoverButton(0, TRUE)
def SetEvolveName(self, name):
self.evolname.SetText(name)
def SetName(self, name):
if name != "":
self.nutribtn.Enable()
self.sviluppobtn.Enable()
self.itemexp.Enable()
#pet.SetTop()
else:
self.nutribtn.Disable()
self.sviluppobtn.Disable()
self.itemexp.Disable()
self.petname.SetText(name)
def SetLevel(self, level):
if int(level) == 40 or int(level) == 60 or int(level) == 80:
constInfo.EVOLUTION = int(level)
else:
constInfo.EVOLUTION = 0
self.petlevel.SetText(level)
def SetAges(self, ages):
self.petages.SetText(ages)
def SetDuration(self, dur, durt):
dur1 = int(dur)/60
durt1 = int(durt)/60
tmpage = int((int(durt)/60 -int(dur) /60)/24)
if int(dur) > 0:
self.petlifeg.SetPercentage(int(dur)*1.6, int(durt))
self.petlifeg.Show()
else:
self.petlifeg.Hide()
self.petdur.SetText(str(dur1)+"/"+str(durt1)+" Hours")
        self.SetAges(str(tmpage)+" Days")
def SetHp(self, hp):
self.pethp.SetText(pointop(hp)+"%")
def SetDef(self, deff):
self.petdef.SetText(pointop(deff)+"%")
def SetSp(self, sp):
self.petsp.SetText(pointop(sp)+"%")
def SetSkill(self, slot, idx, lv):
if int(idx) != -1:
self.petskill0.ClearSlot(int(slot))
self.petskill0.SetPetSkillSlot(int(slot), int(idx), int(lv))
self.petskill0.SetCoverButton(int(slot))
self.petskill0.SetAlwaysRenderCoverButton(int(slot), TRUE)
self.arrytooltip[int(slot)][0] = int(idx)
self.arrytooltip[int(slot)][1] = int(lv)
#chat.AppendChat(chat.CHAT_TYPE_INFO, "Slot:"+str(slot)+" idx: "+str(idx)+" Lv:"+str(lv))
def SetExperience(self, expm, expi, exptot):
expm = int(expm)
expi = int(expi)
exptot = int(exptot)
if exptot > 0:
totalexp = exptot
totexpm = int( float(totalexp) / 100 * 90 )
totexpi = totalexp - totexpm
expi = min(expi, totexpi)
expmp = float(expm) / totexpm * 100
expip = float(expi) / totexpi * 100
else:
totalexp = 0
totexpm = 0
totexpi = 0
expmp = 0
expip = 0
curPoint = int(min(expm, totexpm))
curPoint = int(max(expm, 0))
maxPoint = int(max(totexpm, 0))
curPointi = int(min(expi, totexpi))
curPointi = int(max(expi, 0))
maxPointi = int(max(totexpi, 0))
quarterPoint = maxPoint / 4
quarterPointi = maxPointi
FullCount = 0
FullCounti = 0
if 0 != quarterPoint:
FullCount = min(4, curPoint / quarterPoint)
if 0 != quarterPointi:
FullCounti = min(1, curPointi / quarterPointi)
for i in xrange(4):
self.petexppages[i].Hide()
self.petexppages[4].Hide()
for i in xrange(FullCount):
self.petexppages[i].SetRenderingRect(0.0, 0.0, 0.0, 0.0)
self.petexppages[i].Show()
for i in xrange(FullCounti):
self.petexppages[4].SetRenderingRect(0.0, 0.0, 0.0, 0.0)
self.petexppages[4].Show()
if 0 != quarterPoint:
if FullCount < 4:
Percentage = float(curPoint % quarterPoint) / quarterPoint - 1.0
self.petexppages[FullCount].SetRenderingRect(0.0, Percentage, 0.0, 0.0)
self.petexppages[FullCount].Show()
if 0 != quarterPointi:
if FullCounti < 1:
Percentage = float(curPointi % quarterPointi) / quarterPointi - 1.0
self.petexppages[4].SetRenderingRect(0.0, Percentage, 0.0, 0.0)
self.petexppages[4].Show()
#chat.AppendChat(chat.CHAT_TYPE_INFO, str(curPoint)+"-"+str(maxPoint)+"-"+str(FullCount)+"--"+str(quarterPoint))
#####
self.tooltipexp[0].SetText("Experience : %d of %d" % (expm, totexpm))
self.tooltipexp[1].SetText("Experience : %.2f%%" % expmp)
self.tooltipexp[2].SetText("ExperienceI : %d of %d" % (expi, totexpi))
self.tooltipexp[3].SetText("ExperienceI : %.2f%%" % expip)
def UseSkill(self, slot):
#chat.AppendChat(chat.CHAT_TYPE_INFO, "+ --> "+str(slot))
#chat.AppendChat(chat.CHAT_TYPE_INFO, "Skill: "+ str(petskill.GetSkillbySlot(slot)))
net.SendChatPacket("/petskills "+str(slot))
def OnUpdate(self):
if constInfo.FEEDWIND == 0:
self.nutribtn.Enable()
#self.sviluppobtn.Enable()
self.itemexp.Enable()
if TRUE == self.expwind.IsIn():
for i in range(0,4):
self.tooltipexp[i].Show()
else:
for i in range(0,4):
self.tooltipexp[i].Hide()
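# Stand-alone sketch (illustrative only, not used by the classes in this file) of the gauge
# arithmetic implemented in SetExperience above: the main EXP bar is drawn as four quarter
# images that are shown, hidden, or partially revealed through SetRenderingRect.
def _example_exp_gauge_fill(cur_point, max_point, pages=4):
    quarter = max_point / pages if pages else 0
    if not quarter:
        return 0, 0.0
    full = min(pages, cur_point // quarter)           # images drawn completely
    partial = float(cur_point % quarter) / quarter    # fill ratio of the next image
    return full, partial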
class PetSystemMini(ui.ScriptWindow):
class TextToolTip(ui.Window):
def __init__(self, y):
ui.Window.__init__(self, "TOP_MOST")
textLine = ui.TextLine()
textLine.SetParent(self)
textLine.SetHorizontalAlignLeft()
textLine.SetOutline()
textLine.Show()
self.y = y
self.textLine = textLine
def __del__(self):
ui.Window.__del__(self)
def SetText(self, text):
self.textLine.SetText(text)
def OnRender(self):
(mouseX, mouseY) = wndMgr.GetMousePosition()
self.textLine.SetPosition(mouseX, mouseY - 60 + self.y)
def __init__(self, vnum = 0):
ui.ScriptWindow.__init__(self)
self.vnum = vnum
self.__LoadWindow()
def __del__(self):
ui.ScriptWindow.__del__(self)
def Show(self):
ui.ScriptWindow.Show(self)
def Close(self):
self.Hide()
def __LoadWindow(self):
try:
pyScrLoader = ui.PythonScriptLoader()
pyScrLoader.LoadScriptFile(self, "uiscript/PetMiniInformationWindow.py")
except:
import exception
exception.Abort("PetMiniInformationWindow.LoadWindow.LoadObject")
try:
self.expwind = self.GetChild("pet_mini_info_exp_gauge_board")
self.expwind1 = self.GetChild("pet_mini_info_exp_gauge_board1")
self.mainbg = self.GetChild("main_bg")
self.mainicon = self.GetChild("main_slot_img")
self.main_slot_img = self.GetChild("pet_icon_slot")
self.tooltipexp = []
for i in range(0,4):
self.tooltipexp.append(self.TextToolTip(15*i))
self.tooltipexp[i].Hide()
self.pet_icon_slot_ani_img = self.GetChild("pet_icon_slot_ani_img")
self.pet_mini_exp_01 = self.GetChild("pet_mini_EXPGauge_01")
self.pet_mini_exp_02 = self.GetChild("pet_mini_EXPGauge_02")
self.pet_mini_exp_03 = self.GetChild("pet_mini_EXPGauge_03")
self.pet_mini_exp_04 = self.GetChild("pet_mini_EXPGauge_04")
self.pet_mini_exp_05 = self.GetChild("pet_mini_EXPGauge_05")
self.petmini_exp = []
self.petmini_exp.append(self.pet_mini_exp_01)
self.petmini_exp.append(self.pet_mini_exp_02)
self.petmini_exp.append(self.pet_mini_exp_03)
self.petmini_exp.append(self.pet_mini_exp_04)
self.petmini_exp.append(self.pet_mini_exp_05)
self.petlifeg = self.GetChild("LifeGauge")
self.pet_icon_slot_ani_img.Hide()
self.skillslot = self.GetChild("mini_skill_slot0")
#self.skillslot.SetSlotScale(0, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
#self.skillslot.SetSlotScale(1, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
#self.skillslot.SetSlotScale(2, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
self.skillslot.SetSelectItemSlotEvent(ui.__mem_func__(self.UseSkill))
self.skillslot.SetUseSlotEvent(ui.__mem_func__(self.UseSkill))
self.main_slot_img.SetUseSlotEvent(ui.__mem_func__(self.OpenPet))
self.main_slot_img.SetSelectItemSlotEvent(ui.__mem_func__(self.OpenPet))
self.SetDefaultInfo()
#self.mainbg.Show()
except:
import exception
exception.Abort("PetMiniInformationWindow.LoadWindow.BindObject")
def SetDefaultInfo(self):
self.SetDuration("0", "0")
self.main_slot_img.ClearSlot(0)
self.skillslot.ClearSlot(0)
self.skillslot.ClearSlot(1)
self.skillslot.ClearSlot(2)
self.skillslot.SetSlotScale(0, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
self.skillslot.SetSlotScale(1, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
self.skillslot.SetSlotScale(2, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
self.SetExperience(0,0,0)
def OpenPet(self):
net.SendChatPacket("/gift")
def SetImageSlot(self, vnum):
self.main_slot_img.SetItemSlot(0, int(vnum), 0)
self.main_slot_img.SetAlwaysRenderCoverButton(0, TRUE)
def SetDuration(self, dur, durt):
tmpage = int((int(durt)/60 -int(dur) /60)/24)
if int(dur) > 0:
self.petlifeg.SetPercentage(int(dur), int(durt))
self.petlifeg.Show()
else:
self.petlifeg.Hide()
def SetSkill(self, slot, idx, lv):
if int(idx) != -1:
self.skillslot.ClearSlot(int(slot))
self.skillslot.SetPetSkillSlot(int(slot), int(idx), int(lv), 0.5, 0.5)
self.skillslot.SetCoverButton(int(slot), "d:/ymir work/ui/pet/mini_window/pet_slot_corvermini.sub", "d:/ymir work/ui/pet/mini_window/pet_slot_corvermini.sub", "d:/ymir work/ui/pet/mini_window/pet_slot_corvermini.sub" , "d:/ymir work/ui/pet/mini_window/pet_slot_corvermini.sub")
self.skillslot.SetAlwaysRenderCoverButton(int(slot), TRUE)
def SetExperience(self, expm, expi, exptot):
expm = int(expm)
expi = int(expi)
exptot = int(exptot)
if exptot > 0:
totalexp = exptot
totexpm = int( float(totalexp) / 100 * 90 )
totexpi = totalexp - totexpm
expi = min(expi, totexpi)
expmp = float(expm) / totexpm * 100
expip = float(expi) / totexpi * 100
else:
totalexp = 0
totexpm = 0
totexpi = 0
expmp = 0
expip = 0
curPoint = int(min(expm, totexpm))
curPoint = int(max(expm, 0))
maxPoint = int(max(totexpm, 0))
curPointi = int(min(expi, totexpi))
curPointi = int(max(expi, 0))
maxPointi = int(max(totexpi, 0))
quarterPoint = maxPoint / 4
quarterPointi = maxPointi
FullCount = 0
FullCounti = 0
if 0 != quarterPoint:
FullCount = min(4, curPoint / quarterPoint)
if 0 != quarterPointi:
FullCounti = min(1, curPointi / quarterPointi)
for i in xrange(4):
self.petmini_exp[i].Hide()
self.petmini_exp[4].Hide()
for i in xrange(FullCount):
self.petmini_exp[i].SetRenderingRect(0.0, 0.0, 0.0, 0.0)
self.petmini_exp[i].Show()
for i in xrange(FullCounti):
self.petmini_exp[4].SetRenderingRect(0.0, 0.0, 0.0, 0.0)
self.petmini_exp[4].Show()
if 0 != quarterPoint:
if FullCount < 4:
Percentage = float(curPoint % quarterPoint) / quarterPoint - 1.0
self.petmini_exp[FullCount].SetRenderingRect(0.0, Percentage, 0.0, 0.0)
self.petmini_exp[FullCount].Show()
if 0 != quarterPointi:
if FullCounti < 1:
Percentage = float(curPointi % quarterPointi) / quarterPointi - 1.0
self.petmini_exp[4].SetRenderingRect(0.0, Percentage, 0.0, 0.0)
self.petmini_exp[4].Show()
#####
self.tooltipexp[0].SetText("Experience : %d of %d" % (expm, totexpm))
self.tooltipexp[1].SetText("Experience : %.2f%%" % expmp)
self.tooltipexp[2].SetText("ExperienceI : %d of %d" % (expi, totexpi))
self.tooltipexp[3].SetText("ExperienceI : %.2f%%" % expip)
def UseSkill(self, slot):
chat.AppendChat(chat.CHAT_TYPE_INFO, "+ --> "+str(slot))
#chat.AppendChat(chat.CHAT_TYPE_INFO, "Skill: "+ str(petskill.GetSkillbySlot(slot)))
net.SendChatPacket("/petskills "+str(slot))
def OnUpdate(self):
if constInfo.PET_LEVEL == 40 and constInfo.PET_EVOLUTION == 0:
self.pet_icon_slot_ani_img.Show()
elif constInfo.PET_LEVEL == 81 and constInfo.PET_EVOLUTION == 1:
self.pet_icon_slot_ani_img.Show()
elif constInfo.PET_LEVEL == 81 and constInfo.PET_EVOLUTION == 2:
self.pet_icon_slot_ani_img.Show()
else:
self.pet_icon_slot_ani_img.Hide()
if TRUE == self.expwind1.IsIn():
for i in range(0,4):
self.tooltipexp[i].Show()
else:
for i in range(0,4):
self.tooltipexp[i].Hide()
| [
"[email protected]"
] | |
2112bbc0bb40eb05b9d150ae386c7817e5840775 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /unprocessing/estimator.py | def1f4464ffc7f3d2afab9ff75d80d3992b58c68 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 5,225 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unprocessing model function and train and eval specs for Estimator.
Unprocessing Images for Learned Raw Denoising
http://timothybrooks.com/tech/unprocessing
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from unprocessing import process
from tensorflow.contrib import layers as contrib_layers
def psnr(labels, predictions):
"""Computes average peak signal-to-noise ratio of `predictions`.
Here PSNR is defined with respect to the maximum value of 1. All image tensors
must be within the range [0, 1].
Args:
labels: Tensor of shape [B, H, W, N].
predictions: Tensor of shape [B, H, W, N].
Returns:
Tuple of (psnr, update_op) as returned by tf.metrics.
"""
predictions.shape.assert_is_compatible_with(labels.shape)
with tf.control_dependencies([tf.assert_greater_equal(labels, 0.0),
tf.assert_less_equal(labels, 1.0)]):
psnrs = tf.image.psnr(labels, predictions, max_val=1.0)
psnrs = tf.boolean_mask(psnrs, tf.logical_not(tf.is_inf(psnrs)))
return tf.metrics.mean(psnrs, name='psnr')
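# Usage sketch (illustrative only; not part of the training pipeline): like other tf.metrics,
# `psnr` returns a (value, update_op) pair, so local variables must be initialized and the
# update op run before the value is read, e.g.
#
#   labels = tf.random.uniform([2, 32, 32, 4])
#   predictions = tf.clip_by_value(labels + 0.05, 0.0, 1.0)
#   psnr_value, psnr_update = psnr(labels, predictions)
#   # sess.run(tf.local_variables_initializer()); sess.run(psnr_update); sess.run(psnr_value)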
def create_model_fn(inference_fn, hparams):
"""Creates a model function for Estimator.
Args:
inference_fn: Model inference function with specification:
Args -
noisy_img - Tensor of shape [B, H, W, 4].
variance - Tensor of shape [B, H, W, 4].
Returns -
Tensor of shape [B, H, W, 4].
hparams: Hyperparameters for model as a tf.contrib.training.HParams object.
Returns:
`_model_fn`.
"""
def _model_fn(features, labels, mode, params):
"""Constructs the model function.
Args:
features: Dictionary of input features.
labels: Tensor of labels if mode is `TRAIN` or `EVAL`, otherwise `None`.
mode: ModeKey object (`TRAIN` or `EVAL`).
params: Parameter dictionary passed from the Estimator object.
Returns:
An EstimatorSpec object that encapsulates the model and its serving
configurations.
"""
del params # Unused.
def process_images(images):
"""Closure for processing images with fixed metadata."""
return process.process(images, features['red_gain'],
features['blue_gain'], features['cam2rgb'])
denoised_img = inference_fn(features['noisy_img'], features['variance'])
noisy_img = process_images(features['noisy_img'])
denoised_img = process_images(denoised_img)
truth_img = process_images(labels)
if mode in [tf_estimator.ModeKeys.TRAIN, tf_estimator.ModeKeys.EVAL]:
loss = tf.losses.absolute_difference(truth_img, denoised_img)
else:
loss = None
if mode == tf_estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
train_op = contrib_layers.optimize_loss(
loss=loss,
global_step=tf.train.get_global_step(),
learning_rate=None,
optimizer=optimizer,
name='') # Prevents scope prefix.
else:
train_op = None
if mode == tf_estimator.ModeKeys.EVAL:
eval_metric_ops = {'PSNR': psnr(truth_img, denoised_img)}
def summary(images, name):
"""As a hack, saves image summaries by adding to `eval_metric_ops`."""
images = tf.saturate_cast(images * 255 + 0.5, tf.uint8)
eval_metric_ops[name] = (tf.summary.image(name, images, max_outputs=2),
tf.no_op())
summary(noisy_img, 'Noisy')
summary(denoised_img, 'Denoised')
summary(truth_img, 'Truth')
diffs = (denoised_img - truth_img + 1.0) / 2.0
summary(diffs, 'Diffs')
else:
eval_metric_ops = None
return tf_estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
return _model_fn
def create_train_and_eval_specs(train_dataset_fn,
eval_dataset_fn,
eval_steps=250):
"""Creates a TrainSpec and EvalSpec.
Args:
train_dataset_fn: Function returning a Dataset of training data.
eval_dataset_fn: Function returning a Dataset of evaluation data.
eval_steps: Number of steps for evaluating model.
Returns:
Tuple of (TrainSpec, EvalSpec).
"""
train_spec = tf_estimator.TrainSpec(input_fn=train_dataset_fn, max_steps=None)
eval_spec = tf_estimator.EvalSpec(
input_fn=eval_dataset_fn, steps=eval_steps, name='')
return train_spec, eval_spec
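# Usage sketch (illustrative; `inference_fn`, `hparams`, the dataset functions and `model_dir`
# are assumed to be defined by the caller):
#
#   model_fn = create_model_fn(inference_fn, hparams)
#   estimator = tf_estimator.Estimator(model_fn=model_fn, model_dir=model_dir)
#   train_spec, eval_spec = create_train_and_eval_specs(train_dataset_fn, eval_dataset_fn)
#   tf_estimator.train_and_evaluate(estimator, train_spec, eval_spec)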
| [
"[email protected]"
] | |
0fb5e7964be470bc671a7d6c2fc74cb80dd76bf7 | 07c6d3055eda7b1ddb16ce9444166ed311ce3219 | /modules/topics.py | 0821e4b960e83e661ea7519105c0d6cf7682fd6f | [] | no_license | IISH/dpe | 4df9b0576b5419e543c61ce9ef14380ddc4b5c03 | 6509b06aa03242f450766d4cb5d8984f14146b11 | refs/heads/master | 2021-01-10T17:52:54.775316 | 2016-05-04T09:50:46 | 2016-05-04T09:50:46 | 42,994,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,867 | py | import urllib2
import simplejson
import json
import sys
import pandas as pd
import random
import vincent
from vincent import Axis, AxisProperties, PropertySet, ValueRef
from pandas.io.json import json_normalize
from config import configuration, dataverse2indicators, load_dataverse, findpid, load_metadata
import re
def loadjson(apiurl):
jsondataurl = apiurl
req = urllib2.Request(jsondataurl)
opener = urllib2.build_opener()
f = opener.open(req)
dataframe = simplejson.load(f)
return dataframe
def topics_parser(alltopics):
topics = {}
indicators = {}
topic2inds = {}
indline = []
for item in alltopics:
#print item
name = item['Name']
thisid = int(item['ID'])
pcode = item['parent ID']
if not pcode:
topics[name] = thisid
else:
indicators[thisid] = name
try:
indline = topic2inds[pcode]
except:
indline = []
indline.append(thisid)
topic2inds[int(pcode)] = indline
return (topics, indicators, topic2inds)
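# Illustrative example (made-up rows): topics_parser splits flat topic records into
# top-level topics and their child indicators.
#
#   rows = [{'Name': 'Population', 'ID': 1, 'parent ID': None},
#           {'Name': 'Total population', 'ID': 2, 'parent ID': '1'}]
#   topics_parser(rows)
#   # -> ({'Population': 1}, {2: 'Total population'}, {1: [2]})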
def load_alltopics(api, branch):
result = loadjson(api)
(topics, indicators, topic2inds) = topics_parser(result)
datasets = dataverse2indicators(branch)
html = ''
for topic in sorted(topics):
topicID = topics[topic]
html = html + "<optgroup label=\"" + str(topic) + "\">\n"
indlist = topic2inds[topicID]
for ind in indlist:
indicator = indicators[ind]
try:
showind = datasets[indicator]
except:
showind = ind
html = html + "\t<option value=\"" + str(showind) + "\">" + indicator + "</option>" + "\n"
html = html + "</optgroup>\n"
return html
| [
"[email protected]"
] | |
15d9b29ddeef1b379e388b0cbb36ebe97afa4cdd | 30a2a924eb32e7297b5a99785950467f25ea785d | /tfgen.py | 074dd41e2fc6687d9c11c44a8d2d2c8c9a1784f5 | [] | no_license | zshwuhan/Reinforcement-Learning-of-Spatio-Temporal-Point-Processes | 1a794e83491b52dea5db3926de91779a9e661a17 | a3f98e77b56c03839dcdb545b17b3675e7c43878 | refs/heads/master | 2020-07-22T16:18:10.020860 | 2019-07-02T18:49:02 | 2019-07-02T18:49:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,218 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Imitation Learning for Point Process
An LSTM-based model for generating marked spatial-temporal points.
References:
- https://arxiv.org/abs/1811.05016
Dependencies:
- Python 3.6.7
- tensorflow==1.5.0
"""
import sys
import arrow
import utils
import numpy as np
import tensorflow as tf
from stppg import GaussianMixtureDiffusionKernel, HawkesLam, SpatialTemporalPointProcess
class SpatialTemporalHawkes(object):
"""
Customized Spatial Temporal Hawkes
    A Hawkes model parametrized by multi-layer neural networks, which provides flexible self-exciting
    point patterns.
"""
def __init__(self, T, S, layers=[20, 20], n_comp=5, C=1., maximum=1e+3, verbose=False):
"""
"""
# constant hyper parameters
self.INIT_PARAM = .01
self.SIGMA_SHIFT = .05
self.SIGMA_SCALE = .2
self.MU_SCALE = .01
# configurations
self.C = C # constant
self.T = T # time space
self.S = S # location space
self.maximum = maximum # upper bound of conditional intensity
self.verbose = verbose
# model parameters
self.mu = tf.get_variable(name="mu", initializer=tf.constant(0.1), dtype=tf.float32)
self.beta = tf.get_variable(name="beta", initializer=tf.constant(1.), dtype=tf.float32)
self.Wss = []
self.bss = []
self.Wphis = []
# construct multi-layers neural networks
# - define the layers where 2 is for the input layer (x and y);
# And 5 is for the output layer (mu_x, mu_y, sigma_x, sigma_y, rho)
self.layers = [2] + layers + [5]
# - define the number of the components in Gaussian mixture diffusion kernel
self.n_comp = n_comp
# - construct component weighting vectors
for k in range(self.n_comp):
Wphi = tf.get_variable(name="Wphi%d" % k,
initializer=self.INIT_PARAM * tf.random.normal(shape=[2, 1]),
dtype=tf.float32)
self.Wphis.append(Wphi)
# - construct weight & bias matrix layer by layer for each of Gaussian components
Ws = []
bs = []
for i in range(len(self.layers)-1):
# random initialization
W = tf.get_variable(name="W%d%d" % (k, i),
initializer=self.INIT_PARAM * tf.random.normal(shape=[self.layers[i], self.layers[i+1]]),
dtype=tf.float32)
b = tf.get_variable(name="b%d%d" % (k, i),
initializer=self.INIT_PARAM * tf.random.normal(shape=[self.layers[i+1]]),
dtype=tf.float32)
Ws.append(W)
bs.append(b)
self.Wss.append(Ws)
self.bss.append(bs)
def sampling(self, sess, batch_size):
"""fetch model parameters, and generate samples accordingly."""
# get current model parameters
mu, beta = sess.run([self.mu, self.beta])
Wss = sess.run(self.Wss)
bss = sess.run(self.bss)
Wphis = sess.run(self.Wphis)
# construct kernel function and conditional intensity lambda
kernel = GaussianMixtureDiffusionKernel(
self.n_comp, layers=self.layers[1:-1], beta=beta, C=self.C,
SIGMA_SHIFT=self.SIGMA_SHIFT, SIGMA_SCALE=self.SIGMA_SCALE, MU_SCALE=self.MU_SCALE,
Wss=Wss, bss=bss, Wphis=Wphis)
lam = HawkesLam(mu, kernel, maximum=self.maximum)
# sampling points given model parameters
pp = SpatialTemporalPointProcess(lam)
seqs, sizes = pp.generate(T=self.T, S=self.S, batch_size=batch_size, verbose=self.verbose)
return seqs
def _nonlinear_mapping(self, k, s):
"""nonlinear mapping from location space to parameters space"""
# construct multi-layers neural networks
output = s # [n_his, 2]
for i in range(len(self.layers)-1):
output = tf.nn.sigmoid(tf.nn.xw_plus_b(output, self.Wss[k][i], self.bss[k][i])) # [n_his, n_b]
# project to parameters space
mu_x = (output[:, 0] - 0.5) * 2 * self.MU_SCALE # [n_his]: mu_x spans (-MU_SCALE, MU_SCALE)
mu_y = (output[:, 1] - 0.5) * 2 * self.MU_SCALE # [n_his]: mu_y spans (-MU_SCALE, MU_SCALE)
sigma_x = output[:, 2] * self.SIGMA_SCALE + self.SIGMA_SHIFT # [n_his]: sigma_x spans (SIGMA_SHIFT, SIGMA_SHIFT + SIGMA_SCALE)
sigma_y = output[:, 3] * self.SIGMA_SCALE + self.SIGMA_SHIFT # [n_his]: sigma_y spans (SIGMA_SHIFT, SIGMA_SHIFT + SIGMA_SCALE)
rho = output[:, 4] * 1.5 - .75 # [n_his]: rho spans (-.75, .75)
return mu_x, mu_y, sigma_x, sigma_y, rho
def _gaussian_kernel(self, k, t, s, his_t, his_s):
"""
A Gaussian diffusion kernel function based on the standard kernel function proposed
by Musmeci and Vere-Jones (1992). The angle and shape of diffusion ellipse is able
to vary according to the location.
k indicates the k-th gaussian component that is used to compute the nonlinear mappings.
"""
eps = 1e-8 # IMPORTANT: Avoid delta_t be zero
delta_t = t - his_t + eps # [n_his]
delta_s = s - his_s # [n_his, 2]
delta_x = delta_s[:, 0] # [n_his]
delta_y = delta_s[:, 1] # [n_his]
mu_x, mu_y, sigma_x, sigma_y, rho = self._nonlinear_mapping(k, his_s)
return tf.exp(- self.beta * delta_t) * \
(self.C / (2 * np.pi * sigma_x * sigma_y * delta_t * tf.sqrt(1 - tf.square(rho)))) * \
tf.exp((- 1. / (2 * delta_t * (1 - tf.square(rho)))) * \
((tf.square(delta_x - mu_x) / tf.square(sigma_x)) + \
(tf.square(delta_y - mu_y) / tf.square(sigma_y)) - \
(2 * rho * (delta_x - mu_x) * (delta_y - mu_y) / (sigma_x * sigma_y))))
def _softmax(self, s, k):
"""
Gaussian mixture components are weighted by phi^k, which are computed by a softmax function, i.e.,
phi^k(x, y) = e^{[x y]^T w^k} / \sum_{i=1}^K e^{[x y]^T w^i}
"""
# s: [n_his, 2]
# Wphis[k]: [2, 1]
numerator = tf.exp(tf.matmul(s, self.Wphis[k])) # [n_his, 1]
denominator = tf.concat([
tf.exp(tf.matmul(s, self.Wphis[i]))
for i in range(self.n_comp) ], axis=1) # [n_his, K=n_comp]
phis = tf.squeeze(numerator) / tf.reduce_sum(denominator, axis=1) # [n_his]
return phis
def _gaussian_mixture_kernel(self, t, s, his_t, his_s):
"""
A Gaussian mixture diffusion kernel function is superposed by multiple Gaussian diffusion
kernel function. The number of the Gaussian components is specified by n_comp.
"""
nus = []
for k in range(self.n_comp):
phi = self._softmax(his_s, k) # [n_his]
nu = phi * self._gaussian_kernel(k, t, s, his_t, his_s) # [n_his]
nu = tf.expand_dims(nu, -1) # [n_his, 1]
nus.append(nu) # K * [n_his, 1]
nus = tf.concat(nus, axis=1) # [n_his, K]
return tf.reduce_sum(nus, axis=1) # [n_his]
def _lambda(self, t, s, his_t, his_s):
"""lambda function for the Hawkes process."""
lam = self.mu + tf.reduce_sum(self._gaussian_mixture_kernel(t, s, his_t, his_s))
return lam
def log_conditional_pdf(self, points, keep_latest_k=None):
"""log pdf conditional on history."""
if keep_latest_k is not None:
points = points[-keep_latest_k:, :]
# number of the points
len_points = tf.shape(points)[0]
# variables for calculating triggering probability
s, t = points[-1, 1:], points[-1, 0]
his_s, his_t = points[:-1, 1:], points[:-1, 0]
def pdf_no_history():
return tf.log(tf.clip_by_value(self._lambda(t, s, his_t, his_s), 1e-8, 1e+10))
def pdf_with_history():
# triggering probability
log_trig_prob = tf.log(tf.clip_by_value(self._lambda(t, s, his_t, his_s), 1e-8, 1e+10))
# variables for calculating tail probability
tn, ti = points[-2, 0], points[:-1, 0]
t_ti, tn_ti = t - ti, tn - ti
# tail probability
# TODO: change to gaussian mixture (add phi)
log_tail_prob = - \
self.mu * (t - tn) * utils.lebesgue_measure(self.S) - \
tf.reduce_sum(tf.scan(
lambda a, i: self.C * (tf.exp(- self.beta * tn_ti[i]) - tf.exp(- self.beta * t_ti[i])) / \
tf.clip_by_value(self.beta, 1e-8, 1e+10),
tf.range(tf.shape(t_ti)[0]),
initializer=np.array(0., dtype=np.float32)))
return log_trig_prob + log_tail_prob
# TODO: Unsolved issue:
        # pdf_with_history will still be called even if the condition is true, which leads to the
        # exception "ValueError: slice index -1 of dimension 0 out of bounds.", because points is
        # empty but we try to index a nonexistent element.
        # However, when points is indexed in a scan loop, this works fine and the numerical result
        # is also correct, which is very confusing to me. Therefore, I leave this problem here for now.
log_cond_pdf = tf.cond(tf.less(len_points, 2),
pdf_no_history, # if there is only one point in the sequence
pdf_with_history) # if there is more than one point in the sequence
return log_cond_pdf
def log_likelihood(self, points):
"""log likelihood of given points"""
loglikli = 0. # loglikelihood initialization
mask_t = tf.cast(points[:, 0] > 0, tf.float32) # time mask
trunc_seq = tf.boolean_mask(points, mask_t) # truncate the sequence and get the valid part
seq_len = tf.shape(trunc_seq)[0] # length of the sequence
# term 1: product of lambda
loglikli += tf.reduce_sum(tf.scan(
lambda a, i: tf.log(self._lambda(trunc_seq[i, 0], trunc_seq[i, 1:], trunc_seq[:i, 0], trunc_seq[:i, 1:])),
tf.range(seq_len),
initializer=np.array(0., dtype=np.float32)))
# term 2: 1 - F^*(T)
ti = points[:, 0]
zero_ti = 0 - ti
T_ti = self.T[1] - ti
loglikli -= tf.reduce_sum(tf.scan(
lambda a, i: self.C * (tf.exp(- self.beta * zero_ti[i]) - tf.exp(- self.beta * T_ti[i])) / \
tf.clip_by_value(self.beta, 1e-8, 1e+10),
tf.range(tf.shape(ti)[0]),
initializer=np.array(0., dtype=np.float32)))
return loglikli
def save_params_npy(self, sess, path):
"""save parameters into numpy file."""
Wss = sess.run(self.Wss)
bss = sess.run(self.bss)
Wphis = sess.run(self.Wphis)
mu, beta = sess.run([self.mu, self.beta])
print(Wss)
print(Wphis)
np.savez(path, Wss=Wss, bss=bss, Wphis=Wphis, mu=mu, beta=beta)
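# Stand-alone sketch (illustrative only, not used elsewhere in this module) of the
# single-component diffusion kernel evaluated in `_gaussian_kernel` above, written in
# plain numpy with the neural-network mapping replaced by fixed, arbitrary parameters.
def _example_gaussian_diffusion_kernel(delta_t, delta_x, delta_y,
                                       mu_x=0., mu_y=0., sigma_x=.1, sigma_y=.1, rho=0.,
                                       beta=1., C=1.):
    norm = C / (2 * np.pi * sigma_x * sigma_y * delta_t * np.sqrt(1. - rho ** 2))
    quad = ((delta_x - mu_x) ** 2 / sigma_x ** 2 +
            (delta_y - mu_y) ** 2 / sigma_y ** 2 -
            2. * rho * (delta_x - mu_x) * (delta_y - mu_y) / (sigma_x * sigma_y))
    return np.exp(-beta * delta_t) * norm * np.exp(-quad / (2. * delta_t * (1. - rho ** 2)))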
if __name__ == "__main__":
# Unittest example
np.random.seed(1)
tf.set_random_seed(1)
with tf.Session() as sess:
hawkes = SpatialTemporalHawkes(
T=[0., 10.], S=[[-1., 1.], [-1., 1.]],
layers=[5], n_comp=3, C=1., maximum=1e+3, verbose=True)
points = tf.constant([
[ 1.16898147e-02, 1.45831794e-01, -3.05314839e-01],
[ 4.81481478e-02, -1.25229925e-01, 8.72766301e-02],
[ 1.13194443e-01, -3.87020826e-01, 2.80696362e-01],
[ 1.60300925e-01, -2.42807735e-02, -5.64230382e-01],
[ 1.64004624e-01, 7.10764453e-02, -1.77927762e-01],
[ 1.64236113e-01, 6.51166216e-02, -6.82414293e-01],
[ 2.05671296e-01, -4.48017061e-01, 5.36620915e-01],
[ 2.12152779e-01, -3.20064761e-02, -2.08911732e-01]], dtype=tf.float32)
init_op = tf.global_variables_initializer()
sess.run(init_op)
# t = points[-1, 0]
# s = points[-1, 1:]
# his_t = points[:-1, 0]
# his_s = points[:-1, 1:]
# res = sess.run(hawkes.log_conditional_pdf(points))
# res = sess.run(hawkes._lambda(t, s, his_t, his_s))
# res = sess.run(hawkes._softmax(his_s, 0))
# res = sess.run(hawkes._gaussian_kernel(0, t, s, his_t, his_s))
# seq_len = tf.shape(points)[0]
# r = tf.scan(
# lambda a, i: hawkes._lambda(points[i, 0], points[i, 1:], points[:i, 0], points[:i, 1:]),
# tf.range(seq_len), # from the first point to the last point
# initializer=np.array(0., dtype=np.float32))
r = hawkes.log_likelihood(points)
print(sess.run(r))
# # test sampling
# seqs = hawkes.sampling(sess, batch_size=10)
# print(seqs) | [
"[email protected]"
] | |
a2e37e0d4b119607f5714d60955c059bfeb459ae | 96c1f13473cf224113185902edd4c9c01091e106 | /theseus/optimizer/nonlinear/dcem.py | e8187726c0cc032faab0f714e60c34a80840bb1b | [
"MIT"
] | permissive | facebookresearch/theseus | f1e488eb5a25f5ba74a6995911bee958b5da4cf3 | 240e1206329d42fedd40399684d6e17e455c6645 | refs/heads/main | 2023-08-11T07:33:12.328520 | 2023-08-02T12:58:01 | 2023-08-02T12:58:01 | 429,570,359 | 1,410 | 105 | MIT | 2023-08-01T14:30:01 | 2021-11-18T20:28:27 | Python | UTF-8 | Python | false | false | 8,421 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Union
import numpy as np
import torch
from torch.distributions import Normal
from theseus.core.objective import Objective
from theseus.optimizer import OptimizerInfo
from theseus.optimizer.variable_ordering import VariableOrdering
from theseus.third_party.lml import LML
from .nonlinear_optimizer import (
BackwardMode,
EndIterCallbackType,
NonlinearOptimizer,
NonlinearOptimizerInfo,
NonlinearOptimizerStatus,
)
class DCEM(NonlinearOptimizer):
"""
    DCEM optimizer for nonlinear optimization using sampling-based techniques.
    The optimizer can be very sensitive to hyperparameter tuning. A few tuning
    hints:
    1. If you have to lower max_iterations, increase n_sample.
    2. The higher n_sample is, the more slowly the variance of the samples decreases.
    3. The higher n_sample is, the better the chance that the optimum is in the elite set.
    4. The higher n_elite is, the slower the convergence but the more accurate the
    result can be, at the cost of more iterations. n_elite=5 is good enough for most cases.
"""
def __init__(
self,
objective: Objective,
vectorize: bool = False,
max_iterations: int = 50,
n_sample: int = 100,
n_elite: int = 5,
temp: float = 1.0,
init_sigma: Union[float, torch.Tensor] = 1.0,
lb: float = None,
ub: float = None,
lml_verbose: bool = False,
lml_eps: float = 1e-3,
normalize: bool = True,
abs_err_tolerance: float = 1e-6,
rel_err_tolerance: float = 1e-4,
**kwargs,
) -> None:
super().__init__(
objective,
vectorize=vectorize,
abs_err_tolerance=abs_err_tolerance,
rel_err_tolerance=rel_err_tolerance,
max_iterations=max_iterations,
**kwargs,
)
self.objective = objective
self.ordering = VariableOrdering(objective)
self.n_samples = n_sample
self.n_elite = n_elite
self.lb = lb
self.ub = ub
self.temp = temp
self.normalize = normalize
self._tot_dof = sum([x.dof() for x in self.ordering])
self.lml_eps = lml_eps
self.lml_verbose = lml_verbose
self.init_sigma = init_sigma
def _mu_vec_to_dict(self, mu: torch.Tensor) -> Dict[str, torch.Tensor]:
idx = 0
mu_dic = {}
for var in self.ordering:
mu_dic[var.name] = mu[:, slice(idx, idx + var.dof())]
idx += var.dof()
return mu_dic
def reset_sigma(self, init_sigma: Union[float, torch.Tensor]) -> None:
self.sigma = (
torch.ones(
(self.objective.batch_size, self._tot_dof), device=self.objective.device
)
* init_sigma
)
def _CEM_step(self):
"""
Performs one iteration of CEM.
Updates the self.sigma and return the new mu.
"""
device = self.objective.device
n_batch = self.ordering[0].shape[0]
mu = torch.cat([var.tensor for var in self.ordering], dim=-1)
X = Normal(mu, self.sigma).rsample((self.n_samples,))
X_samples: List[Dict[str, torch.Tensor]] = []
for sample in X:
X_samples.append(self._mu_vec_to_dict(sample))
fX = torch.stack(
[self.objective.error_metric(X_samples[i]) for i in range(self.n_samples)],
dim=1,
)
assert fX.shape == (n_batch, self.n_samples)
if self.temp is not None and self.temp < np.infty:
if self.normalize:
fX_mu = fX.mean(dim=1).unsqueeze(1)
fX_sigma = fX.std(dim=1).unsqueeze(1)
_fX = (fX - fX_mu) / (fX_sigma + 1e-6)
else:
_fX = fX
if self.n_elite == 1:
# indexes = LML(N=n_elite, verbose=lml_verbose, eps=lml_eps)(-_fX*temp)
indexes = torch.softmax(-_fX * self.temp, dim=1)
else:
indexes = LML(
N=self.n_elite, verbose=self.lml_verbose, eps=self.lml_eps
)(-_fX * self.temp)
indexes = indexes.unsqueeze(2)
eps = 0
else:
indexes_vals = fX.argsort(dim=1)[:, : self.n_elite]
# Scatter 1.0 to the indexes using indexes_vals
indexes = torch.zeros(n_batch, self.n_samples, device=device).scatter_(
1, indexes_vals, 1.0
)
indexes = indexes.unsqueeze(2)
eps = 1e-10
# indexes.shape should be (n_batch, n_sample, 1)
X = X.transpose(0, 1)
assert indexes.shape[:2] == X.shape[:2]
X_I = indexes * X
mu = torch.sum(X_I, dim=1) / self.n_elite
self.sigma = (
(indexes * (X - mu.unsqueeze(1)) ** 2).sum(dim=1) / self.n_elite
).sqrt() + eps # adding eps to avoid sigma=0, which is happening when temp=None
assert self.sigma.shape == (n_batch, self._tot_dof)
return self._mu_vec_to_dict(mu)
def _optimize_loop(
self,
num_iter: int,
info: NonlinearOptimizerInfo,
verbose: bool,
end_iter_callback: Optional[EndIterCallbackType] = None,
**kwargs,
) -> int:
converged_indices = torch.zeros_like(info.last_err).bool()
iters_done = 0
for it_ in range(num_iter):
iters_done += 1
try:
mu = self._CEM_step()
except RuntimeError as error:
raise RuntimeError(f"There is an error in update {error}.")
self.objective.update(mu)
# check for convergence
with torch.no_grad():
err = self.objective.error_metric()
self._update_info(info, it_, err, converged_indices)
if verbose:
print(
f"Nonlinear optimizer. Iteration: {it_+1}. "
f"Error: {err.mean().item()} "
)
converged_indices = self._check_convergence(err, info.last_err)
info.status[
np.array(converged_indices.cpu().numpy())
] = NonlinearOptimizerStatus.CONVERGED
if converged_indices.all():
break # nothing else will happen at this point
info.last_err = err
if end_iter_callback is not None:
end_iter_callback(self, info, mu, it_)
info.status[
info.status == NonlinearOptimizerStatus.START
] = NonlinearOptimizerStatus.MAX_ITERATIONS
return iters_done
def _optimize_impl(
self,
track_best_solution: bool = False,
track_err_history: bool = False,
track_state_history: bool = False,
verbose: bool = False,
backward_mode: Union[str, BackwardMode] = BackwardMode.UNROLL,
end_iter_callback: Optional[EndIterCallbackType] = None,
**kwargs,
) -> OptimizerInfo:
backward_mode = BackwardMode.resolve(backward_mode)
init_sigma = kwargs.get("init_sigma", self.init_sigma)
self.reset_sigma(init_sigma)
with torch.no_grad():
info = self._init_info(
track_best_solution, track_err_history, track_state_history
)
if verbose:
print(
f"DCEM optimizer. Iteration: 0. "
f"Error: {info.last_err.mean().item()}"
)
if backward_mode in [BackwardMode.UNROLL, BackwardMode.DLM]:
self._optimize_loop(
num_iter=self.params.max_iterations,
info=info,
verbose=verbose,
end_iter_callback=end_iter_callback,
**kwargs,
)
            # If it didn't converge, remove the misleading converged_iter value
info.converged_iter[
info.status == NonlinearOptimizerStatus.MAX_ITERATIONS
] = -1
return info
else:
raise NotImplementedError(
"DCEM currently only supports 'unroll' backward mode."
)
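# Stand-alone sketch (illustrative only, not used by DCEM) of one vanilla cross-entropy-method
# step, mirroring the hard top-k branch of `_CEM_step`: sample around (mu, sigma), keep the
# n_elite lowest-cost samples, and refit mu and sigma from that elite set. `cost_fn` is an
# assumed callable mapping a single sample to a scalar cost.
def _example_cem_update(mu, sigma, cost_fn, n_sample=100, n_elite=5):
    samples = Normal(mu, sigma).sample((n_sample,))      # [n_sample, dof]
    costs = torch.stack([cost_fn(x) for x in samples])   # [n_sample]
    elite = samples[costs.argsort()[:n_elite]]           # [n_elite, dof]
    return elite.mean(dim=0), elite.std(dim=0)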
| [
"[email protected]"
] | |
cce4258214c9c76a0aa0de00685e225913846b9b | a7dc8f76293a2c60478c95c4720cf39b8556c9e8 | /tests/test_classify.py | 3dc694dcb8ce4841090ee4d127deb0f3d62de74f | [
"MIT"
] | permissive | FarDON/cherry | 8b67f6587a5c13603dfe5047edece218a382e904 | 28da9a05a0bf09f209829e81b8642e3fd76034e8 | refs/heads/master | 2022-11-02T13:13:12.366289 | 2020-06-22T13:56:45 | 2020-06-22T13:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,587 | py | import os
import unittest
import cherry
from unittest import mock
from cherry import classify
from sklearn.exceptions import NotFittedError
class ClassifyTest(unittest.TestCase):
def setUp(self):
pass
# __init__()
@mock.patch('cherry.classifyer.Classify._classify')
@mock.patch('cherry.classifyer.Classify._load_cache')
def test_init(self, mock_load, mock_classify):
mock_load.return_value = ('foo', 'bar')
cherry.classifyer.Classify(model='random', text=['random text'])
mock_load.assert_called_once_with('random')
mock_classify.assert_called_once_with(['random text'])
# _load_cache()
@mock.patch('cherry.classifyer.Classify._classify')
@mock.patch('cherry.classifyer.load_cache')
def test_load_cache(self, mock_load, mock_classify):
res = cherry.classifyer.Classify(model='foo', text=['random text'])
mock_load.assert_not_called()
@mock.patch('sklearn.feature_extraction.text.CountVectorizer.transform')
@mock.patch('cherry.classifyer.load_cache')
def test_classify_with_missing_token(self, mock_load, mock_trans):
mock_object = mock.Mock()
mock_object.transform.side_effect = NotFittedError()
mock_load.return_value = mock_object
# with self.assertRaises(cherry.exceptions.TokenNotFoundError) as token_error:
# res = cherry.classifyer.Classify(model='harmful', text=['random text'])
# self.assertEqual(
# str(token_error.exception),
# 'Some of the tokens in text never appear in training data')
| [
"[email protected]"
] | |
0a27993a6e8351ecb41b9f6181bea19c78bf6000 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/store/StoreTableDataProvider.py | 047518eda41afe48b100901c3b0b9c35381c591b | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 969 | py | # 2017.02.03 21:50:30 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/store/StoreTableDataProvider.py
from gui.Scaleform.framework.entities.DAAPIDataProvider import DAAPIDataProvider
class StoreTableDataProvider(DAAPIDataProvider):
def __init__(self):
super(StoreTableDataProvider, self).__init__()
self.__list = []
@property
def collection(self):
return self.__list
def buildList(self, dpList):
self.__list = dpList
def emptyItem(self):
return None
def clearList(self):
while len(self.__list):
self.__list.pop()
self.__list = None
return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\store\StoreTableDataProvider.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:50:30 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
52bf11f8be269922508207b1a1e3c7cdd7224b51 | ab6cfc2aedad3de7a04efae4a6105dc893958b9e | /hivwholeseq/patients/get_allele_cocounts.py | 733f7a025ee4e21175e08d194a24584b733f1f04 | [
"MIT"
] | permissive | neherlab/hivwholeseq | 158c0ce590bc67d1d36042c71b8b0afa3e8d8abf | 978ce4060362e4973f92b122ed5340a5314d7844 | refs/heads/master | 2021-01-15T16:48:15.769316 | 2015-09-04T08:33:52 | 2015-09-04T08:33:52 | 49,801,765 | 4 | 3 | null | 2016-01-17T03:43:46 | 2016-01-17T03:43:44 | null | UTF-8 | Python | false | false | 1,994 | py | #!/usr/bin/env python
# vim: fdm=marker
'''
author: Fabio Zanini
date: 20/03/14
content: Get the joint counts at two sites for patient samples, after mapping.
'''
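# Example invocation (patient and fragment names are illustrative):
#   python get_allele_cocounts.py --patients p1 p2 --regions F1 F2 --verbose 1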
# Modules
import argparse
import numpy as np
import matplotlib.pyplot as plt
from hivwholeseq.patients.samples import load_samples_sequenced as lssp
from hivwholeseq.patients.samples import SamplePat
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Get allele cocounts',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
pats_or_samples = parser.add_mutually_exclusive_group(required=True)
pats_or_samples.add_argument('--patients', nargs='+',
help='Patient to analyze')
pats_or_samples.add_argument('--samples', nargs='+',
help='Samples to map')
parser.add_argument('--regions', nargs='+', required=True,
help='Fragments to analyze (e.g. F1 F6)')
parser.add_argument('--verbose', type=int, default=0,
help='Verbosity level [0-3]')
parser.add_argument('--qualmin', type=int, default=30,
help='Minimal quality of base to call')
    parser.add_argument('--plot', action='store_true',
                        help='Plot the cocounts')
    args = parser.parse_args()
pnames = args.patients
samplenames = args.samples
regions = args.regions
VERBOSE = args.verbose
qual_min = args.qualmin
use_plot = args.plot
samples = lssp()
if pnames is not None:
samples = samples.loc[samples.patient.isin(pnames)]
elif samplenames is not None:
samples = samples.loc[samples.index.isin(samplenames)]
if VERBOSE >= 2:
print 'samples', samples.index.tolist()
for region in regions:
for samplename, sample in samples.iterrows():
sample = SamplePat(sample)
if VERBOSE >= 1:
print region, samplename
cocount = np.load(fn_out)['cocounts']
| [
"[email protected]"
] | |
ec461e4efcf3da5428bd688d03a049eaf481b553 | 60b8c5e048be54f49c28b2c224e86cf4d4629164 | /gluon/setup.py | ec8a8656318e076b7715cb3373652d0ac7778656 | [
"MIT"
] | permissive | kcieslik/imgclsmob | b333d2b0f8a04d15cc1c0b0d38845d1d2923ae26 | d15bc7d4ebc50a31b4ad01cb3ad0e73b8cddbc9a | refs/heads/master | 2020-06-13T06:21:01.744329 | 2019-06-28T16:05:11 | 2019-06-28T16:05:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gluoncv2',
version='0.0.47',
description='Image classification and segmentation models for Gluon',
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/osmr/imgclsmob',
author='Oleg Sémery',
author_email='[email protected]',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Image Recognition',
],
keywords='machine-learning deep-learning neuralnetwork image-classification mxnet gluon imagenet cifar svhn vgg '
'resnet pyramidnet diracnet densenet condensenet wrn drn dpn darknet fishnet espnetv2 xdensnet squeezenet '
'squeezenext shufflenet menet mobilenet igcv3 mnasnet darts xception inception polynet nasnet pnasnet ror '
'proxylessnas dianet efficientnet image-segmentation voc ade20k cityscapes coco pspnet deeplabv3 fcn',
packages=find_packages(exclude=['others', '*.others', 'others.*', '*.others.*']),
include_package_data=True,
install_requires=['numpy'],
)
| [
"[email protected]"
] | |
f39bd365db767a8011a2eb4aa13b47ed5c0ac42e | 923d035a4762a19b30d5900db91143a83837ae70 | /ichnaea/data/station.py | 8546d75f06412bf83af0c62c790ab8f2638f4774 | [
"Apache-2.0"
] | permissive | voolitels/ichnaea | d5d5da34cb30b3e0c85675e32dab3972cc31d7b0 | bd0350fcba9efb0bad3957309ed3a471ae07e41b | refs/heads/master | 2021-01-17T14:21:16.056481 | 2015-11-10T16:38:22 | 2015-11-10T16:57:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,009 | py | from collections import defaultdict
import numpy
from ichnaea.constants import (
PERMANENT_BLOCKLIST_THRESHOLD,
TEMPORARY_BLOCKLIST_DURATION,
)
from ichnaea.data.base import DataTask
from ichnaea.geocalc import (
centroid,
circle_radius,
distance,
)
from ichnaea.geocode import GEOCODER
from ichnaea.models import (
encode_cellarea,
Cell,
CellBlocklist,
StatCounter,
StatKey,
WifiShard,
)
from ichnaea.models.constants import (
CELL_MAX_RADIUS,
WIFI_MAX_RADIUS,
)
from ichnaea import util
class CellRemover(DataTask):
def __init__(self, task, session, pipe):
super(CellRemover, self).__init__(task, session)
self.pipe = pipe
self.area_queue = self.task.app.data_queues['update_cellarea']
def __call__(self, cell_keys):
cells_removed = 0
changed_areas = set()
for key in cell_keys:
query = Cell.querykey(self.session, key)
cells_removed += query.delete()
changed_areas.add(encode_cellarea(
key.radio, key.mcc, key.mnc, key.lac))
if changed_areas:
self.area_queue.enqueue(list(changed_areas),
pipe=self.pipe, json=False)
return cells_removed
class StationUpdater(DataTask):
MAX_OLD_OBSERVATIONS = 1000
max_dist_meters = None
station_type = None
def __init__(self, task, session, pipe):
super(StationUpdater, self).__init__(task, session)
self.pipe = pipe
self.updated_areas = set()
self.utcnow = util.utcnow()
self.today = self.utcnow.date()
def stat_count(self, action, count, reason=None):
if count > 0:
tags = ['type:%s' % self.station_type]
if reason:
tags.append('reason:%s' % reason)
self.stats_client.incr(
'data.observation.%s' % action,
count,
tags=tags)
def __call__(self, batch=10):
raise NotImplementedError()
class CellUpdater(StationUpdater):
max_dist_meters = CELL_MAX_RADIUS
station_type = 'cell'
def __init__(self, task, session, pipe, remove_task=None):
super(CellUpdater, self).__init__(task, session, pipe)
self.remove_task = remove_task
self.data_queue = self.task.app.data_queues['update_cell']
def emit_statcounters(self, obs, stations):
day = self.today
StatCounter(StatKey.cell, day).incr(self.pipe, obs)
StatCounter(StatKey.unique_cell, day).incr(self.pipe, stations)
def emit_stats(self, added, dropped):
self.stat_count('insert', added)
for reason, count in dropped.items():
self.stat_count('drop', dropped[reason], reason=reason)
def add_area_update(self, key):
self.updated_areas.add(encode_cellarea(
key.radio, key.mcc, key.mnc, key.lac))
def queue_area_updates(self):
data_queue = self.task.app.data_queues['update_cellarea']
data_queue.enqueue(list(self.updated_areas),
pipe=self.pipe, json=False)
def blocklisted_station(self, block):
age = self.utcnow - block.time
temporary = age < TEMPORARY_BLOCKLIST_DURATION
permanent = block.count >= PERMANENT_BLOCKLIST_THRESHOLD
if temporary or permanent:
return (True, block.time, block)
return (False, block.time, block)
def blocklisted_stations(self, station_keys):
blocklist = {}
for block in CellBlocklist.iterkeys(
self.session, list(station_keys)):
blocklist[block.hashkey()] = self.blocklisted_station(block)
return blocklist
def blocklist_stations(self, moving):
moving_keys = []
new_block_values = []
for station_key, block in moving:
moving_keys.append(station_key)
if block:
block.time = self.utcnow
block.count += 1
else:
block_key = CellBlocklist.to_hashkey(station_key)
new_block_values.append(dict(
time=self.utcnow,
count=1,
**block_key.__dict__
))
if new_block_values:
# do a batch insert of new blocks
stmt = CellBlocklist.__table__.insert(
mysql_on_duplicate='time = time' # no-op
)
# but limit the batch depending on each model
ins_batch = CellBlocklist._insert_batch
for i in range(0, len(new_block_values), ins_batch):
batch_values = new_block_values[i:i + ins_batch]
self.session.execute(stmt.values(batch_values))
if moving_keys:
self.stats_client.incr(
'data.station.blocklist',
len(moving_keys),
tags=['type:%s' % self.station_type,
'action:add',
'reason:moving'])
self.remove_task.delay(moving_keys)
def new_station_values(self, station, station_key,
first_blocked, observations):
# This function returns a 3-tuple, the first element is True,
# if the station was found to be moving.
# The second element is either None or a dict of values,
# if the station is new and should result in a table insert
# The third element is either None or a dict of values
# if the station did exist and should be updated
obs_length = len(observations)
obs_positions = numpy.array(
[(obs.lat, obs.lon) for obs in observations],
dtype=numpy.double)
obs_lat, obs_lon = centroid(obs_positions)
values = {
'modified': self.utcnow,
}
values.update(station_key.__dict__)
if self.station_type == 'cell':
# pass on extra psc column which is not actually part
# of the stations hash key
values['psc'] = observations[-1].psc
created = self.utcnow
if station is None:
if first_blocked:
# if the station did previously exist, retain at least the
# time it was first put on a blocklist as the creation date
created = first_blocked
values.update({
'created': created,
'radius': 0,
'samples': 0,
})
if (station is not None and
station.lat is not None and station.lon is not None):
obs_positions = numpy.append(obs_positions, [
(station.lat, station.lon),
(numpy.nan if station.max_lat is None else station.max_lat,
numpy.nan if station.max_lon is None else station.max_lon),
(numpy.nan if station.min_lat is None else station.min_lat,
numpy.nan if station.min_lon is None else station.min_lon),
], axis=0)
existing_station = True
else:
values['lat'] = obs_lat
values['lon'] = obs_lon
existing_station = False
max_lat, max_lon = numpy.nanmax(obs_positions, axis=0)
min_lat, min_lon = numpy.nanmin(obs_positions, axis=0)
# calculate sphere-distance from opposite corners of
# bounding box containing current location estimate
# and new observations; if too big, station is moving
box_dist = distance(min_lat, min_lon, max_lat, max_lon)
        # TODO: If we get a too large box_dist, we should not create
        # a new station record with the impossibly big distance, so the
        # box_dist > self.max_dist_meters check should probably move up here.
if existing_station:
if box_dist > self.max_dist_meters:
# Signal a moving station and return early without updating
# the station since it will be deleted by caller momentarily
return (True, None, None)
# limit the maximum weight of the old station estimate
old_weight = min(station.samples,
self.MAX_OLD_OBSERVATIONS)
new_weight = old_weight + obs_length
values['lat'] = ((station.lat * old_weight) +
(obs_lat * obs_length)) / new_weight
values['lon'] = ((station.lon * old_weight) +
(obs_lon * obs_length)) / new_weight
# increase total counter
if station is not None:
values['samples'] = station.samples + obs_length
else:
values['samples'] = obs_length
# update max/min lat/lon columns
values['min_lat'] = float(min_lat)
values['min_lon'] = float(min_lon)
values['max_lat'] = float(max_lat)
values['max_lon'] = float(max_lon)
# give radius estimate between extreme values and centroid
values['radius'] = circle_radius(
values['lat'], values['lon'],
max_lat, max_lon, min_lat, min_lon)
if station is None:
# return new values
return (False, values, None)
else:
# return updated values, remove station from session
self.session.expunge(station)
return (False, None, values)
def __call__(self, batch=10):
all_observations = self.data_queue.dequeue(batch=batch)
drop_counter = defaultdict(int)
added = 0
new_stations = 0
station_obs = defaultdict(list)
for obs in all_observations:
station_obs[Cell.to_hashkey(obs)].append(obs)
if not station_obs:
return (0, 0)
stations = {}
for station in Cell.iterkeys(self.session, list(station_obs.keys())):
stations[station.hashkey()] = station
blocklist = self.blocklisted_stations(station_obs.keys())
new_station_values = []
changed_station_values = []
moving_stations = set()
for station_key, observations in station_obs.items():
blocked, first_blocked, block = blocklist.get(
station_key, (False, None, None))
if not any(observations):
continue
if blocked:
# Drop observations for blocklisted stations.
drop_counter['blocklisted'] += len(observations)
continue
station = stations.get(station_key, None)
if station is None and not first_blocked:
# We discovered an actual new never before seen station.
new_stations += 1
moving, new_values, changed_values = self.new_station_values(
station, station_key, first_blocked, observations)
if moving:
moving_stations.add((station_key, block))
else:
added += len(observations)
if new_values:
new_station_values.append(new_values)
if changed_values:
changed_station_values.append(changed_values)
# track potential updates to dependent areas
self.add_area_update(station_key)
if new_station_values:
# do a batch insert of new stations
stmt = Cell.__table__.insert(
mysql_on_duplicate='psc = psc' # no-op
)
# but limit the batch depending on each model
ins_batch = Cell._insert_batch
for i in range(0, len(new_station_values), ins_batch):
batch_values = new_station_values[i:i + ins_batch]
self.session.execute(stmt.values(batch_values))
if changed_station_values:
# do a batch update of changed stations
ins_batch = Cell._insert_batch
for i in range(0, len(changed_station_values), ins_batch):
batch_values = changed_station_values[i:i + ins_batch]
self.session.bulk_update_mappings(Cell, batch_values)
if self.updated_areas:
self.queue_area_updates()
if moving_stations:
self.blocklist_stations(moving_stations)
self.emit_stats(added, drop_counter)
self.emit_statcounters(added, new_stations)
if self.data_queue.enough_data(batch=batch): # pragma: no cover
self.task.apply_async(
kwargs={'batch': batch},
countdown=2,
expires=10)
return (len(stations) + len(new_station_values), len(moving_stations))
class WifiUpdater(StationUpdater):
max_dist_meters = WIFI_MAX_RADIUS
station_type = 'wifi'
def __init__(self, task, session, pipe, shard_id=None):
super(WifiUpdater, self).__init__(task, session, pipe)
self.shard_id = shard_id
queue_name = '%s_%s' % ('update_wifi', shard_id)
self.data_queue = self.task.app.data_queues[queue_name]
def emit_stats(self, stats_counter, drop_counter):
day = self.today
StatCounter(StatKey.wifi, day).incr(
self.pipe, stats_counter['obs'])
StatCounter(StatKey.unique_wifi, day).incr(
self.pipe, stats_counter['new_station'])
self.stat_count('insert', stats_counter['obs'])
for reason, count in drop_counter.items():
self.stat_count('drop', drop_counter[reason], reason=reason)
if stats_counter['block']:
self.stats_client.incr(
'data.station.blocklist',
stats_counter['block'],
tags=['type:%s' % self.station_type,
'action:add',
'reason:moving'])
def station_values(self, station_key, shard_station, observations):
"""
Return two-tuple of status, value dict where status is one of:
`new`, `new_moving`, `moving`, `changed`.
"""
# cases:
# we always get a station key and observations
# 0. observations disagree
# 0.a. no shard station, return new_moving
# 0.b. shard station, return moving
# 1. no shard station
# 1.a. obs agree -> return new
# 2. shard station
# 2.a. obs disagree -> return moving
# 2.b. obs agree -> return changed
created = self.utcnow
values = {
'mac': station_key,
'modified': self.utcnow,
}
obs_length = len(observations)
obs_positions = numpy.array(
[(obs.lat, obs.lon) for obs in observations],
dtype=numpy.double)
obs_new_lat, obs_new_lon = centroid(obs_positions)
obs_max_lat, obs_max_lon = numpy.nanmax(obs_positions, axis=0)
obs_min_lat, obs_min_lon = numpy.nanmin(obs_positions, axis=0)
obs_box_dist = distance(obs_min_lat, obs_min_lon,
obs_max_lat, obs_max_lon)
if obs_box_dist > self.max_dist_meters:
# the new observations are already too far apart
if not shard_station:
values.update({
'created': created,
'block_first': self.today,
'block_last': self.today,
'block_count': 1,
})
return ('new_moving', values)
else:
block_count = shard_station.block_count or 0
values.update({
'lat': None,
'lon': None,
'max_lat': None,
'min_lat': None,
'max_lon': None,
'min_lon': None,
'radius': None,
'region': shard_station.region,
'samples': None,
'source': None,
'block_first': shard_station.block_first or self.today,
'block_last': self.today,
'block_count': block_count + 1,
})
return ('moving', values)
if shard_station is None:
# totally new station, only agreeing observations
radius = circle_radius(
obs_new_lat, obs_new_lon,
obs_max_lat, obs_max_lon, obs_min_lat, obs_min_lon)
values.update({
'created': created,
'lat': obs_new_lat,
'lon': obs_new_lon,
'max_lat': float(obs_max_lat),
'min_lat': float(obs_min_lat),
'max_lon': float(obs_max_lon),
'min_lon': float(obs_min_lon),
'radius': radius,
'region': GEOCODER.region(obs_new_lat, obs_new_lon),
'samples': obs_length,
'source': None,
})
return ('new', values)
else:
# shard_station + new observations
positions = numpy.append(obs_positions, [
(numpy.nan if shard_station.lat is None
else shard_station.lat,
numpy.nan if shard_station.lon is None
else shard_station.lon),
(numpy.nan if shard_station.max_lat is None
else shard_station.max_lat,
numpy.nan if shard_station.max_lon is None
else shard_station.max_lon),
(numpy.nan if shard_station.min_lat is None
else shard_station.min_lat,
numpy.nan if shard_station.min_lon is None
else shard_station.min_lon),
], axis=0)
max_lat, max_lon = numpy.nanmax(positions, axis=0)
min_lat, min_lon = numpy.nanmin(positions, axis=0)
box_dist = distance(min_lat, min_lon, max_lat, max_lon)
if box_dist > self.max_dist_meters:
# shard_station + disagreeing observations
block_count = shard_station.block_count or 0
values.update({
'lat': None,
'lon': None,
'max_lat': None,
'min_lat': None,
'max_lon': None,
'min_lon': None,
'radius': None,
'region': shard_station.region,
'samples': None,
'source': None,
'block_first': shard_station.block_first or self.today,
'block_last': self.today,
'block_count': block_count + 1,
})
return ('moving', values)
else:
# shard_station + agreeing observations
if shard_station.lat is None or shard_station.lon is None:
old_weight = 0
else:
old_weight = min((shard_station.samples or 0),
self.MAX_OLD_OBSERVATIONS)
new_lat = ((obs_new_lat * obs_length +
(shard_station.lat or 0.0) * old_weight) /
(obs_length + old_weight))
new_lon = ((obs_new_lon * obs_length +
(shard_station.lon or 0.0) * old_weight) /
(obs_length + old_weight))
samples = (shard_station.samples or 0) + obs_length
radius = circle_radius(
new_lat, new_lon, max_lat, max_lon, min_lat, min_lon)
region = shard_station.region
if (region and not GEOCODER.in_region(
new_lat, new_lon, region)):
# reset region if it no longer matches
region = None
if not region:
region = GEOCODER.region(new_lat, new_lon)
values.update({
'lat': new_lat,
'lon': new_lon,
'max_lat': float(max_lat),
'min_lat': float(min_lat),
'max_lon': float(max_lon),
'min_lon': float(min_lon),
'radius': radius,
'region': region,
'samples': samples,
'source': None,
# use the exact same keys as in the moving case
'block_first': shard_station.block_first,
'block_last': shard_station.block_last,
'block_count': shard_station.block_count,
})
return ('changed', values)
return (None, None) # pragma: no cover
def _shard_observations(self, observations):
sharded_obs = {}
for obs in observations:
if obs is not None:
shard = WifiShard.shard_model(obs.mac)
if shard not in sharded_obs:
sharded_obs[shard] = defaultdict(list)
sharded_obs[shard][obs.mac].append(obs)
return sharded_obs
def _query_stations(self, shard, shard_values):
macs = list(shard_values.keys())
rows = (self.session.query(shard)
.filter(shard.mac.in_(macs))).all()
blocklist = {}
stations = {}
for row in rows:
stations[row.mac] = row
blocklist[row.mac] = row.blocked(today=self.today)
return (blocklist, stations)
def _update_shard(self, shard, shard_values,
drop_counter, stats_counter):
new_data = defaultdict(list)
blocklist, stations = self._query_stations(shard, shard_values)
for station_key, observations in shard_values.items():
if blocklist.get(station_key, False):
# Drop observations for blocklisted stations.
drop_counter['blocklisted'] += len(observations)
continue
shard_station = stations.get(station_key, None)
if shard_station is None:
# We discovered an actual new never before seen station.
stats_counter['new_station'] += 1
status, result = self.station_values(
station_key, shard_station, observations)
new_data[status].append(result)
if status in ('moving', 'new_moving'):
stats_counter['block'] += 1
else:
stats_counter['obs'] += len(observations)
if new_data['new']:
# do a batch insert of new stations
stmt = shard.__table__.insert(
mysql_on_duplicate='samples = samples' # no-op
)
self.session.execute(stmt.values(new_data['new']))
if new_data['new_moving']:
# do a batch insert of new moving stations
stmt = shard.__table__.insert(
mysql_on_duplicate='block_count = block_count' # no-op
)
self.session.execute(stmt.values(new_data['new_moving']))
if new_data['moving'] or new_data['changed']:
# do a batch update of changing and moving stations
self.session.bulk_update_mappings(
shard, new_data['changed'] + new_data['moving'])
def __call__(self, batch=10):
sharded_obs = self._shard_observations(
self.data_queue.dequeue(batch=batch))
if not sharded_obs:
return
drop_counter = defaultdict(int)
stats_counter = defaultdict(int)
for shard, shard_values in sharded_obs.items():
self._update_shard(shard, shard_values,
drop_counter, stats_counter)
self.emit_stats(stats_counter, drop_counter)
if self.data_queue.enough_data(batch=batch): # pragma: no cover
self.task.apply_async(
kwargs={'batch': batch, 'shard_id': self.shard_id},
countdown=2,
expires=10)
| [
"[email protected]"
] | |
a1514ff0aae5fff6ba6124c662459a1592b7a132 | 55c8fd9ce0c5bb147cbdb69274873b93b35356fc | /pathGeneration-v2/code-v2/full_eval.py | ca61e1c985c92a33e67e67192299fb8498954df2 | [] | no_license | WOW5678/pathGeneration | b4143bbbc2be686ee011d24af46d57d2cee88f06 | 88f31b4f30750307fa7f5072e7faa2f959a6d0c0 | refs/heads/master | 2020-08-06T17:46:22.075128 | 2019-11-15T12:38:07 | 2019-11-15T12:38:07 | 213,097,008 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,528 | py | import numpy as np
from sklearn.metrics import precision_recall_fscore_support, average_precision_score, \
roc_auc_score, precision_score, recall_score
thres = 0.5
def f1_score(preds, labels, thres, average='micro'):
'''Returns (precision, recall, F1 score) from a batch of predictions (thresholded probabilities)
given a batch of labels (for macro-averaging across batches)'''
#preds = (probs >= thres).astype(np.int32)
# print('probs:',probs)
# print('labels:',labels)
# print('preds:',preds)
#preds=probs
# print(preds)
# print(labels)
p, r, f, _ = precision_recall_fscore_support(labels, preds, average=average,
warn_for=())
return p, r, f
def auc_pr(probs, labels, average='micro'):
'''Precision integrated over all thresholds (area under the precision-recall curve)'''
if average == 'macro' or average is None:
sums = labels.sum(0)
nz_indices = np.logical_and(sums != labels.shape[0], sums != 0)
probs = probs[:, nz_indices]
labels = labels[:, nz_indices]
return average_precision_score(labels, probs, average=average)
def auc_roc(probs, labels, average='micro'):
'''Area under the ROC curve'''
if average == 'macro' or average is None:
sums = labels.sum(0)
nz_indices = np.logical_and(sums != labels.shape[0], sums != 0)
probs = probs[:, nz_indices]
labels = labels[:, nz_indices]
# print('labels:',labels)
# print('probs:',probs)
return roc_auc_score(labels, probs, average=average)
def precision_at_k(probs, labels, k, average='micro'):
indices = np.argpartition(-probs, k-1, axis=1)[:, :k]
preds = np.zeros(probs.shape, dtype=np.int)
preds[np.arange(preds.shape[0])[:, np.newaxis], indices] = 1
return precision_score(labels, preds, average=average)
def recall_at_k(probs, labels, k, average='micro'):
indices = np.argpartition(-probs, k-1, axis=1)[:, :k]
preds = np.zeros(probs.shape, dtype=np.int)
preds[np.arange(preds.shape[0])[:, np.newaxis], indices] = 1
return recall_score(labels, preds, average=average)
def full_evaluate(pred,probs, gold, thres=0.5):
# pred = np.array(pred)
# gold = np.array(gold)
#print(pred)
micro_p, micro_r, micro_f1 = f1_score(pred, gold, thres, average='micro')
macro_p,macro_r,macro_f1= f1_score(pred, gold, thres, average='macro')
# micro_auc_pr= auc_pr(pred, gold, average='micro')
# macro_auc_pr= auc_pr(pred, gold, average='macro')
micro_auc_roc= auc_roc(pred, gold, average='micro')
macro_auc_roc= auc_roc(pred, gold, average='macro')
precision_8= precision_at_k(probs, gold, 8, average='micro')
precision_40= precision_at_k(probs, gold, 40, average='micro')
recall_8= recall_at_k(probs, gold, 8, average='micro')
recall_40=recall_at_k(probs, gold, 40, average='micro')
return micro_p,macro_p,micro_r,macro_r,micro_f1,macro_f1,micro_auc_roc,macro_auc_roc,precision_8,precision_40,recall_8,recall_40
def jaccrad(predList, referList):  # terms_reference is the source sentence, terms_model is the candidate sentence
    grams_reference = set(predList)  # deduplicate; change to a list if deduplication is not wanted
grams_model = set(referList)
temp = 0
for i in grams_reference:
if i in grams_model:
temp = temp + 1
    fenmu = len(grams_model) + len(grams_reference) - temp  # union
    jaccard_coefficient = temp*1.0 / fenmu  # intersection over union
return jaccard_coefficient | [
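# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal check of the helpers above on tiny, made-up multilabel arrays.
# The arrays and the expected values are assumptions for illustration only.
if __name__ == '__main__':
    toy_gold = np.array([[1, 0, 1], [0, 1, 1]])
    toy_preds = np.array([[1, 0, 0], [0, 1, 1]])
    print(f1_score(toy_preds, toy_gold, thres))  # micro-averaged (precision, recall, F1)
    print(jaccrad([1, 2, 3], [2, 3, 4]))         # 2 shared items / 4 distinct items -> 0.5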
"[email protected]"
] | |
f9b7225639bb8e7345c3ae82acb0ee54276ceedb | fd67592b2338105e0cd0b3503552d188b814ad95 | /egoi_api/paths/campaign_groups/post.pyi | e20c009a7deaa011725e0a74459217db59959c7d | [] | no_license | E-goi/sdk-python | 175575fcd50bd5ad426b33c78bdeb08d979485b7 | 5cba50a46e1d288b5038d18be12af119211e5b9f | refs/heads/master | 2023-04-29T20:36:02.314712 | 2023-04-18T07:42:46 | 2023-04-18T07:42:46 | 232,095,340 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 15,827 | pyi | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from egoi_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from egoi_api import schemas # noqa: F401
from egoi_api.model.campaign_group_post import CampaignGroupPost
from egoi_api.model.unauthorized import Unauthorized
from egoi_api.model.campaign_group import CampaignGroup
from egoi_api.model.service_unavailable import ServiceUnavailable
from egoi_api.model.conflict import Conflict
from egoi_api.model.bad_request import BadRequest
from egoi_api.model.unprocessable_entity import UnprocessableEntity
from egoi_api.model.internal_server_error import InternalServerError
from egoi_api.model.too_many_requests import TooManyRequests
from egoi_api.model.forbidden import Forbidden
# body param
SchemaForRequestBodyApplicationJson = CampaignGroupPost
request_body_campaign_group_post = api_client.RequestBody(
content={
'application/json': api_client.MediaType(
schema=SchemaForRequestBodyApplicationJson),
},
required=True,
)
SchemaFor201ResponseBodyApplicationJson = CampaignGroup
@dataclass
class ApiResponseFor201(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor201ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_201 = api_client.OpenApiResponse(
response_cls=ApiResponseFor201,
content={
'application/json': api_client.MediaType(
schema=SchemaFor201ResponseBodyApplicationJson),
},
)
SchemaFor400ResponseBodyApplicationJson = BadRequest
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor400ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
content={
'application/json': api_client.MediaType(
schema=SchemaFor400ResponseBodyApplicationJson),
},
)
SchemaFor401ResponseBodyApplicationJson = Unauthorized
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor401ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
response_cls=ApiResponseFor401,
content={
'application/json': api_client.MediaType(
schema=SchemaFor401ResponseBodyApplicationJson),
},
)
SchemaFor403ResponseBodyApplicationJson = Forbidden
@dataclass
class ApiResponseFor403(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor403ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_403 = api_client.OpenApiResponse(
response_cls=ApiResponseFor403,
content={
'application/json': api_client.MediaType(
schema=SchemaFor403ResponseBodyApplicationJson),
},
)
SchemaFor409ResponseBodyApplicationJson = Conflict
@dataclass
class ApiResponseFor409(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor409ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_409 = api_client.OpenApiResponse(
response_cls=ApiResponseFor409,
content={
'application/json': api_client.MediaType(
schema=SchemaFor409ResponseBodyApplicationJson),
},
)
SchemaFor422ResponseBodyApplicationJson = UnprocessableEntity
@dataclass
class ApiResponseFor422(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor422ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_422 = api_client.OpenApiResponse(
response_cls=ApiResponseFor422,
content={
'application/json': api_client.MediaType(
schema=SchemaFor422ResponseBodyApplicationJson),
},
)
SchemaFor429ResponseBodyApplicationJson = TooManyRequests
@dataclass
class ApiResponseFor429(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor429ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_429 = api_client.OpenApiResponse(
response_cls=ApiResponseFor429,
content={
'application/json': api_client.MediaType(
schema=SchemaFor429ResponseBodyApplicationJson),
},
)
SchemaFor500ResponseBodyApplicationJson = InternalServerError
@dataclass
class ApiResponseFor500(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor500ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_500 = api_client.OpenApiResponse(
response_cls=ApiResponseFor500,
content={
'application/json': api_client.MediaType(
schema=SchemaFor500ResponseBodyApplicationJson),
},
)
SchemaFor503ResponseBodyApplicationJson = ServiceUnavailable
@dataclass
class ApiResponseFor503(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor503ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_503 = api_client.OpenApiResponse(
response_cls=ApiResponseFor503,
content={
'application/json': api_client.MediaType(
schema=SchemaFor503ResponseBodyApplicationJson),
},
)
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _create_campaign_group_oapg(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: typing_extensions.Literal["application/json"] = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def _create_campaign_group_oapg(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def _create_campaign_group_oapg(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _create_campaign_group_oapg(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor201,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _create_campaign_group_oapg(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = 'application/json',
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Create new campaign group
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
if body is schemas.unset:
raise exceptions.ApiValueError(
'The required body parameter has an invalid value of: unset. Set a valid value instead')
_fields = None
_body = None
serialized_data = request_body_campaign_group_post.serialize(body, content_type)
_headers.add('Content-Type', content_type)
if 'fields' in serialized_data:
_fields = serialized_data['fields']
elif 'body' in serialized_data:
_body = serialized_data['body']
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
fields=_fields,
body=_body,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class CreateCampaignGroup(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def create_campaign_group(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: typing_extensions.Literal["application/json"] = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def create_campaign_group(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def create_campaign_group(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def create_campaign_group(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor201,
api_client.ApiResponseWithoutDeserialization,
]: ...
def create_campaign_group(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = 'application/json',
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._create_campaign_group_oapg(
body=body,
content_type=content_type,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def post(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: typing_extensions.Literal["application/json"] = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def post(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def post(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def post(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor201,
api_client.ApiResponseWithoutDeserialization,
]: ...
def post(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = 'application/json',
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._create_campaign_group_oapg(
body=body,
content_type=content_type,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
| [
"[email protected]"
] | |
a58740e2a6ef0f1c5c1c2d3373a3d57e3b7311d6 | e6904315fef720d562727c259fe55edcaaf2f84b | /src/orion/core/io/evc_builder.py | 01094146ed00e9b0623a8a0adf56c0ef4a18b01b | [
"BSD-3-Clause"
] | permissive | mnoukhov/orion | c93c4655f6b1b6358f8ead78a3adbe9d871785c7 | 7849d77344e84ec805207cf4148aecf6f7d6b3d7 | refs/heads/master | 2020-03-25T05:37:54.251082 | 2019-08-19T17:33:15 | 2019-08-19T17:33:15 | 143,457,714 | 0 | 0 | NOASSERTION | 2018-10-31T02:37:32 | 2018-08-03T17:55:57 | Python | UTF-8 | Python | false | false | 2,275 | py | # -*- coding: utf-8 -*-
# pylint:disable=protected-access
"""
:mod:`orion.core.io.evc_builder` -- Builder of experiment version control tree
==============================================================================
.. module:: experiment
:platform: Unix
:synopsis: Builder of the experiment version control tree
The EVCBuilder takes care of building a main experiment along with an EVC tree and connecting them
together.
A user can define a root and some leaves that should be the extrema of the tree. Those can be
different from the actual root and leaves of the global EVC tree, making the trimmed version a small
subset of the global version.
"""
from orion.core.evc.experiment import ExperimentNode
from orion.core.io.experiment_builder import ExperimentBuilder
class EVCBuilder(object):
"""Builder of experiment version control trees using
:class:`orion.core.evc.experiment.ExperimentNode`
.. seealso::
`orion.core.io.experiment_builder` for more information on the process of building
experiments.
:class:`orion.core.evc.experiment`
:class:`orion.core.worker.experiment`
"""
# pylint:disable=no-self-use
def connect_to_version_control_tree(self, experiment):
"""Build the EVC and connect the experiment to it"""
experiment_node = ExperimentNode(experiment.name, experiment=experiment)
experiment.connect_to_version_control_tree(experiment_node)
def build_view_from(self, cmdargs):
"""Build an experiment view based on global config and connect it to the EVC"""
experiment_view = ExperimentBuilder().build_view_from(cmdargs)
self.connect_to_version_control_tree(experiment_view)
return experiment_view
def build_from(self, cmdargs):
"""Build an experiment based on config and connect it to the EVC"""
experiment = ExperimentBuilder().build_from(cmdargs)
self.connect_to_version_control_tree(experiment)
return experiment
def build_from_config(self, config):
"""Build an experiment based on given config and connect it to the EVC"""
experiment = ExperimentBuilder().build_from_config(config)
self.connect_to_version_control_tree(experiment)
return experiment
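# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of driving the builder; it assumes `cmdargs` is the parsed
# command-line mapping that ExperimentBuilder already accepts. Nothing below is
# taken from this file beyond the EVCBuilder API defined above.
def _example_build_view(cmdargs):
    builder = EVCBuilder()
    # The returned experiment view is already connected to its ExperimentNode
    # in the experiment version control tree.
    return builder.build_view_from(cmdargs)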
| [
"[email protected]"
] | |
a06e77569bb9fc552a12e6e6f5ee56d5c33ebea1 | 602bdbd1d8ef4d36ccfdcae5756bc8e448d30584 | /share/basiccms/web/checkout.py | 86bb792ceeb5be2a1dd97fafe87b116f9d8f365f | [] | no_license | timparkin/timparkingallery | 1136027bf9cfbad31319958f20771a6fdc9f5fc4 | 6e6c02684a701817a2efae27e21b77765daa2c33 | refs/heads/master | 2016-09-06T00:28:16.965416 | 2008-11-25T21:15:45 | 2008-11-25T21:15:45 | 12,716 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,952 | py | from pollen.mail import mailutil
from twisted.internet import defer
from twisted.python import log
from nevow import url, accessors, inevow, tags as T, rend
import formal
from crux import skin, icrux
from tub.public.web.common import getStoreSession
from ecommerce.salesorder.manager import SalesOrder, SalesOrderItem
from ecommerce.salesorder.util import createSalesOrderItem
from basiccms import basket as dw_basket
from basiccms.web import common
from basiccms.web.utils import RenderFragmentMixin, RenderInheritMixin
class DetailsPage(RenderInheritMixin, RenderFragmentMixin, common.Page):
docFactory = skin.loader('CheckoutDetailsPage.html')
def __init__(self, avatar):
super(DetailsPage, self).__init__()
self.avatar = avatar
def getCountryOptions(self, storeSession):
data = {}
d = self.avatar.getDeliveryCountries(storeSession)
d.addCallback(lambda options: data.update({'delivery': options}))
d.addCallback(lambda ignore: self.avatar.realm.getBillingCountryOptions())
d.addCallback(lambda options: data.update({'billing': options}))
d.addCallback(lambda options: data)
return d
def form_details(self, ctx):
storeSession = getStoreSession(ctx)
d = self.getCountryOptions(storeSession)
d.addCallback(lambda options: self._build_details_form(options['billing'], options['delivery']))
return d
def _build_details_form(self, billingCountryOptions, deliveryCountryOptions):
form = formal.Form()
form.addField('firstName', formal.String(required=True, strip=True))
form.addField('lastName', formal.String(required=True, strip=True))
form.addField('phoneNumber', formal.String(required=True, strip=True))
form.addField('billingAddress1', formal.String(required=True, strip=True))
form.addField('billingAddress2', formal.String(strip=True))
form.addField('billingAddress3', formal.String(strip=True))
form.addField('billingCity', formal.String(required=True, strip=True))
form.addField('billingPostcode', formal.String(required=True, strip=True))
form.addField('billingCountry', formal.String(required=True, strip=True),
widgetFactory=formal.widgetFactory(formal.SelectChoice, options=billingCountryOptions) )
form.addField('cardType', formal.String(required=True),
formal.widgetFactory(formal.SelectChoice, CommonData.Cards))
form.addField('cardNumber', formal.String(required=True, strip=True))
form.addField('cvv2', formal.String(required=True, strip=True),
label='Card Security Code',description='last three numbers on signature strip')
form.addField('expiryDate', formal.Date(required=True),
formal.widgetFactory(formal.MMYYDatePartsInput), description='e.g. 12/05' )
form.addField('issueNumber', formal.String(strip=True),
description='for maestro and switch only')
form.addField('startDate', formal.Date(),
formal.widgetFactory(formal.MMYYDatePartsInput), description='for switch only' )
delivery = formal.Group('delivery', label='Delivery Address', description="Only enter details here if the delivery address is different from the billing address above.")
form.add( delivery )
delivery.add( formal.Field('name', formal.String(strip=True)) )
delivery.add( formal.Field('address1', formal.String(strip=True)))
delivery.add( formal.Field('address2', formal.String(strip=True)))
delivery.add( formal.Field('address3', formal.String(strip=True)))
delivery.add( formal.Field('city', formal.String(strip=True)))
delivery.add( formal.Field('postcode', formal.String(strip=True)) )
delivery.add( formal.Field('country', formal.String(strip=True),
widgetFactory=formal.widgetFactory(formal.SelectChoice, options=deliveryCountryOptions)) )
message = formal.Group('message', label='Gift Message', description="If you have chosen to use our gift wrapping service you can specify a message here")
form.add( message )
message.add( formal.Field('message', formal.String(strip=True), widgetFactory=formal.TextArea) )
form.addAction(self._confirm, label="Confirm Order")
if self.avatar.checkoutDetails:
form.data = self.avatar.checkoutDetails
elif self.avatar.customer:
form.data = {
'firstName': self.avatar.customer.first_name,
'lastName': self.avatar.customer.last_name,
'phoneNumber': self.avatar.customer.phoneNumber,
'billingAddress1': self.avatar.customer.billingAddress1,
'billingAddress2': self.avatar.customer.billingAddress2,
'billingAddress3': self.avatar.customer.billingAddress3,
'billingCity': self.avatar.customer.billingCity,
'billingPostcode': self.avatar.customer.billingPostcode,
'billingCountry': self.avatar.customer.billingCountry,
}
if self.avatar.realm.config['ecommerce']['paymentGateway'].get('use_test_data', False):
from datetime import date
from dateutil.relativedelta import relativedelta
form.data['cardType'] = 'VISA'
form.data['cardNumber'] = '4111111111111111'
form.data['cvv2'] = '432'
form.data['expiryDate'] = date.today()+relativedelta(months=6)
return form
def _confirm(self, ctx, form, data):
deliveryAddressSpecified = data['delivery.address1'] or data['delivery.address2'] or data['delivery.address3']
if data['delivery.name'] or deliveryAddressSpecified or data['delivery.city'] \
or data['delivery.postcode'] or data['delivery.country']:
if not data['delivery.name']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.name')
if not deliveryAddressSpecified:
raise formal.FieldError('All delivery details must be entered.', 'delivery.address1')
if not data['delivery.city']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.city')
if not data['delivery.postcode']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.postcode')
if not data['delivery.country']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.country')
self.avatar.checkoutDetails = data
if data['delivery.country']:
if self.avatar.basket.deliveryOptions.getCurrentCountry() != data['delivery.country'].lower():
raise formal.FieldError('Delivery country does not match basket delivery option.', 'delivery.country')
else:
if self.avatar.basket.deliveryOptions.getCurrentCountry() != data['billingCountry'].lower():
raise formal.FieldError('Delivery country does not match basket delivery option.', 'billingCountry')
return url.URL.fromContext(ctx).sibling('confirm')
class ThankYouPage(common.Page):
docFactory = skin.loader('CheckoutThankYouPage.html')
def __init__(self, avatar):
super(ThankYouPage, self).__init__()
self.avatar = avatar
def render_order_num(self, ctx, data):
order_num = inevow.IRequest(ctx).args.get('order_num', [''])[0]
return order_num
def render_tracking(self, ctx, data):
order_num = inevow.IRequest(ctx).args.get('order_num', [''])[0]
basket_value = inevow.IRequest(ctx).args.get('basket_value', [''])[0]
ctx.tag.fillSlots('order_num', order_num)
ctx.tag.fillSlots('basket_value', basket_value)
return ctx.tag
def debug(r, mess):
print '>>DEBUG', mess, r
return r
| [
"[email protected]"
] | |
890a0e4832d87c843d5509306210f0da7f740075 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/TSZLMM/YW_TSZLMM_SZXJ_085.py | aee9b54b61b3b19aec3adc52e31a8f6ab6a2da24 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_TSZLMM_SZXJ_085(xtp_test_case):
# YW_TSZLMM_SZXJ_085
def test_YW_TSZLMM_SZXJ_085(self):
title = '默认3:订单报价超过涨跌幅限制-深A限价卖><跌停价(跌停价-0.02)'
        # Define the expected values for the current test case
        # Expected status: initial, not filled, partially filled, fully filled, partial-cancel reported, partially cancelled, reported pending cancel, cancelled, rejected order, cancel rejected, internal cancel
        # xtp_ID and cancel_xtpID default to 0 and do not need to be changed
case_goal = {
'期望状态': '废单',
'errorID': 11010122,
'errorMSG': queryOrderErrorMsg(11010122),
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameter information ------------------------------------------
        # Parameters: ticker code, market, security type, security status, trading status, side (B buy / S sell), expected status, Api
stkparm = QueryStkPriceQty('003154', '2', '0', '10', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':trade_type + 1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'price': stkparm['跌停价']-0.02,
'quantity': 200,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
48c38008dc8f830780911cc0ffbe98050fe9f2b8 | 337815ff32ebbf6e8dd2606f69d66e8efda4cd03 | /epi_judge_python_solutions/is_string_palindromic_punctuation.py | 8a74011a9f894f17696adcf9b67b7a1ac42109d9 | [] | no_license | federicociner/epi | b85eefbf5f5bad77e2e780ffbf4ac4f9ca0809a8 | 32f2a1056353bca55d0d5839be5e0b73809cb45d | refs/heads/master | 2020-12-19T09:22:43.430370 | 2020-02-04T02:34:53 | 2020-02-04T02:34:53 | 235,693,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | from test_framework import generic_test
def is_palindrome(s: str) -> bool:
# i moves forward, and j moves backward.
i, j = 0, len(s) - 1
while i < j:
# i and j both skip non-alphanumeric characters.
while not s[i].isalnum() and i < j:
i += 1
while not s[j].isalnum() and i < j:
j -= 1
if s[i].lower() != s[j].lower():
return False
i, j = i + 1, j - 1
return True
def is_palindrome_pythonic(s):
return all(
a == b
for a, b in zip(
map(str.lower, filter(str.isalnum, s)),
map(str.lower, filter(str.isalnum, reversed(s))),
)
)
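# --- Illustrative check (added for clarity; not part of the original file) ---
# Both variants should agree on a classic mixed-punctuation palindrome; the
# sample strings are arbitrary examples, not cases from the .tsv test data.
def _example_cases():
    assert is_palindrome("A man, a plan, a canal: Panama")
    assert is_palindrome_pythonic("A man, a plan, a canal: Panama")
    assert not is_palindrome("Ray a Ray")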
if __name__ == "__main__":
exit(
generic_test.generic_test_main(
"is_string_palindromic_punctuation.py",
"is_string_palindromic_punctuation.tsv",
is_palindrome,
)
)
| [
"[email protected]"
] | |
77ffa800cee616cbc92dbb8224e7af3e41aaee4c | 7f114a1fb511b816c116d5b9e67cb998e3e23956 | /PyplayS163.py | 8fb12da406b708d8118f33d4a51858ee26d8c0b8 | [] | no_license | Bharanij27/bharanirep | 90ac34eb28deaa7ec96d042de456de71b96866d7 | 982133a7939c889d433c178a601441fa087293d9 | refs/heads/master | 2021-08-07T20:22:36.244395 | 2020-06-05T04:58:10 | 2020-06-05T04:58:10 | 186,580,768 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | n,k=map(int,input().split())
l=list(map(int,input().split()))
if k in l: print("yes")
else: print("no")
| [
"[email protected]"
] | |
b0f33f7fcb55a25559aa9ec6e4005f66fd5f16e2 | 93e8c89c7d83c00280c32ea9f5330d3d4cf9a6d9 | /ch_10_oops/03_instance_class_attributes.py | 15f55e56c13b45b76c5dba6b6df9c1a4364bb31a | [] | no_license | vidhisharma1212/oops | 1d76940d084b3828db6f4bd9093ee18a8e512183 | fb4252683c652a18c818948dd328c8903f2d04ee | refs/heads/main | 2023-07-01T21:28:46.823861 | 2021-08-09T08:40:54 | 2021-08-09T08:40:54 | 393,379,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | class Employee:
company= 'Google'
# salary= 900
vid= Employee()
ron= Employee()
shyam= Employee()
# vid.salary= 300
# ron.salary= 500
Employee.salary= 900
print(vid.salary)
print(ron.salary)
print(shyam.salary)
shyam.salary=100000
print(shyam.salary)
print(vid.company)
print(ron.company)
Employee.company= 'Youtube'
print(vid.company)
print(ron.company) | [
"[email protected]"
] | |
eee813bc568246e104455e856e8360de7d16c2cb | 975fb6bf66c878415a880003c2d635163cb664d8 | /qoc/standard/functions/convenience.py | 35ea6b8916c0317e9234a845118aaab8e03ea59a | [
"MIT"
] | permissive | SchusterLab/qoc | 8833628a9b7df3727b982b667310059563dfded7 | 36d615170effc1b705d4543d92f979e511edfec2 | refs/heads/master | 2023-06-07T07:49:33.720205 | 2023-03-12T20:19:55 | 2023-03-12T20:19:55 | 198,457,530 | 12 | 14 | MIT | 2021-05-10T02:23:11 | 2019-07-23T15:24:41 | Python | UTF-8 | Python | false | false | 2,797 | py | """
convenience.py - definitions of common computations
All functions in this module that are exported,
i.e. those that don't begin with '_', are autograd compatible.
"""
from functools import reduce
from autograd.extend import defvjp, primitive
import autograd.numpy as anp
import numpy as np
import scipy.linalg as la
### COMPUTATIONS ###
def commutator(a, b):
"""
Compute the commutator of two matrices.
Arguments:
a :: numpy.ndarray - the left matrix
b :: numpy.ndarray - the right matrix
Returns:
_commutator :: numpy.ndarray - the commutator of a and b
"""
commutator_ = anp.matmul(a, b) - anp.matmul(b, a)
return commutator_
def conjugate_transpose(matrix):
"""
Compute the conjugate transpose of a matrix.
Args:
matrix :: numpy.ndarray - the matrix to compute
the conjugate transpose of
operation_policy :: qoc.OperationPolicy - what data type is
used to perform the operation and with which method
Returns:
        _conjugate_transpose :: numpy.ndarray - the conjugate transpose
of matrix
"""
conjugate_transpose_ = anp.conjugate(anp.swapaxes(matrix, -1, -2))
return conjugate_transpose_
def krons(*matrices):
"""
Compute the kronecker product of a list of matrices.
Args:
matrices :: numpy.ndarray - the list of matrices to
compute the kronecker product of
operation_policy :: qoc.OperationPolicy - what data type is
used to perform the operation and with which method
"""
krons_ = reduce(anp.kron, matrices)
return krons_
def matmuls(*matrices):
"""
    Compute the matrix product of a list of matrices.
    Args:
        matrices :: numpy.ndarray - the list of matrices to
            compute the matrix product of
operation_policy :: qoc.OperationPolicy - what data type is
used to perform the operation and with which method
"""
matmuls_ = reduce(anp.matmul, matrices)
return matmuls_
def rms_norm(array):
"""
Compute the rms norm of the array.
Arguments:
array :: ndarray (N) - The array to compute the norm of.
Returns:
norm :: float - The rms norm of the array.
"""
square_norm = anp.sum(array * anp.conjugate(array))
size = anp.prod(anp.shape(array))
rms_norm_ = anp.sqrt(square_norm / size)
return rms_norm_
### ISOMORPHISMS ###
# A row vector is np.array([[0, 1, 2]])
# A column vector is np.array([[0], [1], [2]])
column_vector_list_to_matrix = (lambda column_vector_list:
anp.hstack(column_vector_list))
matrix_to_column_vector_list = (lambda matrix:
anp.stack([anp.vstack(matrix[:, i])
for i in range(matrix.shape[1])]))
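# --- Illustrative round-trip sketch (added for clarity; not part of the original module) ---
# A small sanity check of the isomorphism helpers above; the 2x2 matrix is an
# arbitrary example value, not project data.
def _example_round_trip():
    m = anp.array([[1.0, 2.0], [3.0, 4.0]])
    columns = matrix_to_column_vector_list(m)        # stack of (2, 1) column vectors
    rebuilt = column_vector_list_to_matrix(columns)  # hstack the columns back into a matrix
    return bool(anp.allclose(m, rebuilt))            # expected: True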
| [
"[email protected]"
] | |
2b112ccb194f9ad783c20cb17572fb0072f813b1 | 14804b282e567bf45c974b9a55cbdfa1907c5958 | /16_Standard_Library/A_Modules/_turtle_04.py | cd5ff64ebe782eaa5fac54f84e57482e0cd772a7 | [
"MIT"
] | permissive | Oscar-Oliveira/Python-3 | cfdcbcf4548144fb2488625f53f76b20e4d8c5b0 | fa791225a6810b75890d24407b73c5e1b514acbe | refs/heads/master | 2021-09-26T06:27:16.367956 | 2018-10-27T10:42:21 | 2018-10-27T10:42:21 | 101,991,657 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | """
turtle
"""
import turtle
import random
colors = ["blue", "black", "brown", "red", "orange", "green",
"yellow", "beige", "turquoise", "pink"]
wn = turtle.Screen()
turtles = [turtle.Turtle() for _ in range(10)]
for i, t in enumerate(turtles):
t.shape("turtle")
t.color(colors[i])
t.penup()
t.goto(-260, i * 30)
t.pendown()
for _ in range(100):
for _, t in enumerate(turtles):
t.forward(random.randint(0, 10))
wn.listen()
wn.mainloop()
| [
"[email protected]"
] | |
2d023ce13d42aeef06d982be4b8609792e5496ca | 23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9 | /app/admin/__init__.py | b72f72aeec303c88ed6a8f146eb448e50be15bcf | [] | no_license | Cuick/traversing | 210fcfb1c780037de59343fffeb4fa4d3f2eae32 | c78982580af7f63c8bff4dcb37005b7f7c682b5b | refs/heads/master | 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | #-*- coding:utf-8 -*-
"""
created by server on 14-5-26上午11:59.
"""
import action
def doWhenStop():
"""服务器关闭前的处理
"""
pass
| [
"[email protected]"
] | |
eb905dd46d64599308b58106219fd94e874c27af | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/aio/operations_async/_vpn_server_configurations_associated_with_virtual_wan_operations_async.py | f5c8445e1d311bde344a60cba25058ed763eeb77 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 7,435 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnServerConfigurationsAssociatedWithVirtualWanOperations:
"""VpnServerConfigurationsAssociatedWithVirtualWanOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _list_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
**kwargs
) -> "models.VpnServerConfigurationsResponse":
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnServerConfigurationsResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
# Construct URL
url = self._list_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnServerConfigurationsResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnServerConfigurations'} # type: ignore
async def begin_list(
self,
resource_group_name: str,
virtual_wan_name: str,
**kwargs
) -> "models.VpnServerConfigurationsResponse":
"""Gives the list of VpnServerConfigurations associated with Virtual Wan in a resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN whose associated VpnServerConfigurations is
needed.
:type virtual_wan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: VpnServerConfigurationsResponse, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.VpnServerConfigurationsResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnServerConfigurationsResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnServerConfigurationsResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnServerConfigurations'} # type: ignore
| [
"[email protected]"
] | |
4c36661bdaf9f097617a68f74fd7e9c443e2b16d | b95fa99bb1ba2210b73251614d2613363c37f932 | /deploy/ngram-train/scripts/main-70.py | ce7b6772851d4bcf8747f79598571b1f41cf57ca | [] | no_license | lingxiao/learn-adj-relation | d1a8894fefc776ec0bd414b5f038361ed4b79d16 | dc4285af19e53d7e2d015eb6394f6c601c707da0 | refs/heads/master | 2020-12-30T16:27:51.531268 | 2017-06-07T18:59:48 | 2017-06-07T18:59:48 | 87,714,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | ############################################################
# Module : A series of measures on the graph for experiments
# Date : April 2nd, 2017
# Author : Xiao Ling
############################################################
import os
import re
import networkx as nx
from utils import *
from scripts import *
from app.config import PATH
############################################################
'''
paths
'''
_root = os.path.join(PATH['directories']['deploy'], 'ngram-train')
_word_pair_dir = os.path.join(_root, 'pairs')
_output_dir = os.path.join(_root, 'outputs')
_script_dir = os.path.join(_root ,'scripts')
'''
@Use: collect ngram counts
'''
batch = 70
word_pair_path = os.path.join(_word_pair_dir , 'batch-' + str(batch) + '.txt')
pattern_path = PATH['assets']['patterns']
ngram_dir = PATH['ngrams']['full']
out_dir = _output_dir
log_dir = PATH['directories']['log']
collect_ngram_patterns( word_pair_path
, pattern_path
, ngram_dir
, out_dir
, log_dir
, debug = False)
| [
"[email protected]"
] | |
73687bc070d5f0a867ecaa764f11fb3fba7ed95d | 28be2173e5590cc5b03119e9b83c57980e6a7e8a | /studygroups/migrations/0064_split63.py | e49732264ba24d1eaa1b270237861d4c1c7c8b63 | [
"MIT"
] | permissive | EdgarOrnelas/learning-circles | cd164f123885ed2079b34ad394c9849b370563b9 | 293c849321d735aebbdcb6c65b7c92f751f9fd89 | refs/heads/master | 2021-01-21T20:56:35.429589 | 2017-06-16T09:20:46 | 2017-06-16T09:20:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
def set_meeting_time(apps, schema_editor):
StudyGroupMeeting = apps.get_model('studygroups', 'StudyGroupMeeting')
for meeting in StudyGroupMeeting.objects.all():
meeting.meeting_time = meeting.study_group.meeting_time
meeting.save()
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0063_auto_20160309_1301'),
]
operations = [
migrations.AlterField(
model_name='studygroupmeeting',
name='meeting_time',
field=models.TimeField(),
),
migrations.RunPython(set_meeting_time),
]
| [
"[email protected]"
] | |
1fa016ff9f3a7768af348bd01c7db7b60543d4de | 1afe3895ae8969ccba6d45e531ab5d59b8a41696 | /confession/user_app/migrations/0012_auto_20190121_1659.py | 673e55aaaef40def26992ea3a95286503b101c0f | [] | no_license | FZTeam/confession | 72e3ca0b2ab6016055b4ad6791f5a69aa3732368 | 7808d2810c65d0be956270f15d8ca489e1a9defe | refs/heads/master | 2022-12-12T08:30:37.603455 | 2019-02-25T15:56:31 | 2019-02-25T15:56:31 | 167,647,099 | 0 | 1 | null | 2022-07-06T19:59:26 | 2019-01-26T03:52:59 | Python | UTF-8 | Python | false | false | 616 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-21 16:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_app', '0011_auto_20190121_1658'),
]
operations = [
migrations.AlterField(
model_name='user',
name='action_time',
field=models.TimeField(auto_now=True),
),
migrations.AlterField(
model_name='user',
name='create_time',
field=models.TimeField(auto_now_add=True),
),
]
| [
"[email protected]"
] | |
c6d924a273405d47e1ca7228439de237c16e8109 | 4904acd900496b4883c2f5b4aa6b45d1ef6654c0 | /graphgallery/utils/__init__.py | b41031c08476cf8f7aff25809ca677c95d3ae196 | [
"MIT"
] | permissive | blindSpoter01/GraphGallery | aee039edd759be9272d123463b0ad73a57e561c7 | e41caeb32a07da95364f15b85cad527a67763255 | refs/heads/master | 2023-06-17T11:42:27.169751 | 2021-07-15T03:07:39 | 2021-07-15T03:07:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from .context_manager import nullcontext
from .raise_error import raise_if_kwargs
from .tqdm import tqdm
from .progbar import Progbar
from .misc import *
from .logger import setup_logger, get_logger
from .timeout import TimeOut
| [
"[email protected]"
] | |
7eabe48f91e014ea0b88c898557d4b21f62f256b | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /puzzles/maximum_value_of_k_coins_from_piles.py | 61093ea2df9249ea3435755ccc5a997eed57f8bd | [] | no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 1,536 | py | # https://leetcode.com/problems/maximum-value-of-k-coins-from-piles/description/
"""
There are n piles of coins on a table. Each pile consists of a positive number of coins of assorted denominations.
In one move, you can choose any coin on top of any pile, remove it, and add it to your wallet.
Given a list piles, where piles[i] is a list of integers denoting the composition of the ith pile from top to bottom, and a positive integer k, return the maximum total value of coins you can have in your wallet if you choose exactly k coins optimally.
Example 1:
Input: piles = [[1,100,3],[7,8,9]], k = 2
Output: 101
Explanation:
The above diagram shows the different ways we can choose k coins.
The maximum total we can obtain is 101.
Example 2:
Input: piles = [[100],[100],[100],[100],[100],[100],[1,1,1,1,1,1,700]], k = 7
Output: 706
Explanation:
The maximum total can be obtained if we choose all coins from the last pile.
Constraints:
n == piles.length
1 <= n <= 1000
1 <= piles[i][j] <= 105
1 <= k <= sum(piles[i].length) <= 2000
"""
def max_value_of_coins(piles: list[list[int]], k: int) -> int:
n = len(piles)
dp = [[0] * (k + 1) for _ in range(n + 1)]
for i in range(1, n + 1):
for j in range(1, k + 1):
pile_sum = 0
for x in range(len(piles[i - 1])):
if j >= x + 1:
pile_sum += piles[i - 1][x]
dp[i][j] = max(dp[i][j], dp[i - 1][j - x - 1] + pile_sum)
dp[i][j] = max(dp[i][j], dp[i - 1][j])
return dp[n][k]
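# Quick self-check (illustrative addition, not part of the original solution);
# the expected values come from the two examples quoted in the docstring above.
if __name__ == "__main__":
    assert max_value_of_coins([[1, 100, 3], [7, 8, 9]], 2) == 101
    assert max_value_of_coins([[100], [100], [100], [100], [100], [100], [1, 1, 1, 1, 1, 1, 700]], 7) == 706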
| [
"[email protected]"
] | |
a39c7ff7874061c6046993e0a11fa1f4106e2173 | e62b1e748582584a5c2a05fff970fe09e72752b4 | /bra/sprzedaz.py | 4c4f500857455573c6de7867b4ae8daedbfadd1b | [] | no_license | wlodekf/jpk | 5957b515ecbcded9b4f27d6a0785ee89e3a0d585 | 1c200350f57469e890a124d07f741d836d9a0833 | refs/heads/master | 2023-07-10T20:15:11.111276 | 2021-08-11T12:21:14 | 2021-08-11T12:21:14 | 394,978,461 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,952 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
import re
import datetime
import decimal
import asyncio
import websockets
import traceback
from django.conf import settings
from django.db.models import Max, Q
from fk.models import Kon, MagDok, MagWiersz, MagNumer
from .models import Faktura, Wiersz, ImportSprzedazy
su= lambda s: s.strip().upper() if s else s
ZERO= decimal.Decimal(0.0)
def cp1250_decoder(csv_data):
"""
    Decode data from a CSV file saved in the Windows (cp1250) encoding.
    Typographic double quotes are also replaced, because the decoder lets them through.
"""
for line in csv_data:
line= line.decode('cp1250', errors= 'ignore')
line= re.sub('\u201E', '"', line)
line= re.sub('\u201D', '"', line)
yield line
def ustal_delimiter(plik, przynajmniej):
"""
    Determine whether the delimiter used in the CSV file is a semicolon or a comma.
    csv.Sniffer somehow refuses to work here.
"""
delim= ';'
for p in cp1250_decoder(plik):
if p.count(';') >= przynajmniej and p.count(',') < przynajmniej:
delim= ';'
break
if p.count(',') >= przynajmniej and p.count(';') < przynajmniej:
delim= ','
break
plik.seek(0)
return delim
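# Illustrative sketch (not part of the original module): the two helpers above are
# meant to be combined like this when reading an uploaded CSV file, mirroring their
# use in SprzedazImporter.importuj_faktury below. The `plik` argument and the
# minimum column count of 24 are assumptions made for this example.
def _przyklad_wierszy_csv(plik):
    # ustal_delimiter() consumes the file and seeks back to 0, while cp1250_decoder()
    # is a lazy generator, so the csv.reader still sees the whole file.
    reader = csv.reader(cp1250_decoder(plik), delimiter=ustal_delimiter(plik, 24))
    for row in reader:
        yield row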
class Postep():
"""
    Handles sending progress updates to the browser.
"""
def __init__(self):
self.pop= 0
self.connected= False
self.stopped= False
def loop_stop(self, loop):
loop.stop()
# def check_server(self, loop, ws_server):
# print('Checking server: ', self.connected)
# if not self.connected:
# self.stopped= True
# print(type(ws_server), dir(ws_server))
# ws_server.close()
def wykonaj(self, zadanie):
print('starting event loop')
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
start_server= websockets.serve(zadanie, '0.0.0.0', 5678)
print('trzy', type(start_server))
loop= asyncio.get_event_loop()
print('starting server')
ws_server= loop.run_until_complete(start_server)
print('ws_server', ws_server)
# loop.call_later(1.0, self.check_server, loop, a)
print('server started')
loop.run_forever()
print('loop.closing')
loop.close()
async def show_progress(self, websocket, i, ile):
# if not self.stopped:
# self.connected= True
# else:
# return
ten= int(i/ile*100)
if ten != self.pop:
await websocket.send(str(ten))
self.pop= ten
def stop_progress(self, websocket):
websocket.ws_server.close()
websocket.loop.call_later(0.1, self.loop_stop, websocket.loop)
class SprzedazImporter():
"""
    Imports sales data for JPK_FA.
    Sales are imported from CSV files in an agreed format.
    Invoice headers and line items should be stored in separate files.
    After loading, the invoices and (optional) line items are saved in tables
    in the JPK database.
    They can also be transferred to the VAT sales register in the FK system.
    The imported sales can also be used for uploading to the
    VAT sales register.
"""
def __init__(self, firma= None, imp= None):
super().__init__()
if imp:
self.firma= imp.firma.oznaczenie
self.imp= imp
else:
self.firma= firma
self.f_pominiete= 0
self.w_pominiete= 0
def sprzedaz_importuj(self, form, request):
"""
        Read invoices and line items from the CSV files and save them in the
        Faktura and Wiersz tables.
        Along the way, statistics are computed - item counts and amount totals
        per VAT rate.
"""
        # Create a record with the import summary
        # For now it holds only the file info and who/when
self.imp= ImportSprzedazy.objects.create(
firma= self.firma,
faktury= form.cleaned_data['faktury'],
wiersze= form.cleaned_data['wiersze'],
kto= request.user.username
)
self.imp.nadpisane= 0
        # Import invoices and line items from the uploaded files, saving them to the database
self.importuj_faktury(self.imp.faktury)
self.importuj_wiersze(self.imp.wiersze)
        # Store the numbers of imported invoices and line items
self.imp.ile_faktur= self.ile_faktur()
self.imp.ile_wierszy= self.ile_wierszy()
self.imp.save()
def importuj_faktury(self, plik):
"""
        Import sales invoices from a CSV file and save them in the
        jpk database, in temporary tables.
"""
self.plik= plik
# self.postep= Postep()
# self.postep.wykonaj(self._importuj_faktury)
#
# async def _importuj_faktury(self, websocket, path):
self.faktury= []
fak_reader= csv.reader(cp1250_decoder(self.plik), delimiter= ustal_delimiter(self.plik, 24))
header= None
self.imp.od_daty= None
self.imp.do_daty= None
ile= sum(1 for row in fak_reader)
self.plik.seek(0)
fak_reader= csv.reader(cp1250_decoder(self.plik), delimiter= ustal_delimiter(self.plik, 24))
for i, row in enumerate(fak_reader):
            # Skip the header row
if not header:
header= '|'.join(row)
continue
# print('|'.join(row))
            # Skip an empty row
if not row[0] or not row[1]:
self.f_pominiete += 1
continue
fak= Faktura.from_csv(row, header)
fak.import_sprzedazy= self.imp
if not self.imp.od_daty or fak.data_wystawienia < self.imp.od_daty:
self.imp.od_daty= fak.data_wystawienia
if not self.imp.do_daty or fak.data_wystawienia > self.imp.do_daty:
self.imp.do_daty= fak.data_wystawienia
self.podsumuj(fak)
if Faktura.objects.filter(import_sprzedazy__firma= self.imp.firma, ident= fak.ident).exists():
self.imp.nadpisane += 1
self.przetworz_fak(fak)
fak.save()
self.faktury.append(fak)
# await self.postep.show_progress(websocket, i, ile)
# self.postep.stop_progress(websocket)
def przetworz_fak(self, fak):
"""
        Additional processing of an invoice.
"""
if self.firma.oznaczenie == 'printf':
            # Determine the sales account from the project/order number
            # contained in the invoice number
try:
projekt= fak.nr_faktury.split('/')[2]
fak.konto_spr= '7011'+projekt+'01'
except:
pass
def importuj_wiersze(self, plik):
"""
        Import the invoice line items.
"""
def fak_pozycji(poz_ident, faktury):
for fak in faktury:
                if fak.ident == poz_ident:
return fak
return None
self.wiersze= []
if not plik: return
poz_reader= csv.reader(cp1250_decoder(plik), delimiter= ustal_delimiter(plik, 9))
header= None
for row in poz_reader:
            # Skip the header row
if not header:
header= '|'.join(row)
continue
            # Skip an empty row
if not row[0] or not row[1]:
self.w_pominiete += 1
continue
poz= Wiersz.from_csv(row, header)
poz.firma= self.firma
poz.faktura= fak_pozycji(poz.ident, self.faktury)
poz.save()
self.wiersze.append(poz)
def sprzedaz_akceptuj(self):
"""
        Accept the invoices.
"""
# self.postep= Postep()
# self.postep.wykonaj(self._sprzedaz_akceptuj)
#
#
# async def _sprzedaz_akceptuj(self, websocket, path):
"""
        The invoices have already been loaded; now duplicates are removed, i.e. if
        this import contains invoices that were already in the database, the old ones are deleted.
"""
faktury= Faktura.objects.filter(import_sprzedazy= self.imp)
ile= len(faktury)
pop= 0
self.imp.nadpisane= 0
for i, fak in enumerate(faktury):
            # Check whether an invoice with the given ident already exists
            # If so, it is deleted (and the new one will take its place)
f= Faktura.objects.filter(Q(import_sprzedazy__firma= self.imp.firma, ident= fak.ident) & ~Q(pk= fak.pk))
if f:
f.delete()
self.imp.nadpisane += 1
# await self.postep.show_progress(websocket, i, ile)
self.imp.save()
# self.postep.stop_progress(websocket)
def ile_faktur(self):
return len(self.faktury)
def ile_wierszy(self):
return len(self.wiersze)
def podsumuj(self, fak):
"""
        Work out the number of invoices in each VAT rate and
        the net and VAT totals for each rate.
"""
i= self.imp
if fak.netto_23 or fak.vat_23: i.ile_23 += 1
i.netto_23 += fak.netto_23
i.vat_23 += fak.vat_23
if fak.netto_8 or fak.vat_8: i.ile_8 += 1
i.netto_8 += fak.netto_8
i.vat_8 += fak.vat_8
if fak.netto_5 or fak.vat_5: i.ile_5 += 1
i.netto_5 += fak.netto_5
i.vat_5 += fak.vat_5
if fak.netto_0: i.ile_0 += 1
i.netto_0 += fak.netto_0
if fak.netto_zw: i.ile_zw += 1
i.netto_zw += fak.netto_zw
i.naleznosc += fak.naleznosc
class SprzedazRejestrVAT():
"""
    Transfers the imported sales into the VAT sales register in the FK system.
"""
def __init__(self, imp= None):
super().__init__()
self.imp= imp
self.firma= imp.firma.oznaczenie
def do_rejestru(self, form):
self.form= form
# self.postep= Postep()
# self.postep.wykonaj(self._do_rejestru)
#
#
# async def _do_rejestru(self, websocket, path):
"""
        Write the imported sales invoices to the sales register.
        Perhaps an import should concern only a single VAT sub-register,
        which would be specified at upload time (or when writing to the register).
"""
        # Record the fact that this import was written to the sales register
self.imp.do_rejestru= True
self.imp.rejestr= self.form.cleaned_data['rejestr']
self.imp.konto_kon= re.sub('[- ]', '', self.form.cleaned_data['konto_kon'])
self.imp.konto_spr= re.sub('[- ]', '', self.form.cleaned_data['konto_spr'])
self.imp.save()
miesiac= None
numer= None
faktury= self.imp.faktura_set.all().order_by('data_wystawienia', 'nr_faktury')
ile= len(faktury)
for i, f in enumerate(faktury):
kon= self.ustal_kon(f)
fak= MagDok()
fak.nr_dysp= f.id # powiązanie z importem (dane do JPK_FA)
fak.stat= 'D'
fak.korekta= 'K' if f.korygujaca else 'D'
fak.dzial= 'USL'
fak.symbol= 'FV'
fak.rodz_te= self.imp.rejestr
            # Possibly change rodz_te in the sales register?
            # including renumbering, but then how to avoid gaps?
            # Alternatively, do it in the import register
if not numer:
miesiac= (f.data_wystawienia.year % 100)*100 + f.data_wystawienia.month
numer= MagNumer.nastepny(dbs= self.imp.firma.oznaczenie, dzial= self.imp.rejestr, symbol= 'FR', korekta= 'D', rok= miesiac)
self.imp.od_numeru= numer
self.imp.od_daty= f.data_wystawienia
else:
numer += 1
fak.numer= numer
self.imp.do_numeru= numer
self.imp.do_daty= f.data_wystawienia
fak.kod_wydz= '000'
fak.nr_dok= f.nr_faktury
fak.data= f.data_wystawienia
fak.data_sp= f.data_sprzedazy
fak.id_kli= kon # ustalić na podstawie NIP, ewentualnie utworzyć nowego
fak.nip= f.nip_nabywcy
fak.upust_sp= 0
fak.upust_gt= 0
fak.sp_zapl= 'P'
fak.term_zapl= f.termin_platnosci or f.data_wystawienia
fak.uwagi= f.uwagi
if not fak.uwagi:
for w in f.wiersz_set.all().order_by('id'):
fak.uwagi= w.nazwa.upper()
break
fak.wart_det= 0
fak.wart_bru= f.naleznosc
fak.zaplata= 0
fak.data_pod= f.data_sprzedazy
fak.dni_na_zapl= (fak.term_zapl- fak.data).days
fak.zaplacone= 0
# Korekta
fak.nr_dow2= f.nr_korygowanej
fak.data2= f.data_korygowanej
            # Store the contractor's account in the zamow field
            # The posting automation will handle it accordingly, posting to this account
            # instead of the default one
fak.zamow= self.imp.konto_kon or f.konto_kon
fak.zamow= fak.zamow.strip() if fak.zamow else None
fak.save(using= settings.DBS(self.firma))
naleznosc= decimal.Decimal(0)
if True:
naleznosc += self.wiersz_nag(fak, f, '23', f.netto_23, f.vat_23)
naleznosc += self.wiersz_nag(fak, f, ' 8', f.netto_8, f.vat_8)
naleznosc += self.wiersz_nag(fak, f, ' 5', f.netto_5, f.vat_5)
naleznosc += self.wiersz_nag(fak, f, ' 0', f.netto_0, ZERO)
naleznosc += self.wiersz_nag(fak, f, 'ZW', f.netto_zw, ZERO)
else:
for w in f.wiersz_set.all():
naleznosc += self.wiersz(fak, w)
if naleznosc != fak.wart_bru:
fak.uwagi= 'NIEZGODNOŚĆ WARTOŚCI POZYCJI I NALEŻNOŚCI {} vs. {}'.format(naleznosc, fak.wart_bru)
fak.save()
f.fak_id= fak.id
f.save(update_fields=['fak_id'])
# await self.postep.show_progress(websocket, i, ile)
if numer and miesiac:
MagNumer.ostatni(dbs= self.imp.firma.oznaczenie, dzial= self.imp.rejestr, symbol= 'FR', korekta= 'D', rok= miesiac, numer= numer+1)
self.imp.save()
# self.postep.stop_progress(websocket)
return numer
def wiersz_nag(self, fak, f, stawka, netto, vat):
if netto != ZERO or vat != ZERO:
wie= MagWiersz()
wie.id_dok= fak
wie.il_dysp= -1
wie.il_real= -1
wie.cena_real= netto
wie.cena_ewid= vat
wie.vat= stawka
wie.wartosc= netto + vat
wie.rodzaj= '01'
wie.konto= self.imp.konto_spr or f.konto_spr
if self.firma == 'printf':
try:
wie.konto += (stawka if re.match('[A-Z]+', stawka) else '{:02d}'.format(int(stawka.strip())))
except:
traceback.print_exc()
wie.konto= wie.konto.strip() if wie.konto else None
wie.save(using= settings.DBS(self.firma))
return wie.wartosc
else:
return ZERO
def wiersz(self, fak, w):
"""
        Write the next line item to the given invoice.
"""
wie= MagWiersz()
wie.id_dok= fak
wie.il_dysp= -w.ilosc
wie.il_real= -w.ilosc
wie.jm= w.jm
wie.cena_real= w.netto
wie.cena_ewid= w.brutto - w.netto
wie.vat= w.stawka
wie.wartosc= w.brutto
wie.rodzaj= '01'
wie.upust= w.upust
wie.konto= '732170090123'
wie.save(using= settings.DBS(self.firma))
return wie.wartosc
def adres_kon(self, adres):
m= re.match('(.*)(\d\d\-\d\d\d)(.*)', adres)
if m:
return m.group(1), m.group(2), m.group(3)
return adres[:40], '', adres[40:70]
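    # Illustrative example (not part of the original code), using an assumed address string:
    #   adres_kon('ul. Prosta 1 00-950 Warszawa') -> ('ul. Prosta 1 ', '00-950', ' Warszawa')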
def ustal_kon(self, f):
"""
        Find the contractor based on the NIP (tax ID) number.
"""
kon= Kon.objects.using(settings.DBS(self.firma)).filter(id= f.nip_nabywcy)
if kon:
return kon[0]
kon= Kon()
        # Number for a foreign contractor
nr_kon= Kon.objects.using(settings.DBS(self.firma)).exclude(nr_kon__startswith= 'Z').aggregate(Max('nr_kon'))
kon.nr_kon= '{:05d}'.format(int(nr_kon['nr_kon__max'].strip())+1)
if '/' in f.nazwa_nabywcy:
kon.skrot, kon.nazwa= f.nazwa_nabywcy.split('/')
else:
kon.nazwa= f.nazwa_nabywcy
kon.id= f.nip_nabywcy
kon.idtyp= 'NIPUE' if re.match('[A-Z][A-Z]', f.nip_nabywcy) else 'NIP'
kon.ulica, kon.kod, kon.miejsc= self.adres_kon(f.adres_nabywcy)
kon.kraj= f.nip_nabywcy[:2] if re.match('[A-Z][A-Z]', f.nip_nabywcy) else 'PL'
kon.id_obcy= f.id # zapamiętanie skąd się zwiął (faktura)
kon.skrot= su(kon.skrot)
kon.nazwa= su(kon.nazwa)
kon.miejsc= su(kon.miejsc)
kon.ulica= su(kon.ulica)
kon.kiedy= datetime.date.today() # data utworzenia
kon.data_us= kon.kiedy
if f.termin_platnosci and f.data_wystawienia:
kon.term_zap= (f.termin_platnosci - f.data_wystawienia).days
kon.save(using= settings.DBS(self.firma))
return kon
| [
"[email protected]"
] | |
f6cfbb1b55ec14f10d8504e6f9cfc2d5e037a025 | 8efe56ee34c455a6b1336897f6d457acbc9c10f9 | /examples/tf/trpo_cartpole_batch_sampler.py | 0123ededecc2bc226c82064afb576a0c3b154b04 | [
"MIT"
] | permissive | neurips2020submission11699/metarl | ab18d11e708bf569d76cb2fab2bcce089badd111 | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | refs/heads/master | 2022-10-15T22:03:09.948673 | 2020-06-11T19:22:55 | 2020-06-11T19:30:58 | 268,410,657 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,140 | py | #!/usr/bin/env python3
"""This is an example to train a task with parallel sampling."""
import click
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv
from metarl.experiment import LocalTFRunner
from metarl.experiment.deterministic import set_seed
from metarl.np.baselines import LinearFeatureBaseline
from metarl.tf.algos import TRPO
from metarl.tf.policies import CategoricalMLPPolicy
from metarl.tf.samplers import BatchSampler
@click.command()
@click.option('--batch_size', type=int, default=4000)
@click.option('--max_path_length', type=int, default=100)
@wrap_experiment
def trpo_cartpole_batch_sampler(ctxt=None,
seed=1,
batch_size=4000,
max_path_length=100):
"""Train TRPO with CartPole-v1 environment.
Args:
ctxt (metarl.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
max_path_length (int): Number of timesteps to truncate paths to.
"""
set_seed(seed)
n_envs = batch_size // max_path_length
with LocalTFRunner(ctxt, max_cpus=n_envs) as runner:
env = MetaRLEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=max_path_length,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo=algo,
env=env,
sampler_cls=BatchSampler,
sampler_args={'n_envs': n_envs})
runner.train(n_epochs=100, batch_size=4000, plot=False)
trpo_cartpole_batch_sampler()
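# Usage note (not part of the original example): thanks to the click options defined
# above, the experiment can also be launched from a shell with overridden defaults,
# e.g. `python trpo_cartpole_batch_sampler.py --batch_size 2000 --max_path_length 50`
# (illustrative invocation).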
| [
"[email protected]"
] | |
ec5ee589299f8dbc8bcd523e7b26e96896864327 | 7949f96ee7feeaa163608dbd256b0b76d1b89258 | /toontown/safezone/DistributedFindFour.py | c6488b8a595e93b2cc893ff46e000b115dd9aba2 | [] | no_license | xxdecryptionxx/ToontownOnline | 414619744b4c40588f9a86c8e01cb951ffe53e2d | e6c20e6ce56f2320217f2ddde8f632a63848bd6b | refs/heads/master | 2021-01-11T03:08:59.934044 | 2018-07-27T01:26:21 | 2018-07-27T01:26:21 | 71,086,644 | 8 | 10 | null | 2018-06-01T00:13:34 | 2016-10-17T00:39:41 | Python | UTF-8 | Python | false | false | 38,816 | py | # File: t (Python 2.4)
from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from direct.task.Task import Task
from direct.interval.IntervalGlobal import *
from TrolleyConstants import *
from direct.gui.DirectGui import *
from toontown.toonbase import TTLocalizer
from direct.distributed import DistributedNode
from direct.distributed.ClockDelta import globalClockDelta
from ChineseCheckersBoard import ChineseCheckersBoard
from direct.fsm import ClassicFSM, State
from direct.fsm import StateData
from toontown.toonbase.ToontownTimer import ToontownTimer
from toontown.toonbase import ToontownGlobals
from direct.distributed.ClockDelta import *
from otp.otpbase import OTPGlobals
from direct.showbase import PythonUtil
class DistributedFindFour(DistributedNode.DistributedNode):
def __init__(self, cr):
NodePath.__init__(self, 'DistributedFindFour')
DistributedNode.DistributedNode.__init__(self, cr)
self.cr = cr
self.reparentTo(render)
self.boardNode = loader.loadModel('phase_6/models/golf/findfour_game.bam')
self.boardNode.reparentTo(self)
self.board = [
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0]]
self.exitButton = None
self.inGame = False
self.waiting = True
self.startButton = None
self.playerNum = None
self.turnText = None
self.isMyTurn = False
self.wantTimer = True
self.leaveButton = None
self.screenText = None
self.turnText = None
self.exitButton = None
self.numRandomMoves = 0
self.blinker = Sequence()
self.playersTurnBlinker = Sequence()
self.yourTurnBlinker = Sequence()
self.winningSequence = Sequence()
self.moveSequence = Sequence()
self.moveList = []
self.mySquares = []
self.playerSeats = None
self.moveCol = None
self.move = None
self.accept('mouse1', self.mouseClick)
self.traverser = base.cTrav
self.pickerNode = CollisionNode('mouseRay')
self.pickerNP = camera.attachNewNode(self.pickerNode)
self.pickerNode.setFromCollideMask(BitMask32(4096))
self.pickerRay = CollisionRay()
self.pickerNode.addSolid(self.pickerRay)
self.myHandler = CollisionHandlerQueue()
self.traverser.addCollider(self.pickerNP, self.myHandler)
self.buttonModels = loader.loadModel('phase_3.5/models/gui/inventory_gui')
self.upButton = self.buttonModels.find('**//InventoryButtonUp')
self.downButton = self.buttonModels.find('**/InventoryButtonDown')
self.rolloverButton = self.buttonModels.find('**/InventoryButtonRollover')
self.clockNode = ToontownTimer()
self.clockNode.setPos(1.1599999999999999, 0, -0.82999999999999996)
self.clockNode.setScale(0.29999999999999999)
self.clockNode.hide()
self.tintConstant = Vec4(0.25, 0.25, 0.25, 0)
self.ghostConstant = Vec4(0, 0, 0, 0.5)
self.knockSound = base.loadSfx('phase_5/audio/sfx/GUI_knock_1.mp3')
self.clickSound = base.loadSfx('phase_3/audio/sfx/GUI_balloon_popup.mp3')
self.moveSound = base.loadSfx('phase_6/audio/sfx/CC_move.mp3')
self.accept('stoppedAsleep', self.handleSleep)
ClassicFSM = ClassicFSM
State = State
import direct.fsm
self.fsm = ClassicFSM.ClassicFSM('ChineseCheckers', [
State.State('waitingToBegin', self.enterWaitingToBegin, self.exitWaitingToBegin, [
'playing',
'gameOver']),
State.State('playing', self.enterPlaying, self.exitPlaying, [
'gameOver']),
State.State('gameOver', self.enterGameOver, self.exitGameOver, [
'waitingToBegin'])], 'waitingToBegin', 'waitingToBegin')
startLoc = self.boardNode.find('**/locators')
self.locatorList = startLoc.getChildren()
self.startingPositions = self.locatorList.pop(0)
self.startingPositions = self.startingPositions.getChildren()
instancePiece = self.boardNode.find('**/pieces')
tempList = []
for x in range(7):
self.startingPositions[x].setTag('StartLocator', '%d' % x)
collNode = CollisionNode('startpicker%d' % x)
collNode.setIntoCollideMask(BitMask32(4096))
tempList.append(self.startingPositions[x].attachNewNode(collNode))
tempList[x].node().addSolid(CollisionTube(0, 0, 0.23000000000000001, 0, 0, -0.23000000000000001, 0.20000000000000001))
for z in self.startingPositions:
y = instancePiece.copyTo(z)
for val in y.getChildren():
val.hide()
tempList = []
for x in range(42):
self.locatorList[x].setTag('GamePeiceLocator', '%d' % x)
collNode = CollisionNode('startpicker%d' % x)
collNode.setIntoCollideMask(BitMask32(4096))
tempList.append(self.locatorList[x].attachNewNode(collNode))
tempList[x].node().addSolid(CollisionSphere(0, 0, 0, 0.20000000000000001))
for z in self.locatorList:
y = instancePiece.copyTo(z)
for val in y.getChildren():
val.hide()
dummyHide = instancePiece.getParent().attachNewNode('DummyHider')
instancePiece.reparentTo(dummyHide)
dummyHide.hide()
def setName(self, name):
self.name = name
def announceGenerate(self):
DistributedNode.DistributedNode.announceGenerate(self)
if self.table.fsm.getCurrentState().getName() != 'observing':
if base.localAvatar.doId in self.table.tableState:
self.seatPos = self.table.tableState.index(base.localAvatar.doId)
if self.seatPos <= 2:
for x in self.startingPositions:
x.setH(0)
for x in self.locatorList:
x.setH(0)
else:
for x in self.startingPositions:
x.setH(180)
for x in self.locatorList:
x.setH(180)
self.moveCameraForGame()
else:
self.seatPos = self.table.seatBumpForObserve
if self.seatPos > 2:
for x in self.startingPositions:
x.setH(180)
for x in self.locatorList:
x.setH(180)
self.moveCameraForGame()
def handleSleep(self, task = None):
if self.fsm.getCurrentState().getName() == 'waitingToBegin':
self.exitButtonPushed()
        if task != None:
            return Task.done
def setTableDoId(self, doId):
self.tableDoId = doId
self.table = self.cr.doId2do[doId]
self.table.setTimerFunc(self.startButtonPushed)
self.fsm.enterInitialState()
self.table.setGameDoId(self.doId)
def disable(self):
DistributedNode.DistributedNode.disable(self)
if self.leaveButton:
self.leaveButton.destroy()
self.leavebutton = None
if self.screenText:
self.screenText.destroy()
self.screenText = None
if self.turnText:
self.turnText.destroy()
self.turnText = None
self.clockNode.stop()
self.clockNode.hide()
self.ignore('mouse1')
self.ignore('stoppedAsleep')
self.fsm = None
taskMgr.remove('playerTurnTask')
def delete(self):
DistributedNode.DistributedNode.delete(self)
self.table.gameDoId = None
self.table.game = None
if self.exitButton:
self.exitButton.destroy()
if self.startButton:
self.startButton.destroy()
self.clockNode.stop()
self.clockNode.hide()
self.table.startButtonPushed = None
self.ignore('mouse1')
self.ignore('stoppedAsleep')
self.fsm = None
self.table = None
self.winningSequence.finish()
taskMgr.remove('playerTurnTask')
def getTimer(self):
self.sendUpdate('requestTimer', [])
def setTimer(self, timerEnd):
if self.fsm.getCurrentState() != None and self.fsm.getCurrentState().getName() == 'waitingToBegin' and not (self.table.fsm.getCurrentState().getName() == 'observing'):
self.clockNode.stop()
time = globalClockDelta.networkToLocalTime(timerEnd)
timeLeft = int(time - globalClock.getRealTime())
if timeLeft > 0 and timerEnd != 0:
if timeLeft > 60:
timeLeft = 60
self.clockNode.setPos(1.1599999999999999, 0, -0.82999999999999996)
self.clockNode.countdown(timeLeft, self.startButtonPushed)
self.clockNode.show()
else:
self.clockNode.stop()
self.clockNode.hide()
def setTurnTimer(self, turnEnd):
if self.fsm.getCurrentState() != None and self.fsm.getCurrentState().getName() == 'playing':
self.clockNode.stop()
time = globalClockDelta.networkToLocalTime(turnEnd)
timeLeft = int(time - globalClock.getRealTime())
if timeLeft > 0:
self.clockNode.setPos(0.64000000000000001, 0, -0.27000000000000002)
self.clockNode.countdown(timeLeft, self.doRandomMove)
self.clockNode.show()
def gameStart(self, playerNum):
if playerNum != 255:
self.playerNum = playerNum
if self.playerNum == 1:
self.playerColorString = 'Red'
else:
self.playerColorString = 'Yellow'
self.moveCameraForGame()
self.fsm.request('playing')
def sendTurn(self, playersTurn):
if self.fsm.getCurrentState().getName() == 'playing':
if playersTurn == self.playerNum:
self.isMyTurn = True
taskMgr.add(self.turnTask, 'playerTurnTask')
self.enableTurnScreenText(playersTurn)
def illegalMove(self):
self.exitButtonPushed()
def moveCameraForGame(self):
if self.table.cameraBoardTrack.isPlaying():
self.table.cameraBoardTrack.pause()
rotation = 0
if self.seatPos <= 2:
position = self.table.seats[1].getPos()
position = position + Vec3(0, -8, 12.800000000000001)
int = LerpPosHprInterval(camera, 2, position, Vec3(0, -38, 0), camera.getPos(), camera.getHpr())
else:
position = self.table.seats[4].getPos()
position = position + Vec3(0, -8, 12.800000000000001)
if camera.getH() < 0:
int = LerpPosHprInterval(camera, 2, position, Vec3(-180, -20, 0), camera.getPos(), camera.getHpr())
else:
int = LerpPosHprInterval(camera, 2, position, Vec3(180, -20, 0), camera.getPos(), camera.getHpr())
int.start()
def enterWaitingToBegin(self):
if self.table.fsm.getCurrentState().getName() != 'observing':
self.enableExitButton()
self.enableStartButton()
def exitWaitingToBegin(self):
if self.exitButton:
self.exitButton.destroy()
self.exitButton = None
if self.startButton:
self.startButton.destroy()
self.exitButton = None
self.clockNode.stop()
self.clockNode.hide()
def enterPlaying(self):
self.inGame = True
self.enableScreenText()
if self.table.fsm.getCurrentState().getName() != 'observing':
self.enableLeaveButton()
def exitPlaying(self):
self.inGame = False
if self.leaveButton:
self.leaveButton.destroy()
self.leavebutton = None
self.playerNum = None
if self.screenText:
self.screenText.destroy()
self.screenText = None
if self.turnText:
self.turnText.destroy()
self.turnText = None
self.clockNode.stop()
self.clockNode.hide()
def enterGameOver(self):
pass
def exitGameOver(self):
pass
def exitWaitCountdown(self):
self._DistributedFindFour__disableCollisions()
self.ignore('trolleyExitButton')
self.clockNode.reset()
def enableExitButton(self):
self.exitButton = DirectButton(relief = None, text = TTLocalizer.ChineseCheckersGetUpButton, text_fg = (1, 1, 0.65000000000000002, 1), text_pos = (0, -0.23000000000000001), text_scale = 0.80000000000000004, image = (self.upButton, self.downButton, self.rolloverButton), image_color = (1, 0, 0, 1), image_scale = (20, 1, 11), pos = (0.92000000000000004, 0, 0.80000000000000004), scale = 0.14999999999999999, command = lambda self = self: self.exitButtonPushed())
def enableScreenText(self):
defaultPos = (-0.69999999999999996, -0.28999999999999998)
if self.playerNum == 1:
message = 'You are Red'
color = Vec4(1, 0, 0, 1)
elif self.playerNum == 2:
message = 'You are Yellow'
color = Vec4(1, 1, 0, 1)
else:
message = TTLocalizer.CheckersObserver
color = Vec4(0, 0, 0, 1)
self.screenText = OnscreenText(text = message, pos = defaultPos, scale = 0.10000000000000001, fg = color, align = TextNode.ACenter, mayChange = 1)
def enableStartButton(self):
self.startButton = DirectButton(relief = None, text = TTLocalizer.ChineseCheckersStartButton, text_fg = (1, 1, 0.65000000000000002, 1), text_pos = (0, -0.23000000000000001), text_scale = 0.59999999999999998, image = (self.upButton, self.downButton, self.rolloverButton), image_color = (1, 0, 0, 1), image_scale = (20, 1, 11), pos = (0.92000000000000004, 0, 0.56999999999999995), scale = 0.14999999999999999, command = lambda self = self: self.startButtonPushed())
def enableLeaveButton(self):
self.leaveButton = DirectButton(relief = None, text = TTLocalizer.ChineseCheckersQuitButton, text_fg = (1, 1, 0.65000000000000002, 1), text_pos = (0, -0.13), text_scale = 0.5, image = (self.upButton, self.downButton, self.rolloverButton), image_color = (1, 0, 0, 1), image_scale = (20, 1, 11), pos = (0.92000000000000004, 0, 0.80000000000000004), scale = 0.14999999999999999, command = lambda self = self: self.exitButtonPushed())
def enableTurnScreenText(self, player):
playerOrder = [
1,
4,
2,
5,
3,
6]
message1 = TTLocalizer.CheckersIts
if self.turnText != None:
self.turnText.destroy()
if player == self.playerNum:
message2 = TTLocalizer.ChineseCheckersYourTurn
color = (0, 0, 0, 1)
elif player == 1:
message2 = "Red's Turn"
color = (1, 0, 0, 1)
elif player == 2:
message2 = "Yellow's Turn"
color = (1, 1, 0, 1)
self.turnText = OnscreenText(text = message1 + message2, pos = (-0.69999999999999996, -0.39000000000000001), scale = 0.091999999999999998, fg = color, align = TextNode.ACenter, mayChange = 1)
def startButtonPushed(self):
self.sendUpdate('requestBegin')
self.startButton.hide()
self.clockNode.stop()
self.clockNode.hide()
def exitButtonPushed(self):
self.fsm.request('gameOver')
self.table.fsm.request('off')
self.clockNode.stop()
self.clockNode.hide()
self.table.sendUpdate('requestExit')
def mouseClick(self):
messenger.send('wakeup')
if self.isMyTurn == True and self.inGame == True and not self.moveSequence.isPlaying():
if self.moveCol != None:
self.d_requestMove(self.moveCol)
self.moveCol = None
self.isMyTurn = False
taskMgr.remove('playerTurnTask')
def handleClicked(self, index):
pass
def turnTask(self, task):
if base.mouseWatcherNode.hasMouse() == False:
return task.cont
if self.isMyTurn == False:
return task.cont
if self.moveSequence.isPlaying():
return task.cont
mpos = base.mouseWatcherNode.getMouse()
self.pickerRay.setFromLens(base.camNode, mpos.getX(), mpos.getY())
self.traverser.traverse(render)
if self.myHandler.getNumEntries() > 0:
self.myHandler.sortEntries()
pickedObj = self.myHandler.getEntry(0).getIntoNodePath()
pickedObj = pickedObj.getNetTag('StartLocator')
if pickedObj:
colVal = int(pickedObj)
if colVal == self.moveCol:
return task.cont
if self.board[0][colVal] == 0:
if self.moveCol != None:
for x in self.startingPositions[self.moveCol].getChild(1).getChildren():
x.hide()
self.moveCol = colVal
if self.playerNum == 1:
self.startingPositions[self.moveCol].getChild(1).getChild(2).show()
elif self.playerNum == 2:
self.startingPositions[self.moveCol].getChild(1).getChild(3).show()
return task.cont
def d_requestMove(self, moveCol):
self.sendUpdate('requestMove', [
moveCol])
def setGameState(self, tableState, moveCol, movePos, turn):
messenger.send('wakeup')
if self.table.fsm.getCurrentState().getName() == 'observing':
isBlank = True
for x in range(7):
if self.board[5][x] != 0:
isBlank = False
break
continue
gameBlank = True
for x in range(7):
if tableState[5][x] != 0:
gameBlank = False
break
continue
if isBlank == True and gameBlank == False:
for x in range(6):
for y in range(7):
self.board[x][y] = tableState[x][y]
self.updateGameState()
return None
if moveCol == 0 and movePos == 0 and turn == 0:
for x in range(6):
for y in range(7):
self.board[x][y] = tableState[x][y]
self.updateGameState()
else:
self.animatePeice(tableState, moveCol, movePos, turn)
didIWin = self.checkForWin()
if didIWin != None:
self.sendUpdate('requestWin', [
didIWin])
def updateGameState(self):
for x in range(6):
for y in range(7):
for z in self.locatorList[x * 7 + y].getChild(1).getChildren():
z.hide()
for x in range(6):
for y in range(7):
state = self.board[x][y]
if state == 1:
self.locatorList[x * 7 + y].getChild(1).getChild(0).show()
continue
if state == 2:
self.locatorList[x * 7 + y].getChild(1).getChild(1).show()
continue
def checkForWin(self):
for x in range(6):
for y in range(7):
if self.board[x][y] == self.playerNum:
if self.checkHorizontal(x, y, self.playerNum) == True:
return [
x,
y]
elif self.checkVertical(x, y, self.playerNum) == True:
return [
x,
y]
elif self.checkDiagonal(x, y, self.playerNum) == True:
return [
x,
y]
def announceWinnerPosition(self, x, y, winDirection, playerNum):
self.isMyturn = False
if self.turnText:
self.turnText.hide()
self.clockNode.stop()
self.clockNode.hide()
if winDirection == 0:
blinkList = self.findHorizontal(x, y, playerNum)
elif winDirection == 1:
blinkList = self.findVertical(x, y, playerNum)
elif winDirection == 2:
blinkList = self.findDiagonal(x, y, playerNum)
if blinkList != []:
print blinkList
val0 = x * 7 + y
x = blinkList[0][0]
y = blinkList[0][1]
val1 = x * 7 + y
x = blinkList[1][0]
y = blinkList[1][1]
val2 = x * 7 + y
x = blinkList[2][0]
y = blinkList[2][1]
val3 = x * 7 + y
self.winningSequence = Sequence()
downBlinkerParallel = Parallel(LerpColorInterval(self.locatorList[val0], 0.29999999999999999, Vec4(0.5, 0.5, 0.5, 0.5), Vec4(1, 1, 1, 1)), LerpColorInterval(self.locatorList[val1], 0.29999999999999999, Vec4(0.5, 0.5, 0.5, 0.5), Vec4(1, 1, 1, 1)), LerpColorInterval(self.locatorList[val2], 0.29999999999999999, Vec4(0.5, 0.5, 0.5, 0.5), Vec4(1, 1, 1, 1)), LerpColorInterval(self.locatorList[val3], 0.29999999999999999, Vec4(0.5, 0.5, 0.5, 0.5), Vec4(1, 1, 1, 1)))
upBlinkerParallel = Parallel(LerpColorInterval(self.locatorList[val0], 0.29999999999999999, Vec4(1, 1, 1, 1), Vec4(0.5, 0.5, 0.5, 0.5)), LerpColorInterval(self.locatorList[val1], 0.29999999999999999, Vec4(1, 1, 1, 1), Vec4(0.5, 0.5, 0.5, 0.5)), LerpColorInterval(self.locatorList[val2], 0.29999999999999999, Vec4(1, 1, 1, 1), Vec4(0.5, 0.5, 0.5, 0.5)), LerpColorInterval(self.locatorList[val3], 0.29999999999999999, Vec4(1, 1, 1, 1), Vec4(0.5, 0.5, 0.5, 0.5)))
self.winningSequence.append(downBlinkerParallel)
self.winningSequence.append(upBlinkerParallel)
self.winningSequence.loop()
def tie(self):
self.tieSequence = Sequence(autoFinish = 1)
self.clockNode.stop()
self.clockNode.hide()
self.isMyTurn = False
self.moveSequence.finish()
if self.turnText:
self.turnText.hide()
for x in range(41):
self.tieSequence.append(Parallel(LerpColorInterval(self.locatorList[x], 0.14999999999999999, Vec4(0.5, 0.5, 0.5, 0.5), Vec4(1, 1, 1, 1)), LerpColorInterval(self.locatorList[x], 0.14999999999999999, Vec4(1, 1, 1, 1), Vec4(0.5, 0.5, 0.5, 0.5))))
whisper = WhisperPopup('This Find Four game has resulted in a Tie!', OTPGlobals.getInterfaceFont(), WhisperPopup.WTNormal)
whisper.manage(base.marginManager)
self.tieSequence.start()
def hideChildren(self, nodeList):
pass
def animatePeice(self, tableState, moveCol, movePos, turn):
messenger.send('wakeup')
for x in range(6):
for y in range(7):
self.board[x][y] = tableState[x][y]
pos = self.startingPositions[moveCol].getPos()
if turn == 0:
peice = self.startingPositions[moveCol].getChild(1).getChildren()[2]
peice.show()
elif turn == 1:
peice = self.startingPositions[moveCol].getChild(1).getChildren()[3]
peice.show()
self.moveSequence = Sequence()
startPos = self.startingPositions[moveCol].getPos()
arrayLoc = movePos * 7 + moveCol
self.moveSequence.append(LerpPosInterval(self.startingPositions[moveCol], 1.5, self.locatorList[arrayLoc].getPos(self), startPos))
self.moveSequence.append(Func(peice.hide))
self.moveSequence.append(Func(self.startingPositions[moveCol].setPos, startPos))
self.moveSequence.append(Func(self.updateGameState))
self.moveSequence.start()
def announceWin(self, avId):
self.fsm.request('gameOver')
def doRandomMove(self):
if self.isMyTurn:
if self.moveCol != None:
self.d_requestMove(self.moveCol)
self.moveCol = None
self.isMyTurn = False
taskMgr.remove('playerTurnTask')
else:
hasfound = False
while hasfound == False:
from random import *
x = randint(0, 6)
if self.board[0][x] == 0:
self.d_requestMove(x)
self.moveCol = None
self.isMyTurn = False
taskMgr.remove('playerTurnTask')
hasfound = True
continue
def doNothing(self):
pass
def checkHorizontal(self, rVal, cVal, playerNum):
if cVal == 3:
for x in range(1, 4):
if self.board[rVal][cVal - x] != playerNum:
break
if self.board[rVal][cVal - x] == playerNum and x == 3:
return True
continue
for x in range(1, 4):
if self.board[rVal][cVal + x] != playerNum:
break
if self.board[rVal][cVal + x] == playerNum and x == 3:
return True
continue
return False
elif cVal == 2:
for x in range(1, 4):
if self.board[rVal][cVal + x] != playerNum:
break
if self.board[rVal][cVal + x] == playerNum and x == 3:
return True
continue
return False
elif cVal == 4:
for x in range(1, 4):
if self.board[rVal][cVal - x] != playerNum:
break
if self.board[rVal][cVal - x] == playerNum and x == 3:
return True
continue
return False
else:
return False
def checkVertical(self, rVal, cVal, playerNum):
if rVal == 2:
for x in range(1, 4):
if self.board[rVal + x][cVal] != playerNum:
break
if self.board[rVal + x][cVal] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3:
for x in range(1, 4):
if self.board[rVal - x][cVal] != playerNum:
break
if self.board[rVal - x][cVal] == playerNum and x == 3:
return True
continue
return False
else:
return False
def checkDiagonal(self, rVal, cVal, playerNum):
if cVal <= 2:
if rVal == 2:
for x in range(1, 4):
if self.board[rVal + x][cVal + x] != playerNum:
break
if self.board[rVal + x][cVal + x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3:
for x in range(1, 4):
if self.board[rVal - x][cVal + x] != playerNum:
break
if self.board[rVal - x][cVal + x] == playerNum and x == 3:
return True
continue
return False
elif cVal >= 4:
if rVal == 2:
for x in range(1, 4):
if self.board[rVal + x][cVal - x] != playerNum:
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3:
for x in range(1, 4):
if self.board[rVal - x][cVal - x] != playerNum:
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3 and rVal == 4 or rVal == 5:
for x in range(1, 4):
if self.board[rVal - x][cVal - x] != playerNum:
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return True
continue
for x in range(1, 4):
if self.board[rVal - x][cVal - x] != playerNum:
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 0 and rVal == 1 or rVal == 2:
for x in range(1, 4):
if self.board[rVal + x][cVal - x] != playerNum:
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return True
continue
for x in range(1, 4):
if self.board[rVal + x][cVal + x] != playerNum:
break
if self.board[rVal + x][cVal + x] == playerNum and x == 3:
return True
continue
return False
return False
def findHorizontal(self, rVal, cVal, playerNum):
if cVal == 3:
retList = []
for x in range(1, 4):
retList.append([
rVal,
cVal - x])
if self.board[rVal][cVal - x] != playerNum:
retList = []
break
if self.board[rVal][cVal - x] == playerNum and x == 3:
return retList
continue
for x in range(1, 4):
retList.append([
rVal,
cVal + x])
if self.board[rVal][cVal + x] != playerNum:
retList = []
break
if self.board[rVal][cVal + x] == playerNum and x == 3:
return retList
continue
return []
elif cVal == 2:
retList = []
for x in range(1, 4):
retList.append([
rVal,
cVal + x])
if self.board[rVal][cVal + x] != playerNum:
retList = []
break
if self.board[rVal][cVal + x] == playerNum and x == 3:
return retList
continue
return []
elif cVal == 4:
retList = []
for x in range(1, 4):
retList.append([
rVal,
cVal - x])
if self.board[rVal][cVal - x] != playerNum:
retList = []
break
if self.board[rVal][cVal - x] == playerNum and x == 3:
return retList
continue
return []
else:
return []
def findVertical(self, rVal, cVal, playerNum):
if rVal == 2:
retList = []
for x in range(1, 4):
retList.append([
rVal + x,
cVal])
if self.board[rVal + x][cVal] != playerNum:
retList = []
break
if self.board[rVal + x][cVal] == playerNum and x == 3:
return retList
continue
return []
elif rVal == 3:
retList = []
for x in range(1, 4):
retList.append([
rVal - x,
cVal])
if self.board[rVal - x][cVal] != playerNum:
retList = []
break
if self.board[rVal - x][cVal] == playerNum and x == 3:
return retList
continue
return []
else:
return []
def findDiagonal(self, rVal, cVal, playerNum):
retList = []
if cVal <= 2:
if rVal == 2:
for x in range(1, 4):
retList.append([
rVal + x,
cVal + x])
if self.board[rVal + x][cVal + x] != playerNum:
retList = []
break
if self.board[rVal + x][cVal + x] == playerNum and x == 3:
return retList
continue
return []
elif rVal == 3:
for x in range(1, 4):
retList.append([
rVal - x,
cVal + x])
if self.board[rVal - x][cVal + x] != playerNum:
retList = []
break
if self.board[rVal - x][cVal + x] == playerNum and x == 3:
return retList
continue
return []
elif cVal >= 4:
if rVal == 2:
for x in range(1, 4):
retList.append([
rVal + x,
cVal - x])
if self.board[rVal + x][cVal - x] != playerNum:
retList = []
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return retList
continue
return []
elif rVal == 3:
for x in range(1, 4):
retList.append([
rVal - x,
cVal - x])
if self.board[rVal - x][cVal - x] != playerNum:
retList = []
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return retList
continue
return []
elif rVal == 3 and rVal == 4 or rVal == 5:
for x in range(1, 4):
retList.append([
rVal - x,
cVal - x])
if self.board[rVal - x][cVal - x] != playerNum:
retList = []
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return retList
continue
for x in range(1, 4):
retList.append([
rVal + x,
cVal - x])
if self.board[rVal + x][cVal - x] != playerNum:
retList = []
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return retList
continue
return []
elif rVal == 0 and rVal == 1 or rVal == 2:
for x in range(1, 4):
retList.append([
rVal + x,
cVal - x])
if self.board[rVal + x][cVal - x] != playerNum:
retList = []
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return retList
continue
for x in range(1, 4):
retList.append([
rVal + x,
cVal + x])
if self.board[rVal + x][cVal + x] != playerNum:
retList = []
break
if self.board[rVal + x][cVal + x] == playerNum and x == 3:
return retList
continue
return []
return []
| [
"[email protected]"
] | |
5477dcd8e308ebb2b6dc85d43bc6177fb264a20c | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /third_party/blink/web_tests/external/wpt/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py | 5719a859fa4bc7e4ab4d1e9329ca74b2af6666f7 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-w3c-03-bsd-license",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 11,631 | py | import operator
from ..node import NodeVisitor, DataNode, ConditionalNode, KeyValueNode, ListNode, ValueNode
from ..parser import parse
class ConditionalValue(object):
def __init__(self, node, condition_func):
self.node = node
self.condition_func = condition_func
if isinstance(node, ConditionalNode):
assert len(node.children) == 2
self.condition_node = self.node.children[0]
self.value_node = self.node.children[1]
else:
assert isinstance(node, (ValueNode, ListNode))
self.condition_node = None
self.value_node = self.node
@property
def value(self):
if isinstance(self.value_node, ValueNode):
return self.value_node.data
else:
return [item.data for item in self.value_node.children]
@value.setter
def value(self, value):
if isinstance(self.value_node, ValueNode):
self.value_node.data = value
else:
assert(isinstance(self.value_node, ListNode))
while self.value_node.children:
self.value_node.children[0].remove()
assert len(self.value_node.children) == 0
for list_value in value:
self.value_node.append(ValueNode(list_value))
def __call__(self, run_info):
return self.condition_func(run_info)
def set_value(self, value):
if type(value) not in (str, unicode):
value = unicode(value)
self.value = value
def value_as(self, type_func):
"""Get value and convert to a given type.
This is unfortunate, but we don't currently have a good way to specify that
specific properties should have their data returned as specific types"""
value = self.value
if type_func is not None:
value = type_func(value)
return value
def remove(self):
if len(self.node.parent.children) == 1:
self.node.parent.remove()
self.node.remove()
class Compiler(NodeVisitor):
def compile(self, tree, data_cls_getter=None, **kwargs):
"""Compile a raw AST into a form where conditional expressions
are represented by ConditionalValue objects that can be evaluated
at runtime.
tree - The root node of the wptmanifest AST to compile
data_cls_getter - A function taking two parameters; the previous
output node and the current ast node and returning
the class of the output node to use for the current
ast node
"""
if data_cls_getter is None:
self.data_cls_getter = lambda x, y: ManifestItem
else:
self.data_cls_getter = data_cls_getter
self.tree = tree
self.output_node = self._initial_output_node(tree, **kwargs)
self.visit(tree)
if hasattr(self.output_node, "set_defaults"):
self.output_node.set_defaults()
assert self.output_node is not None
return self.output_node
def compile_condition(self, condition):
"""Compile a ConditionalNode into a ConditionalValue.
condition: A ConditionalNode"""
data_node = DataNode()
key_value_node = KeyValueNode()
key_value_node.append(condition.copy())
data_node.append(key_value_node)
manifest_item = self.compile(data_node)
return manifest_item._data[None][0]
def _initial_output_node(self, node, **kwargs):
return self.data_cls_getter(None, None)(node, **kwargs)
def visit_DataNode(self, node):
if node != self.tree:
output_parent = self.output_node
self.output_node = self.data_cls_getter(self.output_node, node)(node)
else:
output_parent = None
assert self.output_node is not None
for child in node.children:
self.visit(child)
if output_parent is not None:
# Append to the parent *after* processing all the node data
output_parent.append(self.output_node)
self.output_node = self.output_node.parent
assert self.output_node is not None
def visit_KeyValueNode(self, node):
key_values = []
for child in node.children:
condition, value = self.visit(child)
key_values.append(ConditionalValue(child, condition))
self.output_node._add_key_value(node, key_values)
def visit_ListNode(self, node):
return (lambda x:True, [self.visit(child) for child in node.children])
def visit_ValueNode(self, node):
return (lambda x: True, node.data)
def visit_AtomNode(self, node):
return (lambda x: True, node.data)
def visit_ConditionalNode(self, node):
return self.visit(node.children[0]), self.visit(node.children[1])
def visit_StringNode(self, node):
indexes = [self.visit(child) for child in node.children]
def value(x):
rv = node.data
for index in indexes:
rv = rv[index(x)]
return rv
return value
def visit_NumberNode(self, node):
if "." in node.data:
return lambda x: float(node.data)
else:
return lambda x: int(node.data)
def visit_VariableNode(self, node):
indexes = [self.visit(child) for child in node.children]
def value(x):
data = x[node.data]
for index in indexes:
data = data[index(x)]
return data
return value
def visit_IndexNode(self, node):
assert len(node.children) == 1
return self.visit(node.children[0])
def visit_UnaryExpressionNode(self, node):
assert len(node.children) == 2
operator = self.visit(node.children[0])
operand = self.visit(node.children[1])
return lambda x: operator(operand(x))
def visit_BinaryExpressionNode(self, node):
assert len(node.children) == 3
operator = self.visit(node.children[0])
operand_0 = self.visit(node.children[1])
operand_1 = self.visit(node.children[2])
assert operand_0 is not None
assert operand_1 is not None
return lambda x: operator(operand_0(x), operand_1(x))
def visit_UnaryOperatorNode(self, node):
return {"not": operator.not_}[node.data]
def visit_BinaryOperatorNode(self, node):
return {"and": operator.and_,
"or": operator.or_,
"==": operator.eq,
"!=": operator.ne}[node.data]
class ManifestItem(object):
def __init__(self, node=None, **kwargs):
self.node = node
self.parent = None
self.children = []
self._data = {}
def __repr__(self):
return "<conditional.ManifestItem %s>" % (self.node.data)
def __str__(self):
rv = [repr(self)]
for item in self.children:
rv.extend(" %s" % line for line in str(item).split("\n"))
return "\n".join(rv)
def __contains__(self, key):
return key in self._data
@property
def is_empty(self):
if self._data:
return False
return all(child.is_empty for child in self.children)
@property
def root(self):
node = self
while node.parent is not None:
node = node.parent
return node
@property
def name(self):
return self.node.data
def has_key(self, key):
for node in [self, self.root]:
if key in node._data:
return True
return False
def get(self, key, run_info=None):
if run_info is None:
run_info = {}
for node in [self, self.root]:
if key in node._data:
for cond_value in node._data[key]:
try:
matches = cond_value(run_info)
except KeyError:
matches = False
if matches:
return cond_value.value
raise KeyError
def set(self, key, value, condition=None):
# First try to update the existing value
if key in self._data:
cond_values = self._data[key]
for cond_value in cond_values:
if cond_value.condition_node == condition:
cond_value.value = value
return
# If there isn't a conditional match reuse the existing KeyValueNode as the
# parent
node = None
for child in self.node.children:
if child.data == key:
node = child
break
assert node is not None
else:
node = KeyValueNode(key)
self.node.append(node)
if isinstance(value, list):
value_node = ListNode()
for item in value:
value_node.append(ValueNode(unicode(item)))
else:
value_node = ValueNode(unicode(value))
if condition is not None:
conditional_node = ConditionalNode()
conditional_node.append(condition)
conditional_node.append(value_node)
node.append(conditional_node)
cond_value = Compiler().compile_condition(conditional_node)
else:
node.append(value_node)
cond_value = ConditionalValue(value_node, lambda x: True)
# Update the cache of child values. This is pretty annoying and maybe
# it should just work directly on the tree
if key not in self._data:
self._data[key] = []
if self._data[key] and self._data[key][-1].condition_node is None:
self._data[key].insert(len(self._data[key]) - 1, cond_value)
else:
self._data[key].append(cond_value)
def _add_key_value(self, node, values):
"""Called during construction to set a key-value node"""
self._data[node.data] = values
def append(self, child):
self.children.append(child)
child.parent = self
if child.node.parent != self.node:
self.node.append(child.node)
return child
def remove(self):
if self.parent:
self.parent._remove_child(self)
def _remove_child(self, child):
self.children.remove(child)
child.parent = None
def iterchildren(self, name=None):
for item in self.children:
if item.name == name or name is None:
yield item
def _flatten(self):
rv = {}
for node in [self, self.root]:
for name, value in node._data.iteritems():
if name not in rv:
rv[name] = value
return rv
def iteritems(self):
for item in self._flatten().iteritems():
yield item
def iterkeys(self):
for item in self._flatten().iterkeys():
yield item
def remove_value(self, key, value):
if key not in self._data:
return
try:
self._data[key].remove(value)
except ValueError:
return
if not self._data[key]:
del self._data[key]
value.remove()
def compile_ast(ast, data_cls_getter=None, **kwargs):
return Compiler().compile(ast, data_cls_getter=data_cls_getter, **kwargs)
def compile(stream, data_cls_getter=None, **kwargs):
return compile_ast(parse(stream),
data_cls_getter=data_cls_getter,
**kwargs)
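# Illustrative usage sketch: compile a manifest file and read one conditional
# value back. The file name "example.ini" and the run_info keys ("os",
# "debug") are placeholders; the real keys depend on the expressions used in
# the manifest being compiled.
if __name__ == "__main__":
    with open("example.ini") as f:
        manifest = compile(f)
    for item in manifest.iterchildren():
        try:
            print("%s: %r" % (item.name, item.get("expected", {"os": "linux", "debug": False})))
        except KeyError:
            print("%s has no 'expected' value" % item.name)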
| [
"[email protected]"
] | |
b2b4971f6f115b35ab3d38e85042808deb3e4102 | 5182897b2f107f4fd919af59c6762d66c9be5f1d | /.history/src/Simulador_20200708111002.py | e4355d8041f85f51b628df0e95950022e25c13c9 | [
"MIT"
] | permissive | eduardodut/Trabalho_final_estatistica_cd | 422b7e702f96291f522bcc68d2e961d80d328c14 | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | refs/heads/master | 2022-11-23T03:14:05.493054 | 2020-07-16T23:49:26 | 2020-07-16T23:49:26 | 277,867,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,326 | py | import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo
import random
from itertools import permutations
class Simulador():
def __init__(
self,
        tamanho_matriz, # number of rows/columns of the spherical matrix
        percentual_inicial_tipo1, # initial fraction of the population infected as type 1
        percentual_inicial_tipo2, # initial fraction of the population infected as type 2
        chance_infeccao, # chance that a type-2 infected individual infects a healthy one
        chance_infeccao_tipo2, # chance that an infected individual becomes contagious
        chance_morte, # chance that a type-2 individual dies at the end of an update
        atualizacoes_cura): # number of updates needed to cure a type-1 or type-2 individual
self.num_atualizacoes = 0
self.individuos_infectados_tipo_2 = []
self.individuos_infectados_tipo_1 = []
self.individuos_infectados_curados = []
self.individuos_infectados_mortos = []
self.matriz_individuos = np.zeros([tamanho_matriz,tamanho_matriz])
self.fabrica_individuo = Fabrica_individuo(
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,
atualizacoes_cura)
        # object responsible for validating movement on the n x n grid
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
self.populacao_inicial = int(tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
dict = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
        # dataframe that will store the results of each update
self.dataframe = pd.DataFrame(dict, index = [0])
self.popular(tamanho_matriz)
def popular(self, tamanho_matriz):
        # list of possible index combinations for the data matrix
        permutacoes = permutations(list(range(tamanho_matriz)), 2)
        lista_indices = list(permutacoes)
        random.shuffle(lista_indices)
        # create the first type-1 infected individual:
        indiv = self.fabrica_individuo.criar_individuo(
            Individuo.INFECTADO_TIPO_1, (lista_indices[0][0], lista_indices[0][1]))
        self.individuos_infectados_tipo_1.append(indiv)
        # mark its cell in the matrix (status value is an assumption; the original line was left unfinished)
        self.matriz_individuos[lista_indices[0][0], lista_indices[0][1]] = Individuo.INFECTADO_TIPO_1
        # create the remaining type-1 infected individuals
        for i in range(1, self.num_inicial_tipo1):
            pass
        # create the remaining type-2 infected individuals:
        for indice in lista_indices[1:self.num_inicial_tipo2 - 2]:
            print(indice)
        # create the healthy population:
        for i in lista_indices[0:]:
            print(i)
class Fabrica_individuo():
def __init__(
self,
        chance_infeccao, # chance that a type-2 infected individual infects a healthy one
        chance_infeccao_tipo2, # chance that an infected individual becomes contagious
        chance_morte, # chance that a type-2 individual dies at the end of an update
        atualizacoes_cura): # number of updates needed to cure a type-1 or type-2 individual
self.chance_infeccao = chance_infeccao
self.chance_infeccao_tipo2 = chance_infeccao_tipo2
self.chance_morte = chance_morte
self.atualizacoes_cura = atualizacoes_cura
def criar_individuo(self, status_inicial, posicao):
return Individuo(
status_inicial,
self.chance_infeccao,
self.chance_infeccao_tipo2,
self.chance_morte,
self.atualizacoes_cura,
posicao)
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.2
chance_morte = 0.2
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.05
percentual_inicial_tipo2 = 0.01
sim = Simulador(
    1000,
    percentual_inicial_tipo1,
    percentual_inicial_tipo2,
    chance_infeccao,
    chance_infeccao_tipo2,
    chance_morte,
    atualizacoes_cura)
ind = sim.fabrica_individuo.criar_individuo(Individuo.MORTO, (0,0))
dict = {'num_sadios':1,
'num_infect_t1':2,
'num_infect_t2':3,
'num_curados':4,
'num_mortos':5}
s = pd.Series(dict)
sim.dataframe = sim.dataframe.append(s, ignore_index=True)
print(sim.dataframe)
#print(sim.num_inicial_tipo2)
| [
"[email protected]"
] | |
00939f6d74f528a23ab0e586d52057ea17789070 | ffadf9541d01cf9af20c419759d48b1eb01bfd35 | /pachong/PCdemo1/day15/股市行情定点爬取.py | 842ff30d11cf43f28fc448cc022cde009269ba8e | [] | no_license | 1987617587/lsh_py | b1bb1016eaafcba03bbc4a5310c1db04ae227af4 | 80eb5175cd0e5b3c6c5e2ebb906bb78d9a8f9e0d | refs/heads/master | 2021-01-02T05:14:31.330287 | 2020-06-20T05:18:23 | 2020-06-20T05:18:23 | 239,498,994 | 2 | 1 | null | 2020-06-07T23:09:56 | 2020-02-10T11:46:47 | Python | UTF-8 | Python | false | false | 5,975 | py | # author:lsh
# datetime:2020/4/13 19:56
'''
.::::. _oo0oo_
.::::::::. o8888888o
::::::::::: 88" . "88
..:::::::::::' (| -_- |)
'::::::::::::' 0\ = /0
.:::::::::: ___/`---'\___
'::::::::::::::.. .' \\| |# '.
..::::::::::::. / \\||| : |||# \
``:::::::::::::::: / _||||| -:- |||||- \
::::``:::::::::' .:::. | | \\\ - #/ | |
::::' ':::::' .::::::::. | \_| ''\---/'' |_/ |
.::::' :::: .:::::::'::::. \ .-\__ '-' ___/-. /
.:::' ::::: .:::::::::' ':::::. ___'. .' /--.--\ `. .'___
.::' :::::.:::::::::' ':::::. ."" '< `.___\_<|>_/___.' >' "".
.::' ::::::::::::::' ``::::. | | : `- \`.;`\ _ /`;.`/ - ` : | |
...::: ::::::::::::' ``::. \ \ `_. \_ __\ /__ _/ .-` / /
```` ':. ':::::::::' ::::.. `-.____`.___ \_____/___.-`___.-'
'.:::::' ':'````.. `=---='
                  Goddess bless, no bugs ever        Buddha bless, no bugs ever
'''
from celery import Celery
from celery.schedules import crontab
import requests
import demjson
import pymysql
import time
import random
import math
import re
uri = 'redis://@127.0.0.1:6379/7'
app = Celery('tasks', broker=uri)
# Run once every day at 15:30
c1 = crontab(minute=30, hour=15)
@app.task
def goto_request(count_url):
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123456', db='py1911')
cur = conn.cursor()
# count_url = 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount'
data_url = 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData'
type_ls = ['sh_a', 'sh_b', 'sz_a', 'sz_b', 'sh_z', 'sz_z']
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
}
pat_1 = re.compile(r'(\d+)')
size = 40
for type in type_ls:
        # request the total number of stocks for this category
param1 = {
'data': type
}
html = requests.get(count_url, params=param1, headers=headers).text
count = int(pat_1.search(html).group(1))
page_count = math.ceil(count / size)
print('count:', count, 'page_count:', page_count)
        # request stock data for this category, page by page
for page in range(1, page_count + 1):
param2 = {
'page': page,
'num': 40,
'sort': 'symbol',
'asc': 1,
'data': type,
'symbol': '',
'_s_r_a': 'init',
}
print('type:', type, 'page:', page)
html = requests.get(data_url, params=param2, headers=headers).text
# print(html)
ls = demjson.decode(html)
for each in ls:
symbol = each['symbol']
print('symbol:', symbol)
code = each['code']
print(f'code:{code}')
name = each['name']
print('name:', name)
trade = each['trade']
print('trade:', trade)
pricechange = each['pricechange']
print('pricechange:', pricechange)
changepercent = each['changepercent']
print('changepercent:', changepercent)
buy = each['buy']
print('buy:', buy)
sell = each['sell']
print('sell:', sell)
settlement = each['settlement']
print(f'settlement:{settlement}')
open = each['open']
print('open:', open)
high = each['high']
print('high:', high)
low = each['low']
print('low:', low)
volume = each['volume']
print('volume:', volume)
amount = each['amount']
print('amount:', amount)
ticktime = each['ticktime']
print('ticktime:', ticktime)
print('=' * 200)
strsql = 'insert into finance VALUES(0,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
params = [symbol, code, name, trade, pricechange, changepercent, buy, sell, settlement, open, high, low]
cur.execute(strsql, params)
conn.commit()
time.sleep(random.random())
cur.close()
conn.close()
return '爬取成功'
app.conf.beat_schedule = {
'send-every-15-hours': {
        # task to run (module.function path)
        'task': 'tasks.goto_request',
        # schedule: every 30 seconds for testing (use c1 for the daily 15:30 crontab)
        'schedule': 30.0,
        # 'schedule': c1,
        # arguments passed to the task function
'args': ('http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeStockCount',)
},
}
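# Illustrative run commands (a sketch): assuming this file is importable as
# "tasks" and Redis is reachable at the broker URI configured above, the beat
# scheduler and a worker would be started in two separate processes:
#
#   celery -A tasks beat --loglevel=info
#   celery -A tasks worker --loglevel=info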
| [
"[email protected]"
] | |
59fecb3751deccbc1893af6d99c785ffb06deb69 | 7fae887c73cc6df8de886897114eb07c28674fa7 | /cs101/unit27/27_4.py | 7f5cedd2fc201fd3745d03708f744ca64942ee95 | [] | no_license | thinkreed/pyf | 24960e3523be3c2e1661608f60f866a5cd7e747f | 04f5e7f46aa95719f848bb64a512458895147da3 | refs/heads/master | 2022-12-10T17:43:49.709946 | 2019-06-13T01:13:00 | 2019-06-13T01:13:00 | 109,550,040 | 2 | 0 | null | 2022-12-08T05:14:52 | 2017-11-05T03:53:51 | Python | UTF-8 | Python | false | false | 972 | py | english = {1: "January", 2: "February", 3: "March", 4: "April", 5: "May",
6: "June", 7: "July", 8: "August", 9: "September", 10: "October",
11: "November", 12: "December"}
swedish = {1: "januari", 2: "februari", 3: "mars", 4: "april", 5: "maj",
6: "juni", 7: "juli", 8: "augusti", 9: "september", 10: "oktober",
11: "november", 12: "december"}
def date_converter(month_dictionary, date):
start = date.find('/')
month = month_dictionary[int(date[:start])]
end = date.find('/', start + 1)
day = date[start + 1:end]
year = date[end + 1:]
return day + ' ' + month + ' ' + year
def date_converter2(month_dictionary, date):
month, day, year = date.split('/')
return day + ' ' + month_dictionary[int(month)] + ' ' + year
print(date_converter(english, '5/11/2012'))
print(date_converter(english, '5/11/12'))
print(date_converter(swedish, '5/11/2012'))
print(date_converter2(swedish, '12/5/1791'))
| [
"[email protected]"
] | |
9ec06f7e2751cf047aecded3a7687a957892776e | ce083128fa87ca86c65059893aa8882d088461f5 | /python/flask-mail-labs/.venv/lib/python2.7/site-packages/IPython/terminal/interactiveshell.py | 4119b87698d99d50b6b6c44d195e21abaf2e3faa | [] | no_license | marcosptf/fedora | 581a446e7f81d8ae9a260eafb92814bc486ee077 | 359db63ff1fa79696b7bc803bcfa0042bff8ab44 | refs/heads/master | 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null | UTF-8 | Python | false | false | 32,119 | py | # -*- coding: utf-8 -*-
"""Subclass of InteractiveShell for terminal based frontends."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import bdb
import os
import sys
from IPython.core.error import TryNext, UsageError
from IPython.core.usage import interactive_usage
from IPython.core.inputsplitter import IPythonInputSplitter, ESC_MAGIC
from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.lib.clipboard import ClipboardEmpty
from IPython.utils.contexts import NoOpContext
from IPython.utils.decorators import undoc
from IPython.utils.encoding import get_stream_enc
from IPython.utils import py3compat
from IPython.utils.terminal import toggle_set_term_title, set_term_title
from IPython.utils.process import abbrev_cwd
from IPython.utils.warn import warn, error
from IPython.utils.text import num_ini_spaces, SList, strip_email_quotes
from traitlets import Integer, CBool, Unicode
def get_default_editor():
try:
ed = os.environ['EDITOR']
if not py3compat.PY3:
ed = ed.decode()
return ed
except KeyError:
pass
except UnicodeError:
warn("$EDITOR environment variable is not pure ASCII. Using platform "
"default editor.")
if os.name == 'posix':
return 'vi' # the only one guaranteed to be there!
else:
return 'notepad' # same in Windows!
def get_pasted_lines(sentinel, l_input=py3compat.input, quiet=False):
""" Yield pasted lines until the user enters the given sentinel value.
"""
if not quiet:
print("Pasting code; enter '%s' alone on the line to stop or use Ctrl-D." \
% sentinel)
prompt = ":"
else:
prompt = ""
while True:
try:
l = py3compat.str_to_unicode(l_input(prompt))
if l == sentinel:
return
else:
yield l
except EOFError:
print('<EOF>')
return
@undoc
def no_op(*a, **kw): pass
class ReadlineNoRecord(object):
"""Context manager to execute some code, then reload readline history
so that interactive input to the code doesn't appear when pressing up."""
def __init__(self, shell):
self.shell = shell
self._nested_level = 0
def __enter__(self):
if self._nested_level == 0:
try:
self.orig_length = self.current_length()
self.readline_tail = self.get_readline_tail()
except (AttributeError, IndexError): # Can fail with pyreadline
self.orig_length, self.readline_tail = 999999, []
self._nested_level += 1
def __exit__(self, type, value, traceback):
self._nested_level -= 1
if self._nested_level == 0:
# Try clipping the end if it's got longer
try:
e = self.current_length() - self.orig_length
if e > 0:
for _ in range(e):
self.shell.readline.remove_history_item(self.orig_length)
# If it still doesn't match, just reload readline history.
if self.current_length() != self.orig_length \
or self.get_readline_tail() != self.readline_tail:
self.shell.refill_readline_hist()
except (AttributeError, IndexError):
pass
# Returning False will cause exceptions to propagate
return False
def current_length(self):
return self.shell.readline.get_current_history_length()
def get_readline_tail(self, n=10):
"""Get the last n items in readline history."""
end = self.shell.readline.get_current_history_length() + 1
start = max(end-n, 1)
ghi = self.shell.readline.get_history_item
return [ghi(x) for x in range(start, end)]
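# Illustrative usage sketch: run a cell without letting its interactive input
# end up in readline history. "shell" stands for an existing
# TerminalInteractiveShell instance with readline support enabled.
#
#   with ReadlineNoRecord(shell):
#       shell.run_cell("name = raw_input('who are you? ')")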
@magics_class
class TerminalMagics(Magics):
def __init__(self, shell):
super(TerminalMagics, self).__init__(shell)
self.input_splitter = IPythonInputSplitter()
def store_or_execute(self, block, name):
""" Execute a block, or store it in a variable, per the user's request.
"""
if name:
# If storing it for further editing
self.shell.user_ns[name] = SList(block.splitlines())
print("Block assigned to '%s'" % name)
else:
b = self.preclean_input(block)
self.shell.user_ns['pasted_block'] = b
self.shell.using_paste_magics = True
try:
self.shell.run_cell(b)
finally:
self.shell.using_paste_magics = False
def preclean_input(self, block):
lines = block.splitlines()
while lines and not lines[0].strip():
lines = lines[1:]
return strip_email_quotes('\n'.join(lines))
def rerun_pasted(self, name='pasted_block'):
""" Rerun a previously pasted command.
"""
b = self.shell.user_ns.get(name)
# Sanity checks
if b is None:
raise UsageError('No previous pasted block available')
if not isinstance(b, py3compat.string_types):
raise UsageError(
"Variable 'pasted_block' is not a string, can't execute")
print("Re-executing '%s...' (%d chars)"% (b.split('\n',1)[0], len(b)))
self.shell.run_cell(b)
@line_magic
def autoindent(self, parameter_s = ''):
"""Toggle autoindent on/off (if available)."""
self.shell.set_autoindent()
print("Automatic indentation is:",['OFF','ON'][self.shell.autoindent])
@line_magic
def cpaste(self, parameter_s=''):
"""Paste & execute a pre-formatted code block from clipboard.
You must terminate the block with '--' (two minus-signs) or Ctrl-D
alone on the line. You can also provide your own sentinel with '%paste
-s %%' ('%%' is the new sentinel for this operation).
The block is dedented prior to execution to enable execution of method
definitions. '>' and '+' characters at the beginning of a line are
ignored, to allow pasting directly from e-mails, diff files and
doctests (the '...' continuation prompt is also stripped). The
executed block is also assigned to variable named 'pasted_block' for
later editing with '%edit pasted_block'.
You can also pass a variable name as an argument, e.g. '%cpaste foo'.
This assigns the pasted block to variable 'foo' as string, without
dedenting or executing it (preceding >>> and + is still stripped)
'%cpaste -r' re-executes the block previously entered by cpaste.
'%cpaste -q' suppresses any additional output messages.
Do not be alarmed by garbled output on Windows (it's a readline bug).
Just press enter and type -- (and press enter again) and the block
will be what was just pasted.
IPython statements (magics, shell escapes) are not supported (yet).
See also
--------
paste: automatically pull code from clipboard.
Examples
--------
::
In [8]: %cpaste
Pasting code; enter '--' alone on the line to stop.
:>>> a = ["world!", "Hello"]
:>>> print " ".join(sorted(a))
:--
Hello world!
"""
opts, name = self.parse_options(parameter_s, 'rqs:', mode='string')
if 'r' in opts:
self.rerun_pasted()
return
quiet = ('q' in opts)
sentinel = opts.get('s', u'--')
block = '\n'.join(get_pasted_lines(sentinel, quiet=quiet))
self.store_or_execute(block, name)
@line_magic
def paste(self, parameter_s=''):
"""Paste & execute a pre-formatted code block from clipboard.
The text is pulled directly from the clipboard without user
intervention and printed back on the screen before execution (unless
the -q flag is given to force quiet mode).
The block is dedented prior to execution to enable execution of method
definitions. '>' and '+' characters at the beginning of a line are
ignored, to allow pasting directly from e-mails, diff files and
doctests (the '...' continuation prompt is also stripped). The
executed block is also assigned to variable named 'pasted_block' for
later editing with '%edit pasted_block'.
You can also pass a variable name as an argument, e.g. '%paste foo'.
This assigns the pasted block to variable 'foo' as string, without
executing it (preceding >>> and + is still stripped).
Options:
-r: re-executes the block previously entered by cpaste.
-q: quiet mode: do not echo the pasted text back to the terminal.
IPython statements (magics, shell escapes) are not supported (yet).
See also
--------
cpaste: manually paste code into terminal until you mark its end.
"""
opts, name = self.parse_options(parameter_s, 'rq', mode='string')
if 'r' in opts:
self.rerun_pasted()
return
try:
block = self.shell.hooks.clipboard_get()
except TryNext as clipboard_exc:
message = getattr(clipboard_exc, 'args')
if message:
error(message[0])
else:
error('Could not get text from the clipboard.')
return
except ClipboardEmpty:
raise UsageError("The clipboard appears to be empty")
# By default, echo back to terminal unless quiet mode is requested
if 'q' not in opts:
write = self.shell.write
write(self.shell.pycolorize(block))
if not block.endswith('\n'):
write('\n')
write("## -- End pasted text --\n")
self.store_or_execute(block, name)
# Class-level: add a '%cls' magic only on Windows
if sys.platform == 'win32':
@line_magic
def cls(self, s):
"""Clear screen.
"""
os.system("cls")
class TerminalInteractiveShell(InteractiveShell):
autoedit_syntax = CBool(False, config=True,
help="auto editing of files with syntax errors.")
confirm_exit = CBool(True, config=True,
help="""
Set to confirm when you try to exit IPython with an EOF (Control-D
in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
you can force a direct exit without any confirmation.""",
)
# This display_banner only controls whether or not self.show_banner()
# is called when mainloop/interact are called. The default is False
# because for the terminal based application, the banner behavior
# is controlled by the application.
display_banner = CBool(False) # This isn't configurable!
embedded = CBool(False)
embedded_active = CBool(False)
editor = Unicode(get_default_editor(), config=True,
help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
)
pager = Unicode('less', config=True,
help="The shell program to be used for paging.")
screen_length = Integer(0, config=True,
help=
"""Number of lines of your screen, used to control printing of very
long strings. Strings longer than this number of lines will be sent
through a pager instead of directly printed. The default value for
this is 0, which means IPython will auto-detect your screen size every
time it needs to print certain potentially long strings (this doesn't
change the behavior of the 'print' keyword, it's only triggered
internally). If for some reason this isn't working well (it needs
curses support), specify it yourself. Otherwise don't change the
default.""",
)
term_title = CBool(False, config=True,
help="Enable auto setting the terminal title."
)
usage = Unicode(interactive_usage)
# This `using_paste_magics` is used to detect whether the code is being
# executed via paste magics functions
using_paste_magics = CBool(False)
# In the terminal, GUI control is done via PyOS_InputHook
@staticmethod
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
"""
# Deferred import
from IPython.lib.inputhook import enable_gui as real_enable_gui
try:
return real_enable_gui(gui, app)
except ValueError as e:
raise UsageError("%s" % e)
system = InteractiveShell.system_raw
#-------------------------------------------------------------------------
# Overrides of init stages
#-------------------------------------------------------------------------
def init_display_formatter(self):
super(TerminalInteractiveShell, self).init_display_formatter()
# terminal only supports plaintext
self.display_formatter.active_types = ['text/plain']
#-------------------------------------------------------------------------
# Things related to readline
#-------------------------------------------------------------------------
def init_readline(self):
"""Command history completion/saving/reloading."""
if self.readline_use:
import IPython.utils.rlineimpl as readline
self.rl_next_input = None
self.rl_do_indent = False
if not self.readline_use or not readline.have_readline:
self.readline = None
# Set a number of methods that depend on readline to be no-op
self.readline_no_record = NoOpContext()
self.set_readline_completer = no_op
self.set_custom_completer = no_op
if self.readline_use:
warn('Readline services not available or not loaded.')
else:
self.has_readline = True
self.readline = readline
sys.modules['readline'] = readline
# Platform-specific configuration
if os.name == 'nt':
# FIXME - check with Frederick to see if we can harmonize
# naming conventions with pyreadline to avoid this
# platform-dependent check
self.readline_startup_hook = readline.set_pre_input_hook
else:
self.readline_startup_hook = readline.set_startup_hook
# Readline config order:
# - IPython config (default value)
# - custom inputrc
# - IPython config (user customized)
# load IPython config before inputrc if default
# skip if libedit because parse_and_bind syntax is different
if not self._custom_readline_config and not readline.uses_libedit:
for rlcommand in self.readline_parse_and_bind:
readline.parse_and_bind(rlcommand)
# Load user's initrc file (readline config)
# Or if libedit is used, load editrc.
inputrc_name = os.environ.get('INPUTRC')
if inputrc_name is None:
inputrc_name = '.inputrc'
if readline.uses_libedit:
inputrc_name = '.editrc'
inputrc_name = os.path.join(self.home_dir, inputrc_name)
if os.path.isfile(inputrc_name):
try:
readline.read_init_file(inputrc_name)
except:
warn('Problems reading readline initialization file <%s>'
% inputrc_name)
# load IPython config after inputrc if user has customized
if self._custom_readline_config:
for rlcommand in self.readline_parse_and_bind:
readline.parse_and_bind(rlcommand)
# Remove some chars from the delimiters list. If we encounter
# unicode chars, discard them.
delims = readline.get_completer_delims()
if not py3compat.PY3:
delims = delims.encode("ascii", "ignore")
for d in self.readline_remove_delims:
delims = delims.replace(d, "")
delims = delims.replace(ESC_MAGIC, '')
readline.set_completer_delims(delims)
# Store these so we can restore them if something like rpy2 modifies
# them.
self.readline_delims = delims
# otherwise we end up with a monster history after a while:
readline.set_history_length(self.history_length)
self.refill_readline_hist()
self.readline_no_record = ReadlineNoRecord(self)
# Configure auto-indent for all platforms
self.set_autoindent(self.autoindent)
def init_completer(self):
super(TerminalInteractiveShell, self).init_completer()
# Only configure readline if we truly are using readline.
if self.has_readline:
self.set_readline_completer()
def set_readline_completer(self):
"""Reset readline's completer to be our own."""
self.readline.set_completer(self.Completer.rlcomplete)
def pre_readline(self):
"""readline hook to be used at the start of each line.
It handles auto-indent and text from set_next_input."""
if self.rl_do_indent:
self.readline.insert_text(self._indent_current_str())
if self.rl_next_input is not None:
self.readline.insert_text(self.rl_next_input)
self.rl_next_input = None
def refill_readline_hist(self):
# Load the last 1000 lines from history
self.readline.clear_history()
stdin_encoding = sys.stdin.encoding or "utf-8"
last_cell = u""
for _, _, cell in self.history_manager.get_tail(self.history_load_length,
include_latest=True):
# Ignore blank lines and consecutive duplicates
cell = cell.rstrip()
if cell and (cell != last_cell):
try:
if self.multiline_history:
self.readline.add_history(py3compat.unicode_to_str(cell,
stdin_encoding))
else:
for line in cell.splitlines():
self.readline.add_history(py3compat.unicode_to_str(line,
stdin_encoding))
last_cell = cell
except (TypeError, ValueError) as e:
# The history DB can get corrupted so it returns strings
# containing null bytes, which readline objects to.
warn(("Failed to add string to readline history.\n"
"Error: {}\n"
"Cell: {!r}").format(e, cell))
#-------------------------------------------------------------------------
# Things related to the terminal
#-------------------------------------------------------------------------
@property
def usable_screen_length(self):
if self.screen_length == 0:
return 0
else:
num_lines_bot = self.separate_in.count('\n')+1
return self.screen_length - num_lines_bot
def _term_title_changed(self, name, new_value):
self.init_term_title()
def init_term_title(self):
# Enable or disable the terminal title.
if self.term_title:
toggle_set_term_title(True)
set_term_title('IPython: ' + abbrev_cwd())
else:
toggle_set_term_title(False)
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
# The parent class defines aliases that can be safely used with any
# frontend.
super(TerminalInteractiveShell, self).init_alias()
# Now define aliases that only make sense on the terminal, because they
# need direct access to the console in a way that we can't emulate in
# GUI or web frontend
if os.name == 'posix':
aliases = [('clear', 'clear'), ('more', 'more'), ('less', 'less'),
('man', 'man')]
else :
aliases = []
for name, cmd in aliases:
self.alias_manager.soft_define_alias(name, cmd)
#-------------------------------------------------------------------------
# Mainloop and code execution logic
#-------------------------------------------------------------------------
def mainloop(self, display_banner=None):
"""Start the mainloop.
If an optional banner argument is given, it will override the
internally created default banner.
"""
with self.builtin_trap, self.display_trap:
while 1:
try:
self.interact(display_banner=display_banner)
#self.interact_with_readline()
# XXX for testing of a readline-decoupled repl loop, call
# interact_with_readline above
break
except KeyboardInterrupt:
# this should not be necessary, but KeyboardInterrupt
# handling seems rather unpredictable...
self.write("\nKeyboardInterrupt in interact()\n")
def _replace_rlhist_multiline(self, source_raw, hlen_before_cell):
"""Store multiple lines as a single entry in history"""
# do nothing without readline or disabled multiline
if not self.has_readline or not self.multiline_history:
return hlen_before_cell
# windows rl has no remove_history_item
if not hasattr(self.readline, "remove_history_item"):
return hlen_before_cell
# skip empty cells
if not source_raw.rstrip():
return hlen_before_cell
# nothing changed do nothing, e.g. when rl removes consecutive dups
hlen = self.readline.get_current_history_length()
if hlen == hlen_before_cell:
return hlen_before_cell
for i in range(hlen - hlen_before_cell):
self.readline.remove_history_item(hlen - i - 1)
stdin_encoding = get_stream_enc(sys.stdin, 'utf-8')
self.readline.add_history(py3compat.unicode_to_str(source_raw.rstrip(),
stdin_encoding))
return self.readline.get_current_history_length()
def interact(self, display_banner=None):
"""Closely emulate the interactive Python console."""
# batch run -> do not interact
if self.exit_now:
return
if display_banner is None:
display_banner = self.display_banner
if isinstance(display_banner, py3compat.string_types):
self.show_banner(display_banner)
elif display_banner:
self.show_banner()
more = False
if self.has_readline:
self.readline_startup_hook(self.pre_readline)
hlen_b4_cell = self.readline.get_current_history_length()
else:
hlen_b4_cell = 0
# exit_now is set by a call to %Exit or %Quit, through the
# ask_exit callback.
while not self.exit_now:
self.hooks.pre_prompt_hook()
if more:
try:
prompt = self.prompt_manager.render('in2')
except:
self.showtraceback()
if self.autoindent:
self.rl_do_indent = True
else:
try:
prompt = self.separate_in + self.prompt_manager.render('in')
except:
self.showtraceback()
try:
line = self.raw_input(prompt)
if self.exit_now:
# quick exit on sys.std[in|out] close
break
if self.autoindent:
self.rl_do_indent = False
except KeyboardInterrupt:
#double-guard against keyboardinterrupts during kbdint handling
try:
self.write('\n' + self.get_exception_only())
source_raw = self.input_splitter.raw_reset()
hlen_b4_cell = \
self._replace_rlhist_multiline(source_raw, hlen_b4_cell)
more = False
except KeyboardInterrupt:
pass
except EOFError:
if self.autoindent:
self.rl_do_indent = False
if self.has_readline:
self.readline_startup_hook(None)
self.write('\n')
self.exit()
except bdb.BdbQuit:
warn('The Python debugger has exited with a BdbQuit exception.\n'
'Because of how pdb handles the stack, it is impossible\n'
'for IPython to properly format this particular exception.\n'
'IPython will resume normal operation.')
except:
# exceptions here are VERY RARE, but they can be triggered
# asynchronously by signal handlers, for example.
self.showtraceback()
else:
try:
self.input_splitter.push(line)
more = self.input_splitter.push_accepts_more()
except SyntaxError:
# Run the code directly - run_cell takes care of displaying
# the exception.
more = False
if (self.SyntaxTB.last_syntax_error and
self.autoedit_syntax):
self.edit_syntax_error()
if not more:
source_raw = self.input_splitter.raw_reset()
self.run_cell(source_raw, store_history=True)
hlen_b4_cell = \
self._replace_rlhist_multiline(source_raw, hlen_b4_cell)
# Turn off the exit flag, so the mainloop can be restarted if desired
self.exit_now = False
def raw_input(self, prompt=''):
"""Write a prompt and read a line.
The returned line does not include the trailing newline.
When the user enters the EOF key sequence, EOFError is raised.
Parameters
----------
prompt : str, optional
A string to be printed to prompt the user.
"""
# raw_input expects str, but we pass it unicode sometimes
prompt = py3compat.cast_bytes_py2(prompt)
try:
line = py3compat.cast_unicode_py2(self.raw_input_original(prompt))
except ValueError:
warn("\n********\nYou or a %run:ed script called sys.stdin.close()"
" or sys.stdout.close()!\nExiting IPython!\n")
self.ask_exit()
return ""
# Try to be reasonably smart about not re-indenting pasted input more
# than necessary. We do this by trimming out the auto-indent initial
# spaces, if the user's actual input started itself with whitespace.
if self.autoindent:
if num_ini_spaces(line) > self.indent_current_nsp:
line = line[self.indent_current_nsp:]
self.indent_current_nsp = 0
return line
#-------------------------------------------------------------------------
# Methods to support auto-editing of SyntaxErrors.
#-------------------------------------------------------------------------
def edit_syntax_error(self):
"""The bottom half of the syntax error handler called in the main loop.
Loop until syntax error is fixed or user cancels.
"""
while self.SyntaxTB.last_syntax_error:
# copy and clear last_syntax_error
err = self.SyntaxTB.clear_err_state()
if not self._should_recompile(err):
return
try:
# may set last_syntax_error again if a SyntaxError is raised
self.safe_execfile(err.filename,self.user_ns)
except:
self.showtraceback()
else:
try:
f = open(err.filename)
try:
# This should be inside a display_trap block and I
# think it is.
sys.displayhook(f.read())
finally:
f.close()
except:
self.showtraceback()
def _should_recompile(self,e):
"""Utility routine for edit_syntax_error"""
if e.filename in ('<ipython console>','<input>','<string>',
'<console>','<BackgroundJob compilation>',
None):
return False
try:
if (self.autoedit_syntax and
not self.ask_yes_no('Return to editor to correct syntax error? '
'[Y/n] ','y')):
return False
except EOFError:
return False
def int0(x):
try:
return int(x)
except TypeError:
return 0
# always pass integer line and offset values to editor hook
try:
self.hooks.fix_error_editor(e.filename,
int0(e.lineno),int0(e.offset),e.msg)
except TryNext:
warn('Could not open editor')
return False
return True
#-------------------------------------------------------------------------
# Things related to exiting
#-------------------------------------------------------------------------
def ask_exit(self):
""" Ask the shell to exit. Can be overiden and used as a callback. """
self.exit_now = True
def exit(self):
"""Handle interactive exit.
This method calls the ask_exit callback."""
if self.confirm_exit:
if self.ask_yes_no('Do you really want to exit ([y]/n)?','y','n'):
self.ask_exit()
else:
self.ask_exit()
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
super(TerminalInteractiveShell, self).init_magics()
self.register_magics(TerminalMagics)
def showindentationerror(self):
super(TerminalInteractiveShell, self).showindentationerror()
if not self.using_paste_magics:
print("If you want to paste code into IPython, try the "
"%paste and %cpaste magic functions.")
InteractiveShellABC.register(TerminalInteractiveShell)
| [
"[email protected]"
] | |
251f4b94234d2cacbf822e32f19aae0edea50f0f | 090fd16451ef226f0660d4be797c8a5fbf309f97 | /training_data/whole_image/to_tensors/metadata_to_grid_tensor.py | 4ca081192a85d619c3672f048fe0bd5599b745bb | [] | no_license | philip-brohan/Robot_Rainfall_Rescue | 2cdd20131a1ceae4e4af54059381b815f1cc138b | 4121d69aba6c8d180b57d92a0da11d09cd6843b4 | refs/heads/master | 2023-04-13T10:07:08.160001 | 2021-04-21T10:24:17 | 2021-04-21T10:24:17 | 267,624,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,783 | py | #!/usr/bin/env python
# Make a tensor containing grid-cell centre locations from the image metadata
import os
import sys
import math
import tensorflow as tf
import numpy
import pickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--rootd", help="root directory", type=str, required=True)
parser.add_argument("--docn", help="Document name", type=str, required=True)
args = parser.parse_args()
# Load the metadata
with open(
"%s/meta/%s.pkl" % (args.rootd, args.docn),
"rb",
) as pkf:
mdata = pickle.load(pkf)
# mdata is a dictionary - convert it to a class so contents are attributes
# and we can share code with tyrImage.
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
mdata = AttrDict(mdata)
# From the metadata, find the centres of the data grid
# (120*2 floats on the range 0-1)
# Functions copied from the tyrimage class - should reuse that class instead
# Rotate by angle degrees clockwise
def gRotate(self, point, angle=None, origin=None):
if angle is None:
angle = self.rotate
if angle == 0:
return point
if origin is None:
origin = gCentre(self)
ox, oy = origin[0] * self.pageWidth, origin[1] * self.pageHeight
px, py = point[0] * self.pageWidth, point[1] * self.pageHeight
angle = math.radians(angle) * -1
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx / self.pageWidth, qy / self.pageHeight
def gCentre(self):
return (
0.5 + self.xshift / self.pageWidth + (self.xscale - 1) * 0.43,
0.525 + self.yshift / self.pageHeight - (self.yscale - 1) * 0.2,
)
# Corners of grid
def topLeft(self):
return (
0.1 + self.xshift / self.pageWidth,
0.725 + self.yshift / self.pageHeight,
)
def topRight(self):
return (
0.96 + self.xshift / self.pageWidth + (self.xscale - 1) * 0.86,
0.725 + self.yshift / self.pageHeight,
)
def bottomLeft(self):
return (
0.1 + self.xshift / self.pageWidth,
0.325 + self.yshift / self.pageHeight - (self.yscale - 1) * 0.4,
)
def bottomRight(self):
return (
0.96 + self.xshift / self.pageWidth + (self.xscale - 1) * 0.86,
0.325 + self.yshift / self.pageHeight - (self.yscale - 1) * 0.4,
)
def topAt(self, x):
return (
topRight(self)[0] * x + topLeft(self)[0] * (1 - x),
topRight(self)[1] * x + topLeft(self)[1] * (1 - x),
)
def bottomAt(self, x):
return (
bottomRight(self)[0] * x + bottomLeft(self)[0] * (1 - x),
bottomRight(self)[1] * x + bottomLeft(self)[1] * (1 - x),
)
def leftAt(self, y):
return (
topLeft(self)[0] * y + bottomLeft(self)[0] * (1 - y),
topLeft(self)[1] * y + bottomLeft(self)[1] * (1 - y),
)
target = []
for yri in range(10):
x = (
mdata.monthsWidth
+ (yri + 0.5) * (1.0 - mdata.meansWidth - mdata.monthsWidth) / 10
)
tp = topAt(mdata, x)
for mni in range(12):
lft = leftAt(
mdata,
1.0
- mdata.yearHeight
- (mni + 1) * (1.0 - mdata.yearHeight - mdata.totalsHeight) / (12 + 1),
)
txp = gRotate(mdata, [tp[0], lft[1]])
target.extend(txp)
ict = tf.convert_to_tensor(target, numpy.float32)
# Output the tensor
opdir = "%s/tensors/cell-centres/" % args.rootd
if not os.path.isdir(opdir):
try: # These calls sometimes collide
os.makedirs(opdir)
except FileExistsError:
pass
# Write to file
sict = tf.io.serialize_tensor(ict)
tf.io.write_file("%s/%s.tfd" % (opdir, args.docn), sict)
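# Illustrative read-back sketch: the serialized tensor written above can be
# restored and reshaped to the 120 cell centres (120 x 2 coordinates). The
# path is the same placeholder used for writing.
#
#   raw = tf.io.read_file("%s/%s.tfd" % (opdir, args.docn))
#   centres = tf.reshape(tf.io.parse_tensor(raw, out_type=tf.float32), [120, 2])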
| [
"[email protected]"
] | |
d38333c18d45679b62018269967f8c0ac35bdc26 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/ml/azure-ai-ml/tests/automl_job/e2etests/test_automl_image_segmentation.py | ffccf9eebf789cc84db05ad6812bd7fd5eab6066 | [
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-generic-cla"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 4,934 | py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import platform
from typing import Tuple
import pytest
from test_utilities.utils import assert_final_job_status, get_automl_job_properties
from azure.ai.ml import MLClient, automl
from azure.ai.ml.constants._common import AssetTypes
from azure.ai.ml.entities import Data
from azure.ai.ml.entities._inputs_outputs import Input
from azure.ai.ml.entities._job.automl import SearchSpace
from azure.ai.ml.entities._job.automl.image import ImageInstanceSegmentationJob, ImageObjectDetectionSearchSpace
from azure.ai.ml.operations._run_history_constants import JobStatus
from azure.ai.ml.sweep import BanditPolicy, Choice, Uniform
from devtools_testutils import AzureRecordedTestCase, is_live
@pytest.mark.automle2etest
@pytest.mark.usefixtures("recorded_test")
@pytest.mark.skipif(
condition=not is_live() or platform.python_implementation() == "PyPy",
reason="Datasets downloaded by test are too large to record reliably"
)
class TestAutoMLImageSegmentation(AzureRecordedTestCase):
def _create_jsonl_segmentation(self, client, train_path, val_path):
fridge_data = Data(
path="./odFridgeObjectsMask",
type=AssetTypes.URI_FOLDER,
)
data_path_uri = client.data.create_or_update(fridge_data)
data_path = "./odFridgeObjectsMask/"
from automl_job.jsonl_converter import convert_mask_in_VOC_to_jsonl
convert_mask_in_VOC_to_jsonl(data_path, data_path_uri.path, train_path, val_path)
def test_image_segmentation_run(self, image_segmentation_dataset: Tuple[Input, Input], client: MLClient) -> None:
# Note: this test launches two jobs in order to avoid calling the dataset fixture more than once. Ideally, it
# would have sufficed to mark the fixture with session scope, but pytest-xdist breaks this functionality:
# https://github.com/pytest-dev/pytest-xdist/issues/271.
# Get training and validation data
train_path, val_path = image_segmentation_dataset
# Create jsonl file
self._create_jsonl_segmentation(client=client, train_path=train_path, val_path=val_path)
training_data = Input(type=AssetTypes.MLTABLE, path=train_path)
validation_data = Input(type=AssetTypes.MLTABLE, path=val_path)
# Make generic segmentation job
image_instance_segmentation_job = automl.image_instance_segmentation(
compute="gpu-cluster",
experiment_name="image-e2e-tests",
training_data=training_data,
validation_data=validation_data,
target_column_name="label",
primary_metric="MeanAveragePrecision",
properties=get_automl_job_properties(),
)
# Configure regular sweep job
image_instance_segmentation_job_sweep = copy.deepcopy(image_instance_segmentation_job)
image_instance_segmentation_job_sweep.set_training_parameters(early_stopping=True, evaluation_frequency=1)
image_instance_segmentation_job_sweep.extend_search_space(
[
SearchSpace(
model_name=Choice(["maskrcnn_resnet50_fpn"]),
learning_rate=Uniform(0.0001, 0.001),
optimizer=Choice(["sgd", "adam", "adamw"]),
min_size=Choice([600, 800]),
),
]
)
image_instance_segmentation_job_sweep.set_limits(max_trials=1, max_concurrent_trials=1)
image_instance_segmentation_job_sweep.set_sweep(
sampling_algorithm="Random",
early_termination=BanditPolicy(evaluation_interval=2, slack_factor=0.2, delay_evaluation=6),
)
# Configure AutoMode job
image_instance_segmentation_job_automode = copy.deepcopy(image_instance_segmentation_job)
# TODO: after shipping the AutoMode feature, do not set flag and call `set_limits()` instead of changing
# the limits object directly.
image_instance_segmentation_job_automode.properties["enable_automode"] = True
image_instance_segmentation_job_automode.limits.max_trials = 2
image_instance_segmentation_job_automode.limits.max_concurrent_trials = 2
# Trigger regular sweep and then AutoMode job
submitted_job_sweep = client.jobs.create_or_update(image_instance_segmentation_job_sweep)
submitted_job_automode = client.jobs.create_or_update(image_instance_segmentation_job_automode)
# Assert completion of regular sweep job
assert_final_job_status(submitted_job_sweep, client, ImageInstanceSegmentationJob, JobStatus.COMPLETED)
# Assert completion of Automode job
assert_final_job_status(submitted_job_automode, client, ImageInstanceSegmentationJob, JobStatus.COMPLETED)
| [
"[email protected]"
] | |
78ac1d8ab0955e19b6f31f6d9c758f0027b2d9c6 | eef243e450cea7e91bac2f71f0bfd45a00c6f12c | /.history/app/api_service/nlp_processing_20210127230126.py | bde6c9249a1735a48dc000ac6a5ff9b4947ad313 | [] | no_license | hoaf13/nlp-chatbot-lol | 910ab2ea3b62d5219901050271fc1a1340e46a2f | 18cb64efa9d6b4cafe1015f1cd94f4409271ef56 | refs/heads/master | 2023-05-08T04:17:19.450718 | 2021-02-02T02:37:38 | 2021-02-02T02:37:38 | 332,535,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,073 | py | import csv
import json
import numpy as np
import sklearn
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.layers import LSTM, GRU,Bidirectional, Flatten, Dense
from keras_self_attention import SeqSelfAttention
import csv, re
import json
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from keras import optimizers
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras_self_attention import SeqSelfAttention, SeqWeightedAttention
from app.models import Champion, Conversation
from app import db
dict_intent={
'build_item':0,
'support_socket':1,
'counter':2,
'be_countered':3,
'skill_up':4,
'how_to_play':5,
'combo':6,
'combine_with':7,
'how_to_use_skill':8,
'introduce':9
}
CHAMPIONS = []
dict_digit2intent = {}
key = 0
for i in dict_intent.keys():
dict_digit2intent[key] = i
key += 1
f = open('app/api_service/my_upload/champions.txt', "r")
reg = ""
for cham in f:
reg += cham.split ('\n')[0] + '|'
CHAMPIONS.append(cham.split ('\n')[0])
reg = reg[:-1]
f.close()
skills = ['Q', 'W', 'E' , 'R', 'q','w','e','r']
def get_entity(content):
hero = re.search(reg.lower(), content.lower())
if hero != None:
hero = hero.group()
else: hero = ""
if hero == "":
hero = re.search(reg, content)
if hero != None:
hero = hero.group()
else: hero = ""
spl = content.split(" ")
skill = ""
for i in spl:
if i in skills:
skill = i
break
if hero != "":
for c in CHAMPIONS:
if c.lower() == hero.lower():
hero = c
break
if 'jarvan' in content.lower():
hero = 'Jarvan IV'
if 'mundo' in content.lower():
hero = 'Dr. Mundo'
return hero, skill.upper()
def load_model():
model = Sequential()
model.add(Embedding(208, 5248, input_length=17))
model.add(Bidirectional(LSTM(128, return_sequences=True)))
# model.add(LSTM(128, return_sequences = True))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(loss= 'categorical_crossentropy',optimizer='adam', metrics=['accuracy'])
model.load_weights('app/api_service/my_upload/hoaf13-nlp.h5')
# model.summary()
return model
def process_content(reg, content):
# content = content.lower()
x = re.search(reg, content)
if x != None:
content = content.replace(x.group(), "{hero}")
return content
def process_data(model, content):
f = open('app/api_service/my_upload/bow.txt', 'r')
dictionary = ''
for word in f:
dictionary += word + " "
f.close()
data = [dictionary]
token_obj = Tokenizer()
token_obj.fit_on_texts(data)
max_len = 17
X_train_token = token_obj.texts_to_sequences([content])
X_pad = pad_sequences(X_train_token, maxlen=max_len, padding='post')
result = model.predict(X_pad)
intent = np.argmax(result)
hero, skill = get_entity(content)
return dict_digit2intent[intent], result[0][intent], hero, skill
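# Illustrative call sequence (a sketch); the question text is a made-up example:
#
#   model = load_model()
#   intent, prob, hero, skill = process_data(model, "cach len do cho Yasuo")
#   print(intent, prob, hero, skill)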
def get_raw_answer(intent, champion):
message_answer = None
if intent == 'build_item': message_answer = champion.build_item
if intent == 'support_socket': message_answer = champion.support_socket
if intent == 'counter': message_answer = champion.counter
if intent == 'be_countered': message_answer = champion.be_countered
if intent == 'skill_up': message_answer = champion.skill_up
if intent == 'how_to_play': message_answer = champion.how_to_play
if intent == 'combo': message_answer = champion.combo
if intent == 'combine_with': message_answer = champion.combine_with
if intent == 'how_to_use_skill': message_answer = champion.how_to_use_skill
if intent == 'introduce': message_answer = champion.introduce
return message_answer
def normalize_message(intent, message_answer, entities, champion,conversation_id):
ans = None
action = None
try:
skill_message = entities['skill']
except Exception:
skill_message = None
try:
champion_message = entities['champion']
except Exception:
champion_message = None
action = "action_"+intent
if intent == 'build_item': # "['Nguyệt Đao', 'Vô Cực Kiếm', 'Vũ Điệu Tử Thần', 'Áo Choàng Bóng Tối', 'Kiếm Ma Youmuu', 'Dao Găm Nham Thạch']"
list_items = eval(message_answer)
items = ', '.join(list_items)
ans = "{} lên đồ như sau: {}".format(champion.name, items)
if intent == 'support_socket': # ImageField
ans = champion.support_socket
if intent == 'counter': # ['Darius', 'Yasuo', 'Zed', 'Master Yi', 'Katarina', 'Hecarim', 'Akali', 'Renekton', 'LeBlanc', 'Jinx', 'Kassadin', 'Jax'] ans = MEDIA_URL + ans.url
message_answer = message_answer.replace('"','')
message_answer = message_answer.replace("'",'')
list_champions = message_answer.strip('][').split(', ')
champions = ', '.join(list_champions)
ans = "{} khắc chế được các tướng: {}".format(champion.name,champions)
if intent == 'be_countered': # ['Jax', 'Riven', 'Teemo', 'Fiora', 'Renekton', 'Tryndamere', 'Pantheon', 'Nasus', 'Lee Sin', 'Irelia', 'Ngộ Không', 'Jayce']
message_answer = message_answer.replace('"','')
message_answer = message_answer.replace("'",'')
list_champions = message_answer.strip('][').split(', ')
champions = ', '.join(list_champions)
ans = "{} bị khắc chế bởi các tướng: {}".format(champion.name, champions)
if intent == 'skill_up': # ['E', 'Q', 'E', 'Q', 'E', 'R', 'Q', 'Q', 'R', 'Q', 'R', 'E', 'E', 'W', 'W', 'W', 'W', 'W']
message_answer = message_answer.replace("'",'')
list_skills = message_answer.strip('][').split(', ')
skills = ', '.join(list_skills)
ans = "Thứ tự lên skill của {}: {}".format(champion.name, skills)
if intent == 'combo': # ['Q', 'R', 'W', 'Attack', 'E']
message_answer = message_answer.replace("'",'')
list_combos = message_answer.strip('][').split(', ')
combos = ', '.join(list_combos)
ans = "{} combo: {}".format(champion.name, combos)
if intent == 'combine_with': # ['Yasuo', 'Zilean', 'Tryndamere', 'Lee Sin', 'Fizz', 'Ahri', 'Orianna', 'Renekton', 'Vayne', 'Akali', 'Jax', 'Ezreal']
message_answer = message_answer.replace('"','')
message_answer = message_answer.replace("'",'')
list_champions = message_answer.replace('[','')
list_champions = list_champions.replace(']','')
# print("list champions: ", list_champions)
ans = "{} phối hợp tốt với: {}".format(champion.name, list_champions)
if intent == 'how_to_use_skill': # {'E': Luoi guom doa day}
skill_champion = eval(champion.how_to_use_skill)
skill = skill_champion[skill_message]
ans = "Skill {}: ".format(skill_message) + skill
if intent == 'introduce': # Từng là những người bảo hộ cao quý của Shurima ...
ans = champion.introduce
if intent == 'how_to_play':
ans = champion.how_to_play
if intent == 'what_about':
conversations = list(Conversation.query.filter_by(conversation_id=conversation_id))
print("length ", len(conversations))
conversation = None
for c in conversations[::-1]:
if c.intent != 'what_about':
conversation = c
break
print(conversation)
last_entities = eval(conversation.entities)
last_intent = conversation.intent
last_message_answer = conversation.message_answer
print("last intent: ",last_intent)
last_champion = last_entities.get('champion')
last_skill = last_entities.get('skill')
if champion_message is not None and skill_message is None:
champion = Champion.query.filter_by(name=champion_message).first()
this_entities = dict()
this_entities['champion'] = champion.name
this_entities['skill'] = last_skill
this_answer = get_raw_answer(last_intent, champion)
ans,action = normalize_message(last_intent, this_answer, this_entities, champion, conversation_id)
return ans,action
if champion_message is None and skill_message is not None:
champion = Champion.query.filter_by(name=last_champion).first()
this_entities = dict()
this_entities['champion'] = champion.name
this_entities['skill'] = skill_message
last_intent = 'how_to_use_skill'
this_answer = get_raw_answer(last_intent, champion)
ans,action = normalize_message(last_intent, this_answer, this_entities, champion, conversation_id)
return ans,action
if champion_message is None and skill_message is None:
ans = "Tôi không hiểu ý của bạn. Mời bạn nhập lại câu hỏi rõ ràng hơn."
action = "action_ask_hero_and_skill"
return ans,action
return ans,action
def is_valid_what_about(conversation_id):
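# A 'what_about' follow-up can only be answered if the conversation already
# contains at least one earlier turn with a concrete intent; return True in that case.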
conversations = list(Conversation.query.filter_by(conversation_id=conversation_id))
if len(conversations) == 0:
return False
conversation = None
for c in conversations[::-1]:
if c.intent != 'what_about':
conversation = c
break
if conversation is None:
return False
return True
def string_to_dict(entities):
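# Parse the entity string stored in the Conversation table back into a dict.
# eval() assumes the stored value is a trusted Python literal; ast.literal_eval
# would be a safer drop-in replacement.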
ans = eval(entities)
return ans
def to_json(intent, action, message_answer):
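# Pack intent, action and answer into a plain dict; despite the name it does not
# serialise to JSON itself, the caller is expected to do that.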
ans = dict()
ans['intent'] = intent
ans['action'] = action
ans['message_answer'] = message_answer
return ans
def is_ask_more(conversation_id):
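# True if the bot's most recent turn in this conversation was a clarification
# prompt (one of the action_ask_* actions), i.e. we are still waiting for the
# user to supply a missing champion/skill/intent.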
conversations = list(Conversation.query.filter_by(conversation_id=conversation_id))
if len(conversations) == 0:
return False
print("conversations[-1].action: ", conversations[-1].action)
if conversations[-1].action in ['action_ask_hero','action_ask_skill','action_ask_hero_and_skill','action_ask_intent']:
return True
return False
def get_action_ask_more(conversation_id):
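# Return which clarification action the last turn asked for, or None if the
# last turn was not a clarification prompt.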
conversations = list(Conversation.query.filter_by(conversation_id=conversation_id))
if conversations[-1].action in ['action_ask_hero','action_ask_skill','action_ask_hero_and_skill','action_ask_intent']:
return conversations[-1].action
return None
def get_conversation_ask_more(conversation_id):
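# Walk the conversation backwards and return the most recent turn that was a
# clarification prompt; fall back to the latest turn if none is found.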
conversations = list(Conversation.query.filter_by(conversation_id=conversation_id))[::-1]
conversation = None
for c in conversations:
if c.action in ['action_ask_hero','action_ask_skill','action_ask_hero_and_skill','action_ask_intent']:
conversation = c
break
if conversation is None:
return list(Conversation.query.filter_by(conversation_id=conversation_id))[-1]
return conversation
def get_conversation_what_about(conversation_id):
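# Return the most recent turn whose intent is not 'what_about', i.e. the turn a
# follow-up question ("còn ... thì sao") refers back to.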
conversation = None
conversations = list(Conversation.query.filter_by(conversation_id=conversation_id))
for c in conversations[::-1]:
if c.intent != 'what_about':
conversation = c
break
return conversation
def tolower_message(message_question):
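# Return the user's message lower-cased.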
ans = message_question.lower()
return ans
def getDictPostResponse(conversation_id, message_question, entities, prob, intent):
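# Main dispatcher: turn one incoming message into a reply dict with keys
# 'intent', 'action' and 'message_answer'.
# - greetings get a fixed reply;
# - high-confidence predictions (prob > 0.80) are answered directly;
# - "còn ... thì sao" follow-ups reuse the intent/entities of the previous turn;
# - low confidence or missing champion/skill entities produce an action_ask_* clarification prompt;
# - if the bot previously asked for clarification, the new message is merged with that pending turn.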
try:
if "chào" in message_question.lower() or "hello" in message_question.lower() or "chao" in message_question.lower():
intent = "say_hi"
action = "action_say_hi"
message_answer = "chào bạn, đây là chatbot lol."
print(message_question.lower())
res = to_json(intent, action, message_answer)
return res
if prob > 0.80:
if ('champion' in entities and intent != 'how_to_use_skill') or ('champion' in entities and 'skill' in entities and intent == 'how_to_use_skill'):
champion = Champion.query.filter_by(name=entities['champion']).first()
message_answer = get_raw_answer(intent, champion)
message_answer,action = normalize_message(intent,message_answer,entities,champion,conversation_id)
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action="action_"+intent)
db.session.add(conversation)  # persist so follow-up ("còn ...") and ask-more flows can find this turn
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
#return intent, action, message
return res
if "còn" in message_question.lower() or "thì sao" in message_question.lower():
intent = 'what_about'
if not is_ask_more(conversation_id):
if intent == 'what_about':
conversation_what_about = get_conversation_what_about(conversation_id)
print(conversation_what_about.intent)
if 'champion' not in entities and 'skill' not in entities:
action = 'action_ask_hero_and_skill'
message_answer = 'Không xác định được tướng và kĩ năng, mời bạn nhập thêm.'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=conversation_what_about.intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
#return intent, action, message
return res
if 'champion' not in entities:
action = 'action_ask_hero'
message_answer = 'Không xác định được tướng, mời bạn nhập thêm.'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=conversation_what_about.intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
#return intent, action, message
return res
entities_what_about = string_to_dict(conversation_what_about.entities)
# print("entities_what_about: {}".format(entities_what_about))
if 'skill' in entities_what_about and conversation_what_about.intent == 'how_to_use_skill':
entities['skill'] = entities_what_about['skill']
if 'skill' not in entities and conversation_what_about.intent == 'how_to_use_skill':
action = 'action_ask_skill'
message_answer = 'Không xác định được kĩ năng, mời bạn nhập thêm.'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=conversation_what_about.intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
return res
intent = conversation_what_about.intent
champion = Champion.query.filter_by(name=entities['champion']).first()
message_answer = get_raw_answer(intent, champion)
message_answer,action = normalize_message(intent,message_answer,entities,champion,conversation_id)
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action="action_"+intent)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
return res
if prob < 0.5:
action = 'action_ask_intent'
message_answer = 'Tôi không hiểu ý của bạn, mời bạn nhập thêm.'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
return res
if intent == 'how_to_use_skill':
print("entities:", entities)
if 'champion' not in entities and 'skill' not in entities:
action = 'action_ask_hero_and_skill'
message_answer = 'Không xác định được tướng và kĩ năng, mời bạn nhập thêm'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
return res
if 'champion' not in entities:
action = 'action_ask_hero'
message_answer = 'Không xác định được tướng, mời bạn nhập thêm'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
return res
if 'skill' not in entities:
action = 'action_ask_skill'
message_answer = 'Không xác định được kĩ năng, mời bạn nhập thêm'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
return res
if intent != 'how_to_use_skill':
if 'champion' not in entities:
action = 'action_ask_hero'
message_answer = 'Không xác định được tướng, mời bạn nhập thêm'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
return res
champion = Champion.query.filter_by(name=entities['champion']).first()
message_answer = get_raw_answer( intent, champion)
message_answer,action = normalize_message(intent,message_answer,entities,champion,conversation_id)
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action="action_"+intent)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
#return intent, action, message
return res
if is_ask_more(conversation_id):
conversation_ask_more = get_conversation_ask_more(conversation_id)
if conversation_ask_more.action == 'action_ask_hero':
if 'champion' in entities:
name = entities['champion']
intent = conversation_ask_more.intent
champion = Champion.query.filter_by(name=name).first()
entities_ask_more = string_to_dict(conversation_ask_more.entities)
if 'skill' in entities_ask_more:
entities['skill'] = entities_ask_more['skill']
message_answer = get_raw_answer( intent, champion)
message_answer,action = normalize_message(intent,message_answer,entities,champion,conversation_id)
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action="action_"+intent)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
return res
else:
action = 'action_ask_hero'
message_answer = 'Không xác định được tướng, mời bạn nhập thêm.'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
return res
if conversation_ask_more.action == 'action_ask_skill':
print(conversation_ask_more.message_question)
if 'skill' in entities:
entities_ask_more = string_to_dict(conversation_ask_more.entities)
if 'champion' in entities_ask_more:
entities['champion'] = entities_ask_more['champion']
print("entities", entities)
name = entities['champion']
intent = conversation_ask_more.intent
champion = Champion.query.filter_by(name=name).first()
message_answer = get_raw_answer(intent, champion)
message_answer,action = normalize_message(intent,message_answer,entities,champion,conversation_id)
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action="action_"+intent)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
return res
else:
action = 'action_ask_skill'
message_answer = 'Không xác định được kĩ năng, mời bạn nhập thêm.'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
return res
if conversation_ask_more.action == 'action_ask_hero_and_skill':
if 'skill' in entities and 'champion' in entities:
entities_ask_more = string_to_dict(conversation_ask_more.entities)
name = None
new_entities = dict()
if 'champion' in entities:
new_entities['champion'] = entities['champion']
if 'skill' in entities:
new_entities['skill'] = entities['skill']
if 'champion' in entities_ask_more:
new_entities['champion'] = entities_ask_more['champion']
if 'skill' in entities_ask_more:
new_entities['skill'] = entities_ask_more['skill']
champion = Champion.query.filter_by(name=new_entities['champion']).first()
intent = conversation_ask_more.intent
message_answer = get_raw_answer(intent, champion)
message_answer,action = normalize_message(intent,message_answer,entities,champion,conversation_id)
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action="action_"+intent)
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
db.session.add(conversation)
db.session.commit()
return res
else:
action = 'action_ask_hero_and_skill'
message_answer = 'Không xác định được tướng và kĩ năng, mời bạn nhập thêm.'
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action=action)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
return res
if conversation_ask_more.action == 'action_ask_intent':
if ('champion' in entities and intent != 'how_to_use_skill') or ('champion' in entities and 'skill' in entities and intent == 'how_to_use_skill'):
champion = Champion.query.filter_by(name=entities['champion']).first()
message_answer = get_raw_answer( intent, champion)
message_answer,action = normalize_message(intent,message_answer,entities,champion,conversation_id)
conversation = Conversation(conversation_id=conversation_id,message_question=message_question,
message_answer=message_answer,intent=intent,entities=entities, action="action_"+intent)
db.session.add(conversation)
db.session.commit()
res = to_json(intent, action, message_answer)
# res['probability'] = str(prob)
#return intent, action, message
return res
else:
intent = "ask_intent"
message_answer = 'Tôi không hiểu ý của bạn, mời bạn nhập thêm. '
action = "action_ask_intent"
res = to_json(intent,action, message_answer)
return res
except Exception:
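# NOTE: this catch-all hides every failure (DB errors, KeyErrors, ...) behind the
# generic fallback below; logging the exception here would make debugging much easier.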
intent = "ask_intent"
message_answer = 'Tôi không hiểu ý của bạn, mời bạn nhập thêm. '
action = "action_ask_intent"
res = to_json(intent,action, message_answer)
return res
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.
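# --- Usage sketch (illustrative only, not part of the original module) ---
# Minimal example of how getDictPostResponse is meant to be called once an NLU
# model has produced an intent, an entity dict and a confidence score. The
# conversation id, message and prediction below are made-up values; in the real
# app they come from the request payload and the classifier, and a Flask app /
# database context must be active for the Champion and Conversation queries to work.
if __name__ == '__main__':
    example_reply = getDictPostResponse(
        conversation_id='demo-1',
        message_question=tolower_message('Yasuo lên đồ như thế nào?'),
        entities={'champion': 'Yasuo'},
        prob=0.95,
        intent='build_item',
    )
    print(example_reply)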