| column | dtype | stats |
| --- | --- | --- |
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | list | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |
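The records below follow this schema, one source file per row. As a quick orientation, here is a minimal sketch of reading such records with the Hugging Face `datasets` library; the identifier `org/dataset-name` is a hypothetical placeholder (this page does not state the real dataset ID), and streaming is assumed so the large `content` column is not downloaded wholesale.

```python
# Minimal sketch, assuming the records are hosted on the Hugging Face Hub.
# "org/dataset-name" is a hypothetical placeholder, not the real dataset ID.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)

for row in ds.take(3):
    # Each record carries repo/file metadata plus the raw file text in `content`.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```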
---
path: /setup.py | repo_name: BugBreaker/smote_variants | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 3,240 | is_vendor: false | is_generated: false
blob_id: 6db2f2bffdf1414130a745c38c695d1487cb7613 | directory_id: 05ea9d119263eca2292e23fa2151b7b4cabd8de6 | content_id: 188200af449f4008c216f639b97207689b00c9c8
snapshot_id: 55f5f650abac163a1dbebf71adb96ce13060e6da | revision_id: dd937a5c827580912ec81d3e209ae96074623301
visit_date: 2020-04-16T21:26:05.344604 | revision_date: 2019-01-01T16:36:23 | committer_date: 2019-01-01T16:36:23
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
import os
import codecs

from setuptools import setup

def readme():
    with codecs.open('README.rst', encoding='utf-8-sig') as f:
        return f.read()

version_file = os.path.join('smote_variants', '_version.py')
with open(version_file) as f:
    exec(f.read())

DISTNAME = 'smote_variants'
DESCRIPTION = 'Variants of the synthetic minority oversampling technique (SMOTE) for imbalanced learning'
LONG_DESCRIPTION = readme()
LONG_DESCRIPTION_CONTENT_TYPE = 'text/x-rst'
MAINTAINER = 'Gyorgy Kovacs'
MAINTAINER_EMAIL = '[email protected]'
URL = 'https://github.com/gykovacs/smote-variants'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/gykovacs/smote-variants'
VERSION = __version__
CLASSIFIERS = ['Intended Audience :: Science/Research',
               'Intended Audience :: Developers',
               'Development Status :: 3 - Alpha',
               'License :: OSI Approved :: MIT License',
               'Programming Language :: Python',
               'Topic :: Scientific/Engineering :: Artificial Intelligence',
               'Topic :: Software Development',
               'Operating System :: Microsoft :: Windows',
               'Operating System :: POSIX',
               'Operating System :: Unix',
               'Operating System :: MacOS']
INSTALL_REQUIRES = ['numpy>=1.13.0',
                    'scipy',
                    'scikit-learn',
                    'joblib',
                    'minisom',
                    'statistics',
                    'tensorflow',
                    'keras']
EXTRAS_REQUIRE = {'tests': ['nose'],
                  'docs': ['sphinx', 'sphinx-gallery', 'sphinx_rtd_theme',
                           'matplotlib', 'pandas']}
PYTHON_REQUIRES = '>=3.5'
TEST_SUITE = 'nose.collector'
PACKAGE_DIR = {'smote_variants': 'smote_variants'}

setup(name=DISTNAME,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      description=DESCRIPTION,
      license=LICENSE,
      url=URL,
      version=VERSION,
      download_url=DOWNLOAD_URL,
      long_description=LONG_DESCRIPTION,
      long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
      zip_safe=False,
      classifiers=CLASSIFIERS,
      install_requires=INSTALL_REQUIRES,
      extras_require=EXTRAS_REQUIRE,
      python_requires=PYTHON_REQUIRES,
      test_suite=TEST_SUITE,
      package_dir=PACKAGE_DIR)

#setup(name='smote_variants',
#      version=getversion(),
#      description='smote_variants',
#      long_description=readme(),
#      classifiers=[
#        'Development Status :: 3 - Alpha',
#        'License :: OSI Approved :: MIT License',
#        'Programming Language :: Python',
#        'Topic :: Scientific/Engineering :: Artificial Intelligence'],
#      url='http://github.com/gykovacs/smote_variants',
#      author='Gyorgy Kovacs',
#      author_email='[email protected]',
#      license='MIT',
#      packages=['smote_variants'],
#      install_requires=[
#          'joblib',
#          'numpy',
#          'pandas',
#          'scipy',
#          'sklearn',
#          'minisom',
#          'statistics',
#      ],
#      py_modules=['smote_variants'],
#      python_requires='>=3.5',
#      zip_safe=False,
#      package_dir= {'smote_variants': 'smote_variants'},
#      package_data= {},
#      tests_require= ['nose'],
#      test_suite= 'nose.collector'
#      )
```
---
path: /tests/test_api.py | repo_name: avara1986/gozokia | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 1,212 | is_vendor: false | is_generated: false
blob_id: 233365653559692108c043142b29b488ee4196ec | directory_id: 47b028ec01008ed901f2fd7779abe47f52d458fe | content_id: 279dc71cb898c8d6e493c54c58b52be80ebc4953
snapshot_id: d6a5c2214f96c666283f129b96d22ce78291b32e | revision_id: 03da46359c4a97a35b8f94686cccec7fc9b438cd
visit_date: 2020-12-24T06:24:06.031752 | revision_date: 2016-10-12T10:27:12 | committer_date: 2016-10-12T10:27:12
github_id: 38,059,692 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
# encoding: utf-8
import unittest
import os

from gozokia import Gozokia
from gozokia.rules import GreetingRaise, GreetingObjetive

os.environ.setdefault("GOZOKIA_SETTINGS_MODULE", "tests.settings_tests")


class ApiTest(unittest.TestCase):

    def test_foo(self):
        goz = Gozokia()
        goz.initialize()
        goz.set_io(input_type="value", output_type="value")
        value = "foo"
        self.assertEqual(goz.api(value), "bar")
        value = "foo"
        self.assertEqual(goz.api(value), "bar second")
        value = "foo"
        self.assertEqual(goz.api(value), "No rules. you said: foo")

    def test_greetings(self):
        goz = Gozokia()
        goz.rule(name='greeting', type=goz.RAISE_COND, rank=100)(GreetingRaise)
        goz.initialize()
        goz.set_io(input_type="value", output_type="value")
        value = "foo"
        self.assertEqual(goz.api(value), "bar")
        value = "Bacon"
        self.assertEqual(goz.api(value), "No rules. you said: Bacon")
        value = "Hi"
        self.assertEqual(goz.api(value), "Hi, who are you?")
        value = "i am Alberto"
        self.assertEqual(goz.api(value), "Hi, alberto")


if __name__ == '__main__':
    unittest.main()
```
---
path: /plugins/agent2/agent2/__init__.py | repo_name: manuelnaranjo/OpenProximity | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 988 | is_vendor: false | is_generated: false
blob_id: abb4fca4705ef6f801a4ae2c4b73ea567e65411f | directory_id: c7f1c021d88e215dccb0bbb10db02293cd8528b8 | content_id: 06fa9186ad1d3376ceff7add1d40b3ac454ed2dd
snapshot_id: d11e61418b2249d210f0bbf5adbc6ccc38cd2773 | revision_id: f93c8009e40ce60c65090370adf0427e944d375b
visit_date: 2023-08-31T11:17:28.033936 | revision_date: 2011-11-13T03:27:10 | committer_date: 2011-11-13T03:27:10
github_id: 1,778,676 | star_events_count: 4 | fork_events_count: 2 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
# OpenProximity2.0 is a proximity marketing OpenSource system.
# Copyright (C) 2009,2008 Naranjo Manuel Francisco <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

# A data collector agent compatible with Global Scanner

__version_info__ = ('0', '0', '1')
__version__ = '.'.join(__version_info__)

def statistics_reset(connection):
    print "agent statics reset"
```
---
path: /durgajobs/testapp/admin.py | repo_name: djangoprojects5pm/durga-jobs-project | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 835 | is_vendor: false | is_generated: false
blob_id: 1d0bddf3aee9a3f85fd686500670596d63d534b2 | directory_id: 71a4a5ff8dac94da32769710ed7734b6d93013de | content_id: 22abbfb07dee953ad05bbe9e0aeebfcf61c1e93b
snapshot_id: fc6f114ac37276bda1567470831a5ff464432254 | revision_id: 7cd967a2114a3bdd3038931a1ee15a7fa2367911
visit_date: 2022-11-22T07:39:08.747916 | revision_date: 2020-07-20T13:44:04 | committer_date: 2020-07-20T13:44:04
github_id: 281,136,994 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
from django.contrib import admin
from testapp.models import hydjobs, blorejobs, chennaijobs, punejobs

# Register your models here.
class hydjobsAdmin(admin.ModelAdmin):
    list_display = ['date', 'company', 'title', 'eligibility', 'address', 'email', 'phonenumber']

class blorejobsAdmin(admin.ModelAdmin):
    list_display = ['date', 'company', 'title', 'eligibility', 'address', 'email', 'phonenumber']

class chennaijobsAdmin(admin.ModelAdmin):
    list_display = ['date', 'company', 'title', 'eligibility', 'address', 'email', 'phonenumber']

class punejobsAdmin(admin.ModelAdmin):
    list_display = ['date', 'company', 'title', 'eligibility', 'address', 'email', 'phonenumber']

admin.site.register(hydjobs, hydjobsAdmin)
admin.site.register(blorejobs, blorejobsAdmin)
admin.site.register(chennaijobs, chennaijobsAdmin)
admin.site.register(punejobs, punejobsAdmin)
```
---
path: /pylith/topology/JacobianViewer.py | repo_name: calum-chamberlain/pylith | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 3,525 | is_vendor: false | is_generated: false
blob_id: 53dcc50f07e28eea9f33d772461b6d58768e8783 | directory_id: 409ce560793c070ef4211b99c5a4a5316a258c4f | content_id: b8265fba4e31aecf6a5a07b3d37f9c57c2e909a2
snapshot_id: bb718bfb4305f03b45d42348e5d4fa5ed5f4a918 | revision_id: 8712c39ade53c1cc5ac0e671e4296cee278c1dcf
visit_date: 2020-12-06T17:15:08.638337 | revision_date: 2016-05-15T20:30:28 | committer_date: 2016-05-15T20:30:28
github_id: 46,401,744 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2016-05-15T20:30:29 | gha_created_at: 2015-11-18T07:09:12 | gha_language: C++

```python
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2015 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#

## @file pylith/topology/JacobianViewer.py
##
## @brief Python object for writing system Jacobian to file.
##
## Factory: jacobian_viewer

from pylith.utils.PetscComponent import PetscComponent

# JacobianViewer class
class JacobianViewer(PetscComponent):
  """
  Python abstract base class for formulations of solving equations.

  In general, we use some explicit or implicit formulation of the PDEs
  to create a linear form, [A]{u}={b} that we can solve.

  Factory: pde_formulation.
  """

  # INVENTORY //////////////////////////////////////////////////////////

  class Inventory(PetscComponent.Inventory):
    """
    Python object for managing JacobianViewer facilities and properties.
    """

    ## @class Inventory
    ## Python object for managing JacobianViewer facilities and properties.
    ##
    ## \b Properties
    ## @li \b filename Filename for Jacobian matrix.
    ## @li \b time_format C style format string for time stamp in filename.
    ## @li \b time_constant Value used to normalize time stamp in filename.
    ##
    ## \b Facilities
    ## @li None

    import pyre.inventory

    filename = pyre.inventory.str("filename", default="jacobian.mat")
    filename.meta['tip'] = "Filename for Jacobian matrix."

    timeFormat = pyre.inventory.str("time_format", default="%f")
    timeFormat.meta['tip'] = "C style format string for time stamp in filename."

    from pyre.units.time import second
    timeConstant = pyre.inventory.dimensional("time_constant",
                                              default=1.0*second,
                                              validator=pyre.inventory.greater(0.0*second))
    timeConstant.meta['tip'] = \
        "Values used to normalize time stamp in filename."

  # PUBLIC METHODS /////////////////////////////////////////////////////

  def __init__(self, name="formulation"):
    """
    Constructor.
    """
    PetscComponent.__init__(self, name, facility="jacobian_viewer")
    return

  def view(self, jacobian, t, comm):
    """
    Write Jacobian to binary file.
    """
    jacobian.write(self._filenameStamp(t), comm)
    return

  # PRIVATE METHODS ////////////////////////////////////////////////////

  def _configure(self):
    """
    Set members based using inventory.
    """
    PetscComponent._configure(self)
    self.filename = self.inventory.filename
    self.timeFormat = self.inventory.timeFormat
    self.timeConstant = self.inventory.timeConstant
    return

  def _filenameStamp(self, t):
    """
    Create filename by extracting basename and adding a time stamp.
    """
    timeStamp = self.timeFormat % (t/self.timeConstant.value)
    basename = self.filename
    if basename.endswith(".mat"):
      basename = basename[0:len(basename)-4]
    filename = basename + "_t" + timeStamp + ".mat"
    return filename


# FACTORIES ////////////////////////////////////////////////////////////

def jacobian_viewer():
  """
  Factory associated with JacobianViewer.
  """
  return JacobianViewer()


# End of file
```
---
path: /zxserver/migrations/0013_zxcomments_active.py | repo_name: AlexBlueCrow/farmbackend | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 391 | is_vendor: false | is_generated: false
blob_id: 02026e3cc3bc04d240c802281453151f5a171cb6 | directory_id: 4a7e359bb329e3df757c2b6c812512c24ec615eb | content_id: 849c33e5850883aaea0e8f8950c9b6f5450d2b3a
snapshot_id: 7768e84edafea230de713241db6bb566f7b8407e | revision_id: 311b25543d312a6b9d1b68905c15a6785a0b5097
visit_date: 2021-08-20T04:47:28.018997 | revision_date: 2020-08-12T16:23:49 | committer_date: 2020-08-12T16:23:49
github_id: 214,458,002 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
# Generated by Django 2.2.5 on 2020-03-25 06:09

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('zxserver', '0012_auto_20200324_0957'),
    ]

    operations = [
        migrations.AddField(
            model_name='zxcomments',
            name='active',
            field=models.BooleanField(default=True),
        ),
    ]
```
---
path: /space_view3d_mouselook_navigation/dairin0d/utils_userinput.py | repo_name: PyrokinesisStudio/myblendercontrib | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 15,157 | is_vendor: false | is_generated: false
blob_id: e486af2f2186d3f22ed87c24324d04a5f84e88dd | directory_id: 6a15a85acf553d8091d9e17014ff327135859fca | content_id: fbe143da7915f475c8f19cd43c9862698d094362
snapshot_id: a45d12bb074c25940d0d8b871ac468afb84e100c | revision_id: 4cce128cd65cb0dcda7e410fb16a780217faa68e
visit_date: 2021-05-29T13:58:26.447837 | revision_date: 2015-07-09T12:08:13 | committer_date: 2015-07-09T12:08:13
github_id: 108,990,581 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2017-10-31T12:11:28 | gha_created_at: 2017-10-31T12:11:28 | gha_language: null

```python
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# ***** END GPL LICENSE BLOCK *****

import bpy

from .utils_python import reverse_enumerate
from .bpy_inspect import BlRna

class InputKeyMonitor:
    all_keys = bpy.types.Event.bl_rna.properties["type"].enum_items.keys()
    all_modifiers = {'alt', 'ctrl', 'oskey', 'shift'}
    all_events = bpy.types.Event.bl_rna.properties["value"].enum_items.keys()

    def __init__(self, event=None):
        self.event = ""
        self.states = {}
        self.invoke_key = 'NONE'
        self.invoke_event = 'NONE'
        if event is not None:
            self.invoke_key = event.type
            self.invoke_event = event.value
            self.update(event)

    def __getitem__(self, name):
        if ":" in name:
            return self.event == name
        return self.states.setdefault(name, False)

    def __setitem__(self, name, state):
        self.states[name] = state

    def update(self, event):
        if (event.value == 'PRESS') or (event.value == 'DOUBLE_CLICK'):
            self.states[event.type] = True
        elif event.value == 'RELEASE':
            self.states[event.type] = False

        self.states['alt'] = event.alt
        self.states['ctrl'] = event.ctrl
        self.states['oskey'] = event.oskey
        self.states['shift'] = event.shift

        self.event = event.type+":"+event.value

    def keychecker(self, keys):
        km = self
        keys = self.parse_keys(keys)
        def check(state=True):
            for key in keys:
                if key.startswith("!"):
                    if km[key[1:]] != state:
                        return True
                else:
                    if km[key] == state:
                        return True
            return False
        check.is_event = ((":" in keys[0]) if keys else False)
        return check

    def combine_key_parts(self, key, keyset, use_invoke_key=False):
        elements = key.split()
        combined0 = "".join(elements)
        combined1 = "_".join(elements)

        if use_invoke_key and (combined0 == "{INVOKEKEY}"):
            return self.invoke_key

        if combined0 in keyset:
            return combined0
        elif combined1 in keyset:
            return combined1

        return ""

    def parse_keys(self, keys_string):
        parts = keys_string.split(":")
        keys_string = parts[0]
        event_id = ""
        if len(parts) > 1:
            event_id = self.combine_key_parts(parts[1].upper(), self.all_events)
            if event_id:
                event_id = ":"+event_id

        keys = []
        for key in keys_string.split(","):
            key = key.strip()
            is_negative = key.startswith("!")
            prefix = ""
            if is_negative:
                key = key[1:]
                prefix = "!"

            key_id = self.combine_key_parts(key.upper(), self.all_keys, True)
            modifier_id = self.combine_key_parts(key.lower(), self.all_modifiers)

            if key_id:
                keys.append(prefix+key_id+event_id)
            elif modifier_id:
                if len(event_id) != 0:
                    modifier_id = modifier_id.upper()
                    if modifier_id == 'OSKEY': # has no left/right/ndof variants
                        keys.append(prefix+modifier_id+event_id)
                    else:
                        keys.append(prefix+"LEFT_"+modifier_id+event_id)
                        keys.append(prefix+"RIGHT_"+modifier_id+event_id)
                        keys.append(prefix+"NDOF_BUTTON_"+modifier_id+event_id)
                else:
                    keys.append(prefix+modifier_id)

        return keys

class ModeStack:
    def __init__(self, keys, transitions, default_mode, mode='NONE'):
        self.keys = keys
        self.prev_state = {}
        self.transitions = set(transitions)
        self.mode = mode
        self.default_mode = default_mode
        self.stack = [self.default_mode] # default mode should always be in the stack!

    def update(self):
        for name in self.keys:
            keychecker = self.keys[name]
            is_on = int(keychecker())

            if keychecker.is_event:
                delta_on = is_on * (-1 if name in self.stack else 1)
            else:
                delta_on = is_on - self.prev_state.get(name, 0)
                self.prev_state[name] = is_on

            if delta_on > 0:
                if self.transition_allowed(self.mode, name):
                    self.remove(name)
                    self.stack.append(name) # move to top
                    self.mode = name
            elif delta_on < 0:
                if self.mode != name:
                    self.remove(name)
                else:
                    self.find_transition()

    def remove(self, name):
        if name in self.stack:
            self.stack.remove(name)

    def find_transition(self):
        for i in range(len(self.stack)-1, -1, -1):
            name = self.stack[i]
            if self.transition_allowed(self.mode, name):
                self.mode = name
                self.stack = self.stack[:i+1]
                break

    def transition_allowed(self, mode0, mode1):
        is_allowed = (mode0+":"+mode1) in self.transitions
        is_allowed |= (mode1+":"+mode0) in self.transitions
        return is_allowed

    def add_transitions(self, transitions):
        self.transitions.update(transitions)

    def remove_transitions(self, transitions):
        self.transitions.difference_update(transitions)

class KeyMapUtils:
    keymap_categories = [
        ['Window'],
        ['Screen', 'Screen Editing'],
        ['View2D'],
        ['View2D Buttons List'],
        ['Header'],
        ['Grease Pencil'],
        ['3D View', 'Object Mode', 'Mesh', 'Curve', 'Armature', 'Metaball',
         'Lattice', 'Font', 'Pose', 'Vertex Paint', 'Weight Paint',
         'Weight Paint Vertex Selection', 'Face Mask', 'Image Paint', 'Sculpt',
         'Particle', 'Knife Tool Modal Map', 'Paint Stroke Modal',
         'Object Non-modal', 'View3D Walk Modal', 'View3D Fly Modal',
         'View3D Rotate Modal', 'View3D Move Modal', 'View3D Zoom Modal',
         'View3D Dolly Modal', '3D View Generic'],
        ['Frames'],
        ['Markers'],
        ['Animation'],
        ['Animation Channels'],
        ['Graph Editor', 'Graph Editor Generic'],
        ['Dopesheet'],
        ['NLA Editor', 'NLA Channels', 'NLA Generic'],
        ['Image', 'UV Editor', 'Image Paint', 'UV Sculpt', 'Image Generic'],
        ['Timeline'],
        ['Outliner'],
        ['Node Editor', 'Node Generic'],
        ['Sequencer', 'SequencerCommon', 'SequencerPreview'],
        ['Logic Editor'],
        ['File Browser', 'File Browser Main', 'File Browser Buttons'],
        ['Info'],
        ['Property Editor'],
        ['Text', 'Text Generic'],
        ['Console'],
        ['Clip', 'Clip Editor', 'Clip Graph Editor', 'Clip Dopesheet Editor', 'Mask Editing'],
        ['View3D Gesture Circle'],
        ['Gesture Straight Line'],
        ['Gesture Zoom Border'],
        ['Gesture Border'],
        ['Standard Modal Map'],
        ['Transform Modal Map'],
        ['Paint Curve'], # This one seems to be absent in the UI, so I don't know where it belongs
    ]

    @staticmethod
    def search(idname, place=None):
        """Iterate over keymap items with given idname.
        Yields tuples (keyconfig, keymap, keymap item)"""
        place_is_str = isinstance(place, str)
        keymaps = None
        keyconfigs = bpy.context.window_manager.keyconfigs
        if isinstance(place, bpy.types.KeyMap):
            keymaps = (place,)
            keyconfigs = (next((kc for kc in keyconfigs if place.name in kc), None),)
        elif isinstance(place, bpy.types.KeyConfig):
            keyconfigs = (place,)

        for kc in keyconfigs:
            for km in keymaps or kc.keymaps:
                if place_is_str and (km.name != place):
                    continue
                for kmi in km.keymap_items:
                    if kmi.idname == idname:
                        yield (kc, km, kmi)

    @staticmethod
    def exists(idname, place=None):
        return bool(next(KeyMapUtils.search(idname), False))

    @staticmethod
    def set_active(idname, active, place=None):
        for kc, km, kmi in KeyMapUtils.search(idname, place):
            kmi.active = active

    @staticmethod
    def remove(idname, user_defined=True, user_modified=True, place=None):
        for kc, km, kmi in list(KeyMapUtils.search(idname, place)):
            if (not user_defined) and kmi.is_user_defined:
                continue
            if (not user_modified) and kmi.is_user_modified:
                continue
            km.keymap_items.remove(kmi)

    @staticmethod
    def index(km, idname):
        for i, kmi in enumerate(km.keymap_items):
            if kmi.idname == idname:
                return i
        return -1

    @staticmethod
    def equal(kmi, event, pressed_keys=[]):
        """Test if event corresponds to the given keymap item"""
        modifier_match = (kmi.key_modifier == 'NONE') or (kmi.key_modifier in pressed_keys)
        modifier_match &= kmi.any or ((kmi.alt == event.alt) and (kmi.ctrl == event.ctrl)
            and (kmi.shift == event.shift) and (kmi.oskey == event.oskey))
        return ((kmi.type == event.type) and (kmi.value == event.value) and modifier_match)

    @staticmethod
    def clear(ko):
        if isinstance(ko, bpy.types.KeyMap):
            ko = ko.keymap_items
        elif isinstance(ko, bpy.types.KeyConfig):
            ko = ko.keymaps
        elif isinstance(ko, bpy.types.WindowManager):
            ko = ko.keyconfigs

        while len(ko) != 0:
            ko.remove(ko[0])

    @staticmethod
    def serialize(ko):
        if isinstance(ko, bpy.types.KeyMapItem):
            kmi = ko # also: kmi.map_type ? (seems that it's purely derivative)
            return dict(idname=kmi.idname, propvalue=kmi.propvalue,
                type=kmi.type, value=kmi.value, any=kmi.any,
                shift=kmi.shift, ctrl=kmi.ctrl, alt=kmi.alt,
                oskey=kmi.oskey, key_modifier=kmi.key_modifier,
                active=kmi.active, show_expanded=kmi.show_expanded,
                id=kmi.id,
                properties=BlRna.serialize(kmi.properties, ignore_default=True))
        elif isinstance(ko, bpy.types.KeyMap):
            km = ko
            return dict(name=km.name, space_type=km.space_type,
                region_type=km.region_type, is_modal=km.is_modal,
                is_user_modified=km.is_user_modified,
                show_expanded_children=km.show_expanded_children,
                keymap_items=[KeyMapUtils.serialize(kmi) for kmi in km.keymap_items])
        elif isinstance(ko, bpy.types.KeyConfig):
            kc = ko
            return dict(name=kc.name,
                keymaps=[KeyMapUtils.serialize(km) for km in kc.keymaps])

    @staticmethod
    def deserialize(ko, data, head=False):
        # keymap_items / keymaps / keyconfigs are reported as just "bpy_prop_collection" type
        if isinstance(ko, bpy.types.KeyMap):
            if ko.is_modal:
                kmi = ko.keymap_items.new_modal(data["propvalue"], data["type"], data["value"],
                    any=data.get("any", False), shift=data.get("shift", False),
                    ctrl=data.get("ctrl", False), alt=data.get("alt", False),
                    oskey=data.get("oskey", False),
                    key_modifier=data.get("key_modifier", 'NONE'))
            else:
                kmi = ko.keymap_items.new(data["idname"], data["type"], data["value"],
                    any=data.get("any", False), shift=data.get("shift", False),
                    ctrl=data.get("ctrl", False), alt=data.get("alt", False),
                    oskey=data.get("oskey", False),
                    key_modifier=data.get("key_modifier", 'NONE'), head=head)
            kmi.active = data.get("active", True)
            kmi.show_expanded = data.get("show_expanded", False)
            BlRna.deserialize(kmi.properties, data.get("properties", {}), suppress_errors=True)
        elif isinstance(ko, bpy.types.KeyConfig):
            # Note: for different modes, different space_type are required!
            # e.g. 'VIEW_3D' for "3D View", and 'EMPTY' for "Sculpt"
            km = ko.keymaps.new(data["name"], space_type=data.get("space_type", 'EMPTY'),
                region_type=data.get("region_type", 'WINDOW'),
                modal=data.get("is_modal", False))
            km.is_user_modified = data.get("is_user_modified", False)
            km.show_expanded_children = data.get("show_expanded_children", False)
            for kmi_data in data.get("keymap_items", []):
                KeyMapUtils.deserialize(km, kmi_data)
        elif isinstance(ko, bpy.types.WindowManager):
            kc = ko.keyconfigs.new(data["name"])
            for km_data in data.get("keymaps", []):
                KeyMapUtils.deserialize(kc, km_data)

    @staticmethod
    def insert(km, kmi_datas):
        if not kmi_datas:
            return

        km_items = [KeyMapUtils.serialize(kmi) for kmi in km.keymap_items]

        def insertion_index(idnames, to_end):
            if "*" in idnames:
                return (len(km_items)-1 if to_end else 0)
            for i, kmi_data in (reverse_enumerate(km_items) if to_end else enumerate(km_items)):
                if kmi_data["idname"] in idnames:
                    return i
            return None

        src_count = len(km.keymap_items)
        only_append = True

        for after, kmi_data, before in kmi_datas:
            i_after = (insertion_index(after, True) if after else None)
            i_before = (insertion_index(before, False) if before else None)

            if (i_before is None) and (i_after is None):
                i = len(km_items)
            elif i_before is None:
                i = i_after+1
            elif i_after is None:
                i = i_before
            else:
                i = (i_after+1 if "*" not in after else i_before)

            only_append &= (i >= src_count)

            km_items.insert(i, kmi_data)

        if only_append:
            for kmi_data in km_items[src_count:]:
                KeyMapUtils.deserialize(km, kmi_data)
        else:
            KeyMapUtils.clear(km)
            for kmi_data in km_items:
                KeyMapUtils.deserialize(km, kmi_data)
```
---
path: /tests/test_adjdictbqm.py | repo_name: arcondello/dimod | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 1,444 | is_vendor: false | is_generated: false
blob_id: ec9d306935b9d6e779748f27758791152ace2ff5 | directory_id: 581b8d28255229ce8c08c679ad1f169ec33dccc8 | content_id: bebba45ad99b789da7e46a03dd1e0748b981877f
snapshot_id: 6e3381299dd08e8aa01af2b95350ef92a9504ef9 | revision_id: 9af460dc0f9028a1292cf786063c278cae3c0c80
visit_date: 2023-08-20T18:57:06.730554 | revision_date: 2020-10-08T02:56:36 | committer_date: 2020-10-08T02:56:36
github_id: 115,138,524 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: Apache-2.0 | gha_event_created_at: 2022-04-21T16:23:24 | gha_created_at: 2017-12-22T17:55:46 | gha_language: Python

```python
# Copyright 2019 D-Wave Systems Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.
#
# =============================================================================
import unittest

import numpy as np

from dimod.bqm.adjdictbqm import AdjDictBQM


class TestObjectDtype(unittest.TestCase):
    # AdjDictBQM has an object dtype so it has some special cases that need
    # to be tested

    def test_dtypes_array_like_ints(self):
        # these should stay as python ints
        obj = [[0, 1], [1, 2]]
        bqm = AdjDictBQM(obj, 'BINARY')
        for _, bias in bqm.quadratic.items():
            self.assertIsInstance(bias, int)

    def test_dtypes_ndarray_ints(self):
        # these should stay as python ints
        obj = np.asarray([[0, 1], [1, 2]], dtype=np.int32)
        bqm = AdjDictBQM(obj, 'BINARY')
        for _, bias in bqm.quadratic.items():
            self.assertIsInstance(bias, np.int32)
```
---
path: /python/planetlabAPI.py | repo_name: JustinCappos/stork | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 8,986 | is_vendor: false | is_generated: false
blob_id: e23f0cb5a1eb50017a36c14d298d189291529d76 | directory_id: f704ae02dae34b34f668b475f0355970a20694c3 | content_id: ac8cad68dcc843e530443b72bf8fd2d2b8dd3da2
snapshot_id: 18575009d893db75681e8e0efa21db083576d133 | revision_id: 3299ea959bad6d145522e8f84df65df5e76fbd81
visit_date: 2021-01-13T14:18:46.267743 | revision_date: 2015-04-15T15:37:14 | committer_date: 2015-04-15T15:37:14
github_id: 34,002,712 | star_events_count: 1 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
# /usr/bin/env python

"""
<Program Name>
   planetlabAPI.py

<Started>
   September 16, 2007

<Author>
   Justin Cappos

<Purpose>
   Wraps all of the PLC authentication calls.   Hopefully I can make this
   less of a mess than the old code for this.

   Detailed information about args, the API itself, etc. is at:
   http://www.planet-lab.org/doc/plc_api and
   http://www.planet-lab.org/doc/plcapitut
"""

import arizonaconfig
import xmlrpclib

# Only need this for exception handling
import socket

#           [option, long option,  variable,     action,  data,  default, metavar, description]
"""arizonaconfig
   options=[["", "--PLauthtype", "PLauthtype", "store", "string", "https://", None, "The protocol used for auth (default https://)"],
            ["", "--PLauthsite", "PLauthsite", "store", "string", "www.planet-lab.org", None, "The authentication web site (default www.planet-lab.org)"],
            ["", "--PLauthport", "PLauthport", "store", "int", 443, None, "The port to use for authentication (default 443)"],
            ["", "--PLauthpath", "PLauthpath", "store", "string", "PLCAPI", None, "The path to use for authentication (default PLCAPI)"],
            ["", "--PLusername", "PLusername", "store", "string", None, None, "The username for PLC actions"],
            ["", "--PLpassword", "PLpassword", "store", "string", None, None, "The password for PLC actions"]]
   includes=[]
"""

PLlogindata = {}


def PlanetLablogout():
  """
  <Purpose>
     Unsets the global PLlogindata for PLC.

  <Arguments>
     None

  <Exceptions>
     None

  <Side Effects>
     Unsets the PLlogindata.

  <Returns>
     None
  """
  global PLlogindata
  PLlogindata = {}


def PlanetLablogin(username = None, password = None, authtype = None, authsite = None, authport = None, authpath = None):
  """
  <Purpose>
     Try to login to a PLC.

  <Arguments>
     username: A string with the username to try
     password: A string with the password to try
     authtype: The first part of the URL; the type of protocol
     authsite: The DNS name (or IP) where the data should be sent
     authport: The TCP port where data should be sent
     authpath: The path on the server (i.e. the path in the URL)

  <Exceptions>
     ValueError: This is thrown for most types of problems (bad username,
     bad password, etc.).   I don't intentially try to pass other
     exceptions through.   I'm not sure what xmlrpclib will raise.

  <Side Effects>
     Sets PLlogindata.   This is implicitly used throughout.

  <Returns>
     None (Exception thrown on failure)
  """
  global PLlogindata

  if username == None:
    username = arizonaconfig.get_option("PLusername")
    if not username:
      raise ValueError, "Must set PlanetLab username (PLusername)"

  if password == None:
    password = arizonaconfig.get_option("PLpassword")
    if not password:
      raise ValueError, "Must set PlanetLab password (PLpassword)"

  if password == "prompt":
    password = raw_input("password:")
    if not password:
      raise ValueError, "Must set PlanetLab password (PLpassword)"

  if authtype == None:
    authtype = arizonaconfig.get_option("PLauthtype")
  if authsite == None:
    authsite = arizonaconfig.get_option("PLauthsite")
  if authport == None:
    authport = arizonaconfig.get_option("PLauthport")
  if authpath == None:
    authpath = arizonaconfig.get_option("PLauthpath")

  # Build the authorization dict
  PLlogindata['auth'] = {'Username': username,
                         'AuthMethod': "password",
                         'AuthString': password,
                         'Role': 'user'}

  myurl = authtype+authsite+":"+str(authport)+"/"+authpath+"/"

  try:
    PLlogindata['server'] = xmlrpclib.Server(myurl, verbose = 0, allow_none=True)
  except IOError, errormessage:
    # perhaps the protocol is wrong?
    raise ValueError, errormessage

  try:
    PLlogindata['server'].AdmAuthCheck(PLlogindata['auth'])
  except xmlrpclib.Fault, errormessage:
    errormessagestring = str(errormessage)
    if errormessagestring.startswith("<Fault 103: ':") and errormessagestring.endswith("'>"):
      raise ValueError, errormessagestring[14:-2]
    else:
      raise
  except socket.gaierror, errormessage:
    # if I can't resolve the name of the website (for example)
    errormessagestring = str(errormessage)
    if errormessagestring.startswith("(7, '") and errormessagestring.endswith("')"):
      raise ValueError, errormessagestring[5:-2]
    elif errormessagestring.startswith("(-2, '") and errormessagestring.endswith("')"):
      raise ValueError, errormessagestring[6:-2]
    else:
      raise
  except socket.error, errormessage:
    # Connection error, etc.
    errormessagestring = str(errormessage)
    if errormessagestring.startswith("(61, '") and errormessagestring.endswith("')"):
      raise ValueError, errormessagestring[6:-2]
    elif errormessagestring.startswith("(113, '") and errormessagestring.endswith("')"):
      raise ValueError, errormessagestring[7:-2]
    elif errormessagestring.startswith("(110, '") and errormessagestring.endswith("')"):
      raise ValueError, errormessagestring[7:-2]
    else:
      raise
  except xmlrpclib.ProtocolError, errormessage:
    # bad path
    raise ValueError, errormessage


def doplccall(commandname, *args):
  """
  <Purpose>
     Perform a PLC call with *args and return the return data.
     THIS FUNCTION USES EVAL IN A NON-SAFE WAY (assuming malicious input)!!!

  <Arguments>
     *args: The arguments for the call

  <Exceptions>
     ValueError: I try to throw this for most types of problems (bad
     PLlogindata, etc.).   I don't intentially try to pass other
     exceptions through.   I'm not sure what xmlrpclib will raise.

  <Side Effects>
     Contacts PLC and may change the site state.

  <Returns>
     Depends on the calling function
  """
  if not PLlogindata:
    raise ValueError, "Non-existant PLserver authentication (must log in first)"

  arglist = ["PLlogindata['auth']"]
  for arg in args:
    arglist.append(repr(arg))

  try:
    retval = eval("PLlogindata['server']."+commandname+"(" + ",".join(arglist) + ")")
  except xmlrpclib.Fault, errormessage:
    errormessagestring = str(errormessage)
    if errormessagestring.startswith("<Fault 100: '") and errormessagestring.endswith("'>"):
      raise ValueError, errormessagestring[13:-2]
    elif errormessagestring.startswith("<Fault 102: '") and errormessagestring.endswith("'>"):
      raise ValueError, errormessagestring[13:-2]
    else:
      raise
  except xmlrpclib.ProtocolError, errormessage:
    errormessagestring = str(errormessage)
    # xmlrpclib.ProtocolError: <ProtocolError for www.planet-lab.org:443/PLCAPI/: 500 Internal Server Error>
    # I'll just raise this instead of trying to parse
    raise ValueError, errormessagestring
  except socket.error, errormessage:
    # socket.error: (110, 'Connection timed out')
    raise ValueError, "socket.error"+str(errormessage)

  return retval


def getUserData():
  """
  <Purpose>
     Perform a PLC call with *args and return the return data.
     THIS FUNCTION USES EVAL IN A NON-SAFE WAY (assuming malicious input)!!!

  <Arguments>
     *args: The arguments for the call

  <Exceptions>
     ValueError: I try to throw this for most types of problems (bad
     PLlogindata, etc.).   I don't intentially try to pass other
     exceptions through.   I'm not sure what xmlrpclib will raise.

  <Side Effects>
     Contacts PLC and may change the site state.

  <Returns>
     Depends on the calling function
  """
  global PLlogindata

  if not PLlogindata:
    raise ValueError, "Non-existant PLserver authentication (must log in first)"

  try:
    retval = PLlogindata['server'].AdmGetPersons(PLlogindata['auth'],
                                                 [PLlogindata['auth']['Username']],
                                                 ['first_name', 'last_name'])
  except xmlrpclib.Fault, errormessage:
    errormessagestring = str(errormessage)
    if errormessagestring.startswith("<Fault 100: '") and errormessagestring.endswith("'>"):
      raise ValueError, errormessagestring[13:-2]
    elif errormessagestring.startswith("<Fault 102: '") and errormessagestring.endswith("'>"):
      raise ValueError, errormessagestring[13:-2]
    else:
      raise
  except xmlrpclib.ProtocolError, errormessage:
    errormessagestring = str(errormessage)
    # xmlrpclib.ProtocolError: <ProtocolError for www.planet-lab.org:443/PLCAPI/: 500 Internal Server Error>
    # I'll just raise this instead of trying to parse
    raise ValueError, errormessagestring
  except socket.error, errormessage:
    # socket.error: (110, 'Connection timed out')
    raise ValueError, "socket.error"+str(errormessagestring)

  return retval
```
---
path: /test_site/mysite/news/migrations/0006_auto_20210608_1502.py | repo_name: KarinaYatskevich/python | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 1,095 | is_vendor: false | is_generated: false
blob_id: 5056b4eb34025036efed2f77739e4022a917041f | directory_id: 4ca8382d8e0bc00f4cd440be63d7d64fbbd033c0 | content_id: c4c425c07c24e3f981fdecb4725908c322ffb1cd
snapshot_id: 8ea0b8273399a313f161f5b4ce5e62e421109be1 | revision_id: b889532fe6dbf7f953b250c923584e910bc70b21
visit_date: 2023-08-28T14:52:27.298276 | revision_date: 2021-10-22T19:27:29 | committer_date: 2021-10-22T19:27:29
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
# Generated by Django 3.2.3 on 2021-06-08 12:02

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('news', '0005_alter_news_category'),
    ]

    operations = [
        migrations.RenameField(
            model_name='news',
            old_name='content',
            new_name='abstract',
        ),
        migrations.AddField(
            model_name='news',
            name='URI',
            field=models.URLField(default='default'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='news',
            name='abstract_in_another_language',
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name='news',
            name='another_title',
            field=models.CharField(default='default ', max_length=150),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='news',
            name='bibliographic_entry',
            field=models.TextField(blank=True),
        ),
    ]
```
---
path: /sdk/python/pulumi_azure_native/machinelearningexperimentation/get_account.py | repo_name: bpkgoud/pulumi-azure-native | branch_name: refs/heads/master
detected_licenses: ["BSD-3-Clause", "Apache-2.0"] | license_type: permissive | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 9,987 | is_vendor: false | is_generated: false
blob_id: 2cc8b618cd4e35fd10fa646f358e5c93a80a1fa9 | directory_id: 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | content_id: aa9a9943ca410746d33829d222c0140cfd266fd4
snapshot_id: 0817502630062efbc35134410c4a784b61a4736d | revision_id: a3215fe1b87fba69294f248017b1591767c2b96c
visit_date: 2023-08-29T22:39:49.984212 | revision_date: 2021-11-15T12:43:41 | committer_date: 2021-11-15T12:43:41
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs

__all__ = [
    'GetAccountResult',
    'AwaitableGetAccountResult',
    'get_account',
    'get_account_output',
]

@pulumi.output_type
class GetAccountResult:
    """
    An object that represents a machine learning team account.
    """
    def __init__(__self__, account_id=None, creation_date=None, description=None, discovery_uri=None, friendly_name=None, id=None, key_vault_id=None, location=None, name=None, provisioning_state=None, seats=None, storage_account=None, tags=None, type=None, vso_account_id=None):
        if account_id and not isinstance(account_id, str):
            raise TypeError("Expected argument 'account_id' to be a str")
        pulumi.set(__self__, "account_id", account_id)
        if creation_date and not isinstance(creation_date, str):
            raise TypeError("Expected argument 'creation_date' to be a str")
        pulumi.set(__self__, "creation_date", creation_date)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if discovery_uri and not isinstance(discovery_uri, str):
            raise TypeError("Expected argument 'discovery_uri' to be a str")
        pulumi.set(__self__, "discovery_uri", discovery_uri)
        if friendly_name and not isinstance(friendly_name, str):
            raise TypeError("Expected argument 'friendly_name' to be a str")
        pulumi.set(__self__, "friendly_name", friendly_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if key_vault_id and not isinstance(key_vault_id, str):
            raise TypeError("Expected argument 'key_vault_id' to be a str")
        pulumi.set(__self__, "key_vault_id", key_vault_id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if seats and not isinstance(seats, str):
            raise TypeError("Expected argument 'seats' to be a str")
        pulumi.set(__self__, "seats", seats)
        if storage_account and not isinstance(storage_account, dict):
            raise TypeError("Expected argument 'storage_account' to be a dict")
        pulumi.set(__self__, "storage_account", storage_account)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if vso_account_id and not isinstance(vso_account_id, str):
            raise TypeError("Expected argument 'vso_account_id' to be a str")
        pulumi.set(__self__, "vso_account_id", vso_account_id)

    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> str:
        """
        The immutable id associated with this team account.
        """
        return pulumi.get(self, "account_id")

    @property
    @pulumi.getter(name="creationDate")
    def creation_date(self) -> str:
        """
        The creation date of the machine learning team account in ISO8601 format.
        """
        return pulumi.get(self, "creation_date")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The description of this workspace.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="discoveryUri")
    def discovery_uri(self) -> str:
        """
        The uri for this machine learning team account.
        """
        return pulumi.get(self, "discovery_uri")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        The friendly name for this workspace. This will be the workspace name in the arm id when the workspace object gets created
        """
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="keyVaultId")
    def key_vault_id(self) -> str:
        """
        The fully qualified arm id of the user key vault.
        """
        return pulumi.get(self, "key_vault_id")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The location of the resource. This cannot be changed after the resource is created.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The current deployment state of team account resource. The provisioningState is to indicate states for resource provisioning.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def seats(self) -> Optional[str]:
        """
        The no of users/seats who can access this team account. This property defines the charge on the team account.
        """
        return pulumi.get(self, "seats")

    @property
    @pulumi.getter(name="storageAccount")
    def storage_account(self) -> 'outputs.StorageAccountPropertiesResponse':
        """
        The properties of the storage account for the machine learning team account.
        """
        return pulumi.get(self, "storage_account")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The tags of the resource.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="vsoAccountId")
    def vso_account_id(self) -> str:
        """
        The fully qualified arm id of the vso account to be used for this team account.
        """
        return pulumi.get(self, "vso_account_id")


class AwaitableGetAccountResult(GetAccountResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetAccountResult(
            account_id=self.account_id,
            creation_date=self.creation_date,
            description=self.description,
            discovery_uri=self.discovery_uri,
            friendly_name=self.friendly_name,
            id=self.id,
            key_vault_id=self.key_vault_id,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            seats=self.seats,
            storage_account=self.storage_account,
            tags=self.tags,
            type=self.type,
            vso_account_id=self.vso_account_id)


def get_account(account_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
    """
    An object that represents a machine learning team account.
    API Version: 2017-05-01-preview.


    :param str account_name: The name of the machine learning team account.
    :param str resource_group_name: The name of the resource group to which the machine learning team account belongs.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningexperimentation:getAccount', __args__, opts=opts, typ=GetAccountResult).value

    return AwaitableGetAccountResult(
        account_id=__ret__.account_id,
        creation_date=__ret__.creation_date,
        description=__ret__.description,
        discovery_uri=__ret__.discovery_uri,
        friendly_name=__ret__.friendly_name,
        id=__ret__.id,
        key_vault_id=__ret__.key_vault_id,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        seats=__ret__.seats,
        storage_account=__ret__.storage_account,
        tags=__ret__.tags,
        type=__ret__.type,
        vso_account_id=__ret__.vso_account_id)


@_utilities.lift_output_func(get_account)
def get_account_output(account_name: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccountResult]:
    """
    An object that represents a machine learning team account.
    API Version: 2017-05-01-preview.


    :param str account_name: The name of the machine learning team account.
    :param str resource_group_name: The name of the resource group to which the machine learning team account belongs.
    """
    ...
```
---
path: /10001-11000/10989.py | repo_name: winston1214/baekjoon | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 273 | is_vendor: false | is_generated: false
blob_id: 940287a3c1503fd4beefd16acea158f5422f72d3 | directory_id: 633ab8880dc367feefdb6ef565ed0e70a4094bc1 | content_id: dac6c76fc02397c18eca1041d39086789a6943ec
snapshot_id: 2e9740ee2824d7777f6e64d50087b5c040baf2c6 | revision_id: 20125255cd5b359023a6297f3761b2db1057d67d
visit_date: 2023-03-04T09:07:27.688072 | revision_date: 2021-02-16T13:51:49 | committer_date: 2021-02-16T13:51:49
github_id: 284,832,623 | star_events_count: 3 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
# @Author YoungMinKim
# baekjoon

import sys

N = int(sys.stdin.readline())
result = [0]*10001
for _ in range(N):
    a = int(sys.stdin.readline())
    result[a] = result[a]+1
for i in range(10001):
    if result[i] != 0:
        for j in range(result[i]):
            print(i)
```
---
path: /src/run.py | repo_name: ytyaru/Python.PySimpleGuiWeb.SetUp.20210618110027 | branch_name: refs/heads/master
detected_licenses: ["CC0-1.0"] | license_type: permissive | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 894 | is_vendor: false | is_generated: false
blob_id: 8807631112c2be71c1b5d45755803ffd6af7db0f | directory_id: 8606e128484a4cc1fc4e7b406817a7ea96b55c8b | content_id: fbb1ff4541fb7f17ac4c7648ff7c86c21ecf6c22
snapshot_id: a3eba2e262cbf40a3e26e854c849e604244aeb99 | revision_id: 9b5163d7eb07ff8efae5de91bd21d02a87455a39
visit_date: 2023-06-07T21:16:56.862107 | revision_date: 2021-06-18T06:38:15 | committer_date: 2021-06-18T06:38:15
github_id: 378,022,064 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
#!/usr/bin/env python3
# coding: utf8
import PySimpleGUIWeb as sg

print(dir(sg))
print(sg.theme_list())
sg.theme('DarkGreen')
layout = [
    [sg.Text('PySimpleGUIWeb テスト')],                                  # "PySimpleGUIWeb test"
    [sg.Text('名前', size=(15, 1)), sg.InputText('山田太郎')],            # "Name" / "Taro Yamada"
    [sg.Text('年齢', size=(15, 1)), sg.Spin(None, initial_value=20)],    # "Age"
    [sg.Text('趣味', size=(15, 1)), sg.Combo(['料理','読書','映画'])],    # "Hobby": cooking, reading, movies
    [sg.Submit(button_text='実行')]                                      # "Run"
]
window = sg.Window('PySimpleGUIWeb テスト', layout)
while True:
    event, values = window.read()
    if event is None:
        print('exit')
        break
    if event == '実行':  # "Run" button pressed
        show_message = "名前:" + values[0] + '\n'                # "Name:"
        show_message += "年齢:" + values[1] + '\n'               # "Age:"
        show_message += "趣味:" + values[2] + 'が入力されました。'  # "Hobby: ... was entered."
        print(show_message)
        sg.popup(show_message)
window.close()
```
---
path: /pyobjc/pyobjc/pyobjc-core-2.5.1/PyObjCTest/test_bridges.py | repo_name: apple-open-source/macos | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 2,106 | is_vendor: false | is_generated: false
blob_id: 17739c606e36fb190c627f7507e332546d8a1ae7 | directory_id: 1577e1cf4e89584a125cffb855ca50a9654c6d55 | content_id: e803ab31cb684dd435a09d66a77d2d80598b1a77
snapshot_id: a4188b5c2ef113d90281d03cd1b14e5ee52ebffb | revision_id: 2d2b15f13487673de33297e49f00ef94af743a9a
visit_date: 2023-08-01T11:03:26.870408 | revision_date: 2023-03-27T00:00:00 | committer_date: 2023-03-27T00:00:00
github_id: 180,595,052 | star_events_count: 124 | fork_events_count: 24 | gha_license_id: null | gha_event_created_at: 2022-12-27T14:54:09 | gha_created_at: 2019-04-10T14:06:23 | gha_language: null

```python
from PyObjCTools.TestSupport import *
from PyObjCTest.testbndl import OC_TestClass2
import objc
import collections
import sys

if sys.version_info[0] == 2:
    from UserList import UserList
    from UserDict import IterableUserDict
else:
    from collections import UserDict as IterableUserDict, UserList

NSMutableArray = objc.lookUpClass("NSMutableArray")
NSMutableDictionary = objc.lookUpClass("NSMutableDictionary")

def classOfProxy(value):
    return OC_TestClass2.classOfObject_(value)

class TestBridges (TestCase):
    # NOTE: the two "register" functions from objc._bridges aren't
    # tested explictly, but the tests in this class do verify that
    # the default registrations (which are made through those two
    # functions) work properly.

    def test_xrange(self):
        range_type = range if sys.version_info[0] == 3 else xrange
        v = range_type(0, 10)
        self.assertTrue(issubclass(classOfProxy(v), NSMutableArray))

    def test_user_collectons(self):
        # Note: Not "UserDict" because UserDict doesn't implement
        # __iter__ and hence isn't a collections.Mapping, and doesn't
        # implement enough API to implement the NSDictionary interface.
        v = IterableUserDict()
        self.assertTrue(issubclass(classOfProxy(v), NSMutableDictionary))

        v = UserList()
        self.assertTrue(issubclass(classOfProxy(v), NSMutableArray))

    def test_abc(self):
        class MySequence (collections.Sequence):
            def __getitem__(self, idx):
                raise IndexError(idx)

            def __len__(self):
                return 0

        class MyDictionary (collections.Mapping):
            def __getitem__(self, key):
                raise KeyError(key)

            def __len__(self):
                return 0

            def __iter__(self):
                return
                yield

        v = MyDictionary()
        self.assertTrue(issubclass(classOfProxy(v), NSMutableDictionary))

        v = MySequence()
        self.assertTrue(issubclass(classOfProxy(v), NSMutableArray))

if __name__ == "__main__":
    main()
```
---
path: /tests/common/test_retryutils.py | repo_name: VictorDenisov/ceph-lcm | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 3,106 | is_vendor: false | is_generated: false
blob_id: 90e7ecdd1b7ff7bb7d60e0fd4a70f7234fb44cf0 | directory_id: f000fa4e6ef1de9591eeabff43ba57b7bf32561d | content_id: 878419ecfa97affa641c0bd768d0921c7056692b
snapshot_id: 1aca07f2d17bfda8760d192ffd6d17645705b6e4 | revision_id: 3cfd9ced6879fca1c39039e195d22d897ddcde80
visit_date: 2021-01-15T09:19:23.723613 | revision_date: 2016-09-17T01:18:45 | committer_date: 2016-09-17T01:18:45
github_id: 68,424,913 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2016-09-17T01:17:36 | gha_created_at: 2016-09-17T01:17:36 | gha_language: null

```python
# -*- coding: utf-8 -*-
"""Tests for cephlcm.common.retryutils"""


import unittest.mock

import pymongo.errors
import pytest

from cephlcm.common import retryutils


@pytest.fixture
def func_always_fails():
    func = unittest.mock.MagicMock()
    func.__name__ = ""
    func.side_effect = Exception
    return func


@pytest.fixture
def func_always_passed():
    return unittest.mock.MagicMock()


@pytest.fixture
def func_pass_fail():
    func = unittest.mock.MagicMock()
    func.__name__ = ""
    func.side_effect = [Exception(), True]
    return func


@pytest.mark.parametrize("attempts, attempt", (
    (0, 0),
    (1, 0),
    (1, 3),
))
def test_exp_sleep_time_fails(attempts, attempt):
    with pytest.raises(ValueError):
        retryutils.exp_sleep_time(1, 10, attempts, attempt)


def test_exp_sleep_time():
    assert retryutils.exp_sleep_time(1, 10, 100, 1) == 1
    assert retryutils.exp_sleep_time(1, 10, 100, 100) == 10

    values = [
        retryutils.exp_sleep_time(1, 10, 10, num) for num in range(1, 11)]
    for idx, less in enumerate(values, start=1):
        for more in values[idx:]:
            assert less <= more


def test_simple_retry_ok(func_always_passed, func_pass_fail):
    for func in func_always_passed, func_pass_fail:
        retryutils.simple_retry()(func)()


def test_simple_retry_fail(func_always_fails):
    with pytest.raises(Exception):
        retryutils.simple_retry()(func_always_fails)()


def test_sleep_retry_ok_always(func_always_passed, no_sleep):
    retryutils.sleep_retry()(func_always_passed)()
    no_sleep.assert_not_called()


def test_sleep_retry_ok_failed_once(func_pass_fail, no_sleep):
    retryutils.sleep_retry()(func_pass_fail)()
    assert len(no_sleep.mock_calls) == 1


def test_sleep_retry_fail(func_always_fails, no_sleep):
    with pytest.raises(Exception):
        retryutils.sleep_retry()(func_always_fails)()
    assert len(no_sleep.mock_calls) == 5 - 1


@pytest.mark.parametrize("exc", (
    pymongo.errors.AutoReconnect,
    pymongo.errors.ConnectionFailure,
    pymongo.errors.ExecutionTimeout,
    pymongo.errors.CursorNotFound,
    pymongo.errors.ExceededMaxWaiters,
    pymongo.errors.NetworkTimeout,
    pymongo.errors.NotMasterError,
    pymongo.errors.ServerSelectionTimeoutError
))
def test_mongo_retry_ok(exc, func_pass_fail, no_sleep):
    func_pass_fail.side_effect = [exc(""), True]
    retryutils.mongo_retry()(func_pass_fail)()


@pytest.mark.parametrize("exc", (
    pymongo.errors.PyMongoError,
    pymongo.errors.ConfigurationError,
    pymongo.errors.OperationFailure,
    pymongo.errors.WriteConcernError,
    pymongo.errors.WriteError,
    pymongo.errors.WTimeoutError,
    pymongo.errors.DuplicateKeyError,
    pymongo.errors.BulkWriteError,
    pymongo.errors.InvalidOperation,
    pymongo.errors.BSONError,
    pymongo.errors.InvalidName,
    pymongo.errors.InvalidURI,
    pymongo.errors.DocumentTooLarge
))
def test_mongo_retry_fail(exc, func_pass_fail, no_sleep):
    func_pass_fail.side_effect = [exc(""), True]
    with pytest.raises(exc):
        retryutils.mongo_retry()(func_pass_fail)()
```
---
path: /Big48/클래스연습1.py | repo_name: MinksChung/BigdataCourse | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 1,357 | is_vendor: false | is_generated: false
blob_id: 2a4ab14fe77c86874630bbd75a3bd0aa9c75fbb6 | directory_id: 291fe7fb4cc5b682e560b0c5958e2220054451c6 | content_id: c1975edcbe42a3c2942626d1924915e230b33b94
snapshot_id: 44dc5e7e578515e1dafbb7870911e09347a788f4 | revision_id: 293803415da5d9f354059ea556818cc7610f36a5
visit_date: 2022-12-22T06:14:59.880933 | revision_date: 2020-01-26T14:58:09 | committer_date: 2020-01-26T14:58:09
github_id: 202,575,724 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2022-12-15T23:28:43 | gha_created_at: 2019-08-15T16:29:35 | gha_language: Python

```python
# Python is a 'function'-centered (module-centered) language. Modules and classes are separate things.

## Creating a class
class Dog:  # Class names are capitalized; written with just ':' and no ( )

    ## Member variables
    # variable declarations
    color = ""  # color
    field = ""  # breed

    ## Constructor function (init) (is a constructor a function? yes; the function that is
    ## called automatically when an object is created is called the constructor)
    def __init__(self):  # inside a class, the parameter name `self` marks this as the constructor
        # (Busan dialect) roughly: "I'm the constructor. I create your objects and get called
        # when that happens. I did it all."
        print("내가 마 생성자다 마. 내가 마 느그 객체 생성도 하고 마 그 때 호출도 하고 마 다했어 마.")

    ## Member functions
    def jump(self):
        print("강아지가 뛰고 있다.")  # "The dog is jumping."

    def sleep(self):
        print("강아지가 자고 있다.")  # "The dog is sleeping."

    ## Output function (str) (plays the role of Java's toString())
    def __str__(self):  # use self.<variable> to access this class's variables
        return self.color + ", " + self.field

## Creating an object
dog1 = Dog()
dog1.color = "빨간색"    # "red"
dog1.field = "토이푸들"  # "toy poodle"
print(dog1)
dog1.jump()

print("------------------------------------------------------")

## Creating an object
dog2 = Dog()
dog2.color = "까만색"    # "black"
dog2.field = "닥스훈트"  # "dachshund"
print(dog2)
dog2.sleep()
```
---
path: /Gauss_v45r9/Gen/DecFiles/options/13102432.py | repo_name: Sally27/backup_cmtuser_full | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 1,832 | is_vendor: false | is_generated: false
blob_id: 5ef9ea6662f4ffcc844776800d65dfb1c07daa47 | directory_id: be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | content_id: ef19e6f2fb686eeee810105d86b362e257cc42be
snapshot_id: 34782102ed23c6335c48650a6eaa901137355d00 | revision_id: 8924bebb935b96d438ce85b384cfc132d9af90f6
visit_date: 2020-05-21T09:27:04.370765 | revision_date: 2018-12-12T14:41:07 | committer_date: 2018-12-12T14:41:07
github_id: 185,989,173 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null

```python
# file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/13102432.py generated: Fri, 27 Mar 2015 16:09:59
#
# Event Type: 13102432
#
# ASCII decay Descriptor: {[[B_s0]nos -> pi+ pi- (pi0 -> gamma gamma)]cc, [[B_s0]os -> pi- pi+ (pi0 -> gamma gamma)]cc}
#
from Configurables import Generation
Generation().EventType = 13102432
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_pi+pi-pi0=DecProdCut,sqDalitz.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]

# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"

from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )

from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"

from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13102432
```
---
path: /usados/users/admin.py | repo_name: fabianfalon/drf-boilerplate | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: Python | src_encoding: UTF-8 | extension: py | length_bytes: 614 | is_vendor: false | is_generated: false
blob_id: 9aaa01fae066eb8b73fc2e88e7d5553d38668bed | directory_id: 7341a4f317639eed3c6868310c7421e8eb7016ce | content_id: b8baf198dd2a857373c88643faac6d26fb6227d9
snapshot_id: f637fb66f7dd260e4a3b9c9daf6217ad99a90765 | revision_id: d24fd3a8de653f9731d41781e1e4e207881fbbb2
visit_date: 2020-05-19T06:05:12.629587 | revision_date: 2019-11-12T18:44:15 | committer_date: 2019-11-12T18:44:15
github_id: 184,864,871 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2019-10-22T06:26:39 | gha_created_at: 2019-05-04T07:25:09 | gha_language: Python

```python
"""User models admin."""

# Django
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin

# Models
from .models import Profile, User


class CustomUserAdmin(UserAdmin):
    """User model admin."""

    list_display = ('email', 'username', 'first_name', 'last_name', 'is_staff',)


@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """Profile model admin."""

    list_display = ('user', 'dni', 'address', 'publications_numbers', 'birthdate')
    search_fields = ('user__username', 'user__email', )
    list_filter = ('dni',)


admin.site.register(User, CustomUserAdmin)
```
ca4ae6b51f677ae0b957d2c9e6b32f94cde2244b
3d2e5d1092acccfb73c07d68b6beeffc44b3f776
/planet/src/utils/plotting.py
cda9af3d36ef802034023f45d54fcd336fb5a5c4
[]
no_license
MatthijsBiondina/WorldModels
f6cbcfe5349da7119329ef10831810d1b85c9d02
ab468f1aa978e3aa4e05174db24922085d1e33b1
refs/heads/master
2022-12-22T11:54:46.040828
2020-09-23T11:41:48
2020-09-23T11:41:48
248,212,491
0
0
null
null
null
null
UTF-8
Python
false
false
2,336
py
import json
import os

from bokeh.plotting import output_file, figure, save
from bokeh.layouts import gridplot

import src.utils.config as cfg


def save_metrics(metrics: dict, save_loc: str):
    with open(os.path.join(save_loc, 'metrics.json'), 'w+') as f:
        json.dump(metrics, f, indent=2)

    output_file(os.path.join(save_loc, 'plt.html'), title=save_loc.split('/')[-1])

    s_top = figure(width=720, height=360, title="Performance",
                   x_axis_label='episodes', y_axis_label='reward')
    s_top.line(not_none(metrics['episodes'], metrics['rewards']), not_none(metrics['rewards']),
               legend_label="With Action Noise", line_color="orchid", line_width=3, line_alpha=0.66)
    s_top.line(not_none(metrics['episodes'], metrics['t_scores']), not_none(metrics['t_scores']),
               legend_label="Without Action Noise", line_color="royalblue", line_width=3, line_alpha=0.66)
    s_top.legend.location = "bottom_right"

    s_bot = figure(width=720, height=360, x_range=s_top.x_range, title="Loss Scores",
                   x_axis_label="episode", y_axis_label='loss')
    s_bot.line(not_none(metrics['episodes'], metrics['o_loss']), not_none(metrics['o_loss']),
               legend_label="Observation Loss (MSE)", line_color="orchid", line_width=3, line_alpha=0.66)
    s_bot.line(not_none(metrics['episodes'], metrics['r_loss']),
               list(map(lambda x: x / cfg.action_repeat, not_none(metrics['r_loss']))),
               legend_label="Reward Loss (MSE)", line_color="royalblue", line_width=3, line_alpha=0.66)
    s_bot.line(not_none(metrics['episodes'], metrics['kl_loss']),
               list(map(lambda x: x / (1 + cfg.overshooting_kl_beta) - cfg.free_nats,
                        not_none(metrics['kl_loss']))),
               legend_label="Complexity Loss (KL-divergence)", line_color="sienna", line_width=3, line_alpha=0.66)
    # s_bot.line(not_none(metrics['episodes'], metrics['p_loss']),
    #            list(map(lambda x: x / cfg.action_repeat, not_none(metrics['p_loss']))),
    #            legend_label="Policy Loss (MSE)", line_color="seagreen", line_width=3)

    p = gridplot([[s_top], [s_bot]])
    save(p)


def not_none(vlist, klist=None):
    if klist is None:
        return [x for x in vlist if x is not None]
    else:
        return [x for x, k in zip(vlist, klist) if k is not None]
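# A minimal usage sketch (editor addition, not part of the original module),
# showing the metrics keys save_metrics reads; every value below is made up,
# the directory 'out/run1' must already exist, and the same src.utils.config
# module is assumed to be importable.
if __name__ == '__main__':
    demo = {
        'episodes': [1, 2, 3],
        'rewards': [0.1, None, 0.7],    # None entries are dropped by not_none()
        't_scores': [None, 0.5, 0.9],
        'o_loss': [3.2, 2.1, 1.5],
        'r_loss': [0.9, 0.7, 0.5],
        'kl_loss': [4.0, 3.5, 3.1],
    }
    save_metrics(demo, 'out/run1')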
fe0c97d136e57f6216b83ca4c91edb61a8a19ef9
51f7e1a08f9db6dffae33c486381567c6f619d68
/tensorflow/contrib/learn/python/learn/learn_io/graph_io.py
d82ddb5c3b6c776d577b1a65697176de941176ce
[ "Apache-2.0" ]
permissive
jinumohan173/tensorflow
ec33271af31f93a44242bcd34fb6f6549290cc41
c967084e6af90b560b47435ff4d3292677353bfe
refs/heads/master
2020-03-28T01:56:36.509074
2019-05-16T11:39:04
2019-05-16T11:39:04
147,534,316
0
0
Apache-2.0
2018-09-05T14:48:36
2018-09-05T14:48:36
null
UTF-8
Python
false
false
21,737
py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Methods to read data in the graph.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import io_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import input as input_ops from tensorflow.python.training import queue_runner # Default name for key in the feature dict. KEY_FEATURE_NAME = '__key__' def read_batch_examples(file_pattern, batch_size, reader, randomize_input=True, num_epochs=None, queue_capacity=10000, num_threads=1, read_batch_size=1, parse_fn=None, name=None): """Adds operations to read, queue, batch `Example` protos. Given file pattern (or list of files), will setup a queue for file names, read `Example` proto using provided `reader`, use batch queue to create batches of examples of size `batch_size`. All queue runners are added to the queue runners collection, and may be started via `start_queue_runners`. All ops are added to the default graph. Use `parse_fn` if you need to do parsing / processing on single examples. Args: file_pattern: List of files or pattern of file paths containing `Example` records. See `tf.gfile.Glob` for pattern rules. batch_size: An int or scalar `Tensor` specifying the batch size to use. reader: A function or class that returns an object with `read` method, (filename tensor) -> (example tensor). randomize_input: Whether the input should be randomized. num_epochs: Integer specifying the number of times to read through the dataset. If `None`, cycles through the dataset forever. NOTE - If specified, creates a variable that must be initialized, so call `tf.initialize_all_variables()` as shown in the tests. queue_capacity: Capacity for input queue. num_threads: The number of threads enqueuing examples. read_batch_size: An int or scalar `Tensor` specifying the number of records to read at once parse_fn: Parsing function, takes `Example` Tensor returns parsed representation. If `None`, no parsing is done. name: Name of resulting op. Returns: String `Tensor` of batched `Example` proto. Raises: ValueError: for invalid inputs. 
""" _, examples = read_keyed_batch_examples( file_pattern=file_pattern, batch_size=batch_size, reader=reader, randomize_input=randomize_input, num_epochs=num_epochs, queue_capacity=queue_capacity, num_threads=num_threads, read_batch_size=read_batch_size, parse_fn=parse_fn, name=name) return examples def read_keyed_batch_examples( file_pattern, batch_size, reader, randomize_input=True, num_epochs=None, queue_capacity=10000, num_threads=1, read_batch_size=1, parse_fn=None, name=None): """Adds operations to read, queue, batch `Example` protos. Given file pattern (or list of files), will setup a queue for file names, read `Example` proto using provided `reader`, use batch queue to create batches of examples of size `batch_size`. All queue runners are added to the queue runners collection, and may be started via `start_queue_runners`. All ops are added to the default graph. Use `parse_fn` if you need to do parsing / processing on single examples. Args: file_pattern: List of files or pattern of file paths containing `Example` records. See `tf.gfile.Glob` for pattern rules. batch_size: An int or scalar `Tensor` specifying the batch size to use. reader: A function or class that returns an object with `read` method, (filename tensor) -> (example tensor). randomize_input: Whether the input should be randomized. num_epochs: Integer specifying the number of times to read through the dataset. If `None`, cycles through the dataset forever. NOTE - If specified, creates a variable that must be initialized, so call `tf.initialize_all_variables()` as shown in the tests. queue_capacity: Capacity for input queue. num_threads: The number of threads enqueuing examples. read_batch_size: An int or scalar `Tensor` specifying the number of records to read at once parse_fn: Parsing function, takes `Example` Tensor returns parsed representation. If `None`, no parsing is done. name: Name of resulting op. Returns: Returns tuple of: - `Tensor` of string keys. - String `Tensor` of batched `Example` proto. Raises: ValueError: for invalid inputs. """ # Retrieve files to read. if isinstance(file_pattern, list): file_names = file_pattern if not file_names: raise ValueError('No files given to dequeue_examples.') else: file_names = list(gfile.Glob(file_pattern)) if not file_names: raise ValueError('No files match %s.' % file_pattern) # Sort files so it will be deterministic for unit tests. They'll be shuffled # in `string_input_producer` if `randomize_input` is enabled. if not randomize_input: file_names = sorted(file_names) # Check input parameters are given and reasonable. if (not queue_capacity) or (queue_capacity <= 0): raise ValueError('Invalid queue_capacity %s.' % queue_capacity) if (batch_size is None) or ( (not isinstance(batch_size, ops.Tensor)) and (batch_size <= 0 or batch_size > queue_capacity)): raise ValueError( 'Invalid batch_size %s, with queue_capacity %s.' % (batch_size, queue_capacity)) if (read_batch_size is None) or ( (not isinstance(read_batch_size, ops.Tensor)) and (read_batch_size <= 0)): raise ValueError('Invalid read_batch_size %s.' % read_batch_size) if (not num_threads) or (num_threads <= 0): raise ValueError('Invalid num_threads %s.' % num_threads) if (num_epochs is not None) and (num_epochs <= 0): raise ValueError('Invalid num_epochs %s.' % num_epochs) with ops.name_scope(name, 'read_batch_examples', [file_pattern]) as scope: # Setup filename queue with shuffling. 
with ops.name_scope('file_name_queue') as file_name_queue_scope: file_name_queue = input_ops.string_input_producer( constant_op.constant(file_names, name='input'), shuffle=randomize_input, num_epochs=num_epochs, name=file_name_queue_scope) # Create readers, one per thread and set them to read from filename queue. with ops.name_scope('read'): example_list = [] for _ in range(num_threads): if read_batch_size > 1: keys, examples_proto = reader().read_up_to(file_name_queue, read_batch_size) else: keys, examples_proto = reader().read(file_name_queue) if parse_fn: parsed_examples = parse_fn(examples_proto) # Map keys into example map because batch_join doesn't support # tuple of Tensor + dict. if isinstance(parsed_examples, dict): parsed_examples[KEY_FEATURE_NAME] = keys example_list.append(parsed_examples) else: example_list.append((keys, parsed_examples)) else: example_list.append((keys, examples_proto)) enqueue_many = read_batch_size > 1 if num_epochs is not None: allow_smaller_final_batch = True else: allow_smaller_final_batch = False # Setup batching queue given list of read example tensors. if randomize_input: if isinstance(batch_size, ops.Tensor): min_after_dequeue = int(queue_capacity * 0.4) else: min_after_dequeue = max(queue_capacity - (3 * batch_size), batch_size) queued_examples_with_keys = input_ops.shuffle_batch_join( example_list, batch_size, capacity=queue_capacity, min_after_dequeue=min_after_dequeue, enqueue_many=enqueue_many, name=scope, allow_smaller_final_batch=allow_smaller_final_batch) else: queued_examples_with_keys = input_ops.batch_join( example_list, batch_size, capacity=queue_capacity, enqueue_many=enqueue_many, name=scope, allow_smaller_final_batch=allow_smaller_final_batch) if parse_fn and isinstance(queued_examples_with_keys, dict): queued_keys = queued_examples_with_keys.pop(KEY_FEATURE_NAME) return queued_keys, queued_examples_with_keys return queued_examples_with_keys def read_keyed_batch_features(file_pattern, batch_size, features, reader, randomize_input=True, num_epochs=None, queue_capacity=10000, reader_num_threads=1, feature_queue_capacity=100, num_queue_runners=2, parser_num_threads=None, parse_fn=None, name=None): """Adds operations to read, queue, batch and parse `Example` protos. Given file pattern (or list of files), will setup a queue for file names, read `Example` proto using provided `reader`, use batch queue to create batches of examples of size `batch_size` and parse example given `features` specification. All queue runners are added to the queue runners collection, and may be started via `start_queue_runners`. All ops are added to the default graph. Args: file_pattern: List of files or pattern of file paths containing `Example` records. See `tf.gfile.Glob` for pattern rules. batch_size: An int or scalar `Tensor` specifying the batch size to use. features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. reader: A function or class that returns an object with `read` method, (filename tensor) -> (example tensor). randomize_input: Whether the input should be randomized. num_epochs: Integer specifying the number of times to read through the dataset. If None, cycles through the dataset forever. NOTE - If specified, creates a variable that must be initialized, so call tf.initialize_local_variables() as shown in the tests. queue_capacity: Capacity for input queue. reader_num_threads: The number of threads to read examples. feature_queue_capacity: Capacity of the parsed features queue. 
num_queue_runners: Number of queue runners to start for the feature queue, Adding multiple queue runners for the parsed example queue helps maintain a full queue when the subsequent computations overall are cheaper than parsing. parser_num_threads: (Deprecated) The number of threads to parse examples. parse_fn: Parsing function, takes `Example` Tensor returns parsed representation. If `None`, no parsing is done. name: Name of resulting op. Returns: Returns tuple of: - `Tensor` of string keys. - A dict of `Tensor` or `SparseTensor` objects for each in `features`. Raises: ValueError: for invalid inputs. """ if parser_num_threads: # TODO(sibyl-Aix6ihai): Remove on Sept 3 2016. logging.warning('parser_num_threads is deprecated, it will be removed on' 'Sept 3 2016') with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope: keys, examples = read_keyed_batch_examples( file_pattern, batch_size, reader, randomize_input=randomize_input, num_epochs=num_epochs, queue_capacity=queue_capacity, num_threads=reader_num_threads, read_batch_size=batch_size, parse_fn=parse_fn, name=scope) # Parse the example. feature_map = parsing_ops.parse_example(examples, features) return queue_parsed_features( feature_map, keys=keys, feature_queue_capacity=feature_queue_capacity, num_queue_runners=num_queue_runners, name=scope) def queue_parsed_features(parsed_features, keys=None, feature_queue_capacity=100, num_queue_runners=2, name=None): """Speeds up parsing by using queues to do it asynchronously. This function adds the tensors in `parsed_features` to a queue, which allows the parsing (or any other expensive op before this) to be asynchronous wrt the rest of the training graph. This greatly improves read latency and speeds up training since the data will already be parsed and ready when each step of training needs it. All queue runners are added to the queue runners collection, and may be started via `start_queue_runners`. All ops are added to the default graph. Args: parsed_features: A dict of string key to `Tensor` or `SparseTensor` objects. keys: `Tensor` of string keys. feature_queue_capacity: Capacity of the parsed features queue. num_queue_runners: Number of queue runners to start for the feature queue, Adding multiple queue runners for the parsed example queue helps maintain a full queue when the subsequent computations overall are cheaper than parsing. name: Name of resulting op. Returns: Returns tuple of: - `Tensor` corresponding to `keys` if provided, otherwise `None`. - A dict of string key to `Tensor` or `SparseTensor` objects corresponding to `parsed_features`. """ args = list(parsed_features.values()) if keys is not None: args += [keys] with ops.name_scope(name, 'queue_parsed_features', args): # Lets also add preprocessed tensors into the queue types for each item of # the queue. tensors_to_enqueue = [] # Each entry contains the key, and a boolean which indicates whether the # tensor was a sparse tensor. tensors_mapping = [] # TODO(sibyl-Aix6ihai): Most of the functionality here is about pushing sparse # tensors into a queue. This could be taken care in somewhere else so others # can reuse it. Also, QueueBase maybe extended to handle sparse tensors # directly. 
for key in sorted(parsed_features.keys()): tensor = parsed_features[key] if isinstance(tensor, ops.SparseTensor): tensors_mapping.append((key, True)) tensors_to_enqueue.extend([tensor.indices, tensor.values, tensor.shape]) else: tensors_mapping.append((key, False)) tensors_to_enqueue.append(tensor) if keys is not None: tensors_to_enqueue.append(keys) queue_dtypes = [x.dtype for x in tensors_to_enqueue] input_queue = data_flow_ops.FIFOQueue(feature_queue_capacity, queue_dtypes) # Add a summary op to debug if our feature queue is full or not. logging_ops.scalar_summary('queue/parsed_features/%s/fraction_of_%d_full' % (input_queue.name, feature_queue_capacity), math_ops.cast(input_queue.size(), dtypes.float32) * (1. / feature_queue_capacity)) # Add multiple queue runners so that the queue is always full. Adding more # than two queue-runners may hog the cpu on the worker to fill up the queue. for _ in range(num_queue_runners): queue_runner.add_queue_runner( queue_runner.QueueRunner(input_queue, [input_queue.enqueue( tensors_to_enqueue)])) dequeued_tensors = input_queue.dequeue() # Reset shapes on dequeued tensors. for i in range(len(tensors_to_enqueue)): dequeued_tensors[i].set_shape(tensors_to_enqueue[i].get_shape()) # Recreate feature mapping according to the original dictionary. dequeued_parsed_features = {} index = 0 for key, is_sparse_tensor in tensors_mapping: if is_sparse_tensor: # Three tensors are (indices, values, shape). dequeued_parsed_features[key] = ops.SparseTensor( dequeued_tensors[index], dequeued_tensors[index + 1], dequeued_tensors[index + 2]) index += 3 else: dequeued_parsed_features[key] = dequeued_tensors[index] index += 1 dequeued_keys = None if keys is not None: dequeued_keys = dequeued_tensors[-1] return dequeued_keys, dequeued_parsed_features def read_batch_features(file_pattern, batch_size, features, reader, randomize_input=True, num_epochs=None, queue_capacity=10000, feature_queue_capacity=100, reader_num_threads=1, parser_num_threads=1, parse_fn=None, name=None): """Adds operations to read, queue, batch and parse `Example` protos. Given file pattern (or list of files), will setup a queue for file names, read `Example` proto using provided `reader`, use batch queue to create batches of examples of size `batch_size` and parse example given `features` specification. All queue runners are added to the queue runners collection, and may be started via `start_queue_runners`. All ops are added to the default graph. Args: file_pattern: List of files or pattern of file paths containing `Example` records. See `tf.gfile.Glob` for pattern rules. batch_size: An int or scalar `Tensor` specifying the batch size to use. features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. reader: A function or class that returns an object with `read` method, (filename tensor) -> (example tensor). randomize_input: Whether the input should be randomized. num_epochs: Integer specifying the number of times to read through the dataset. If None, cycles through the dataset forever. NOTE - If specified, creates a variable that must be initialized, so call tf.initialize_local_variables() as shown in the tests. queue_capacity: Capacity for input queue. feature_queue_capacity: Capacity of the parsed features queue. Set this value to a small number, for example 5 if the parsed features are large. reader_num_threads: The number of threads to read examples. parser_num_threads: The number of threads to parse examples. 
records to read at once parse_fn: Parsing function, takes `Example` Tensor returns parsed representation. If `None`, no parsing is done. name: Name of resulting op. Returns: A dict of `Tensor` or `SparseTensor` objects for each in `features`. Raises: ValueError: for invalid inputs. """ _, features = read_keyed_batch_features( file_pattern, batch_size, features, reader, randomize_input=randomize_input, num_epochs=num_epochs, queue_capacity=queue_capacity, feature_queue_capacity=feature_queue_capacity, reader_num_threads=reader_num_threads, parser_num_threads=parser_num_threads, parse_fn=parse_fn, name=name) return features def read_batch_record_features(file_pattern, batch_size, features, randomize_input=True, num_epochs=None, queue_capacity=10000, reader_num_threads=1, parser_num_threads=1, name='dequeue_record_examples'): """Reads TFRecord, queues, batches and parses `Example` proto. See more detailed description in `read_examples`. Args: file_pattern: List of files or pattern of file paths containing `Example` records. See `tf.gfile.Glob` for pattern rules. batch_size: An int or scalar `Tensor` specifying the batch size to use. features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. randomize_input: Whether the input should be randomized. num_epochs: Integer specifying the number of times to read through the dataset. If None, cycles through the dataset forever. NOTE - If specified, creates a variable that must be initialized, so call tf.initialize_local_variables() as shown in the tests. queue_capacity: Capacity for input queue. reader_num_threads: The number of threads to read examples. parser_num_threads: The number of threads to parse examples. name: Name of resulting op. Returns: A dict of `Tensor` or `SparseTensor` objects for each in `features`. Raises: ValueError: for invalid inputs. """ return read_batch_features( file_pattern=file_pattern, batch_size=batch_size, features=features, reader=io_ops.TFRecordReader, randomize_input=randomize_input, num_epochs=num_epochs, queue_capacity=queue_capacity, reader_num_threads=reader_num_threads, parser_num_threads=parser_num_threads, name=name)
fb1685195ae352ac08edff4c05fa3f7106b9115a
c4702d1a06640555829b367852138cc93ba4a161
/dym_report_hutang/models/__init__.py
6a1cad99141266f27609c49c92ee3c235f1560e7
[]
no_license
Rizalimami/dym
0ecadf9c049b22ebfebf92e4eab6eaad17dd3e26
af1bcf7b77a3212bc8a8a0e41e6042a134587ed4
refs/heads/master
2020-04-08T10:56:43.605698
2018-11-27T06:44:08
2018-11-27T06:44:08
159,287,876
0
2
null
null
null
null
UTF-8
Python
false
false
28
py
import dym_report_hutang_aml
c807672ac9a93e578d16c491d87364a6512dba76
da3e36172daaf863ef73372f8c36cc2629ec1769
/tuplas/eje03.py
bb54a038752b4310aef8c28e3c48b273a8959656
[]
no_license
mentecatoDev/python
08eef1cb5a6ca2f16b01ee98192ccf1a65b9380a
80ddf541d3d1316ba8375db8f6ec170580e7831b
refs/heads/master
2021-06-30T07:03:51.957376
2021-02-22T09:40:46
2021-02-22T09:40:46
222,322,503
3
4
null
null
null
null
UTF-8
Python
false
false
1,134
py
""" Ejercicio 3 =========== Escribir un programa que lea un archivo e imprima las letras en orden decreciente de frecuencia de aparición. El programa debería convertir toda la entrada a minúsculas y solo contar las letras a-z (excluir la "ñ"). No se debeen contar espacios, dígitos, signos de puntuación o cualquier otro carácter. Buscar textos en diferentes lenguas y ver cómo la frecuencia de las letras varían entre lenguajes. Comparar los resultados con las tablas que se puede encontrar en wikipedia.org/wiki/Letter_frequencies. """ import string try: fhandle = open(input("Introduzca el nombre del fichero: ")) except IOError: print("El fichero no existe") exit() full_string = fhandle.read() alphabet = dict() for letter in full_string: letter = letter.lower() if letter in string.ascii_lowercase: alphabet[letter] = alphabet.get(letter, 0) + 1 total = 0 ordered_list = [] for key, value in alphabet.items(): total += alphabet[key] ordered_list += [(value, key)] ordered_list.sort(reverse=True) for tupla in ordered_list: print("%s %.2f%%" % (tupla[1], tupla[0]*100/total))
3da0ac5df135ea5ca9acb2681b46952392d40bff
ef0f84bcba3ded3624697c5c2a36e5c99cc5e498
/bumpversion/functions.py
b00f726a6650eb78bbecce9bb7197b7618e92fcd
[ "MIT" ]
permissive
lbryio/bumpversion
e502d7a78608f72f6d6817bad451232fc02d75c0
0c8f0e327ac97d896ca3fd2e254628e2afd0a4fe
refs/heads/master
2021-01-11T11:57:20.662827
2018-10-31T15:49:54
2018-10-31T15:49:54
76,699,829
3
1
MIT
2018-10-31T15:49:56
2016-12-17T02:44:18
Python
UTF-8
Python
false
false
2,873
py
import re
import datetime


class NumericFunction(object):
    """
    This is a class that provides a numeric function for version parts.
    It simply starts with the provided first_value (0 by default) and
    increases it following the sequence of integer numbers.

    The optional value of this function is equal to the first value.

    This function also supports alphanumeric parts, altering just the numeric
    part (e.g. 'r3' --> 'r4'). Only the first numeric group found in the part is
    considered (e.g. 'r3-001' --> 'r4-001').
    """

    FIRST_NUMERIC = re.compile('([^\d]*)(\d+)(.*)')

    def __init__(self, first_value=None):

        if first_value is not None:
            try:
                part_prefix, part_numeric, part_suffix = self.FIRST_NUMERIC.search(
                    first_value).groups()
            except AttributeError:
                raise ValueError(
                    "The given first value {} does not contain any digit".format(first_value))
        else:
            first_value = 0

        self.first_value = str(first_value)
        self.optional_value = self.first_value

    def bump(self, value):
        part_prefix, part_numeric, part_suffix = self.FIRST_NUMERIC.search(
            value).groups()
        bumped_numeric = int(part_numeric) + 1

        return "".join([part_prefix, str(bumped_numeric), part_suffix])


class ValuesFunction(object):
    """
    This is a class that provides a values list based function for version parts.
    It is initialized with a list of values and iterates through them when
    bumping the part.

    The default optional value of this function is equal to the first value,
    but may be otherwise specified.

    When trying to bump a part which has already the maximum value in the list
    you get a ValueError exception.
    """

    def __init__(self, values, optional_value=None, first_value=None):

        if len(values) == 0:
            raise ValueError("Version part values cannot be empty")

        self._values = values

        if optional_value is None:
            optional_value = values[0]

        if optional_value not in values:
            raise ValueError("Optional value {0} must be included in values {1}".format(
                optional_value, values))

        self.optional_value = optional_value

        if first_value is None:
            first_value = values[0]

        if first_value not in values:
            raise ValueError("First value {0} must be included in values {1}".format(
                first_value, values))

        self.first_value = first_value

    def bump(self, value):
        try:
            return self._values[self._values.index(value) + 1]
        except IndexError:
            raise ValueError(
                "The part has already the maximum value among {} and cannot be bumped.".format(self._values))
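# A short usage sketch (editor addition) exercising the two classes defined
# above; the version strings are arbitrary examples.
nf = NumericFunction(first_value="r3-001")
assert nf.bump("r3-001") == "r4-001"   # only the first numeric group is bumped

vf = ValuesFunction(["alpha", "beta", "rc"])
assert vf.bump("alpha") == "beta"
# vf.bump("rc") raises ValueError: "rc" is already the last value in the list.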
f20cea7c2a92454e78c5329560ee8cf9c53555e6
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
/ml-flask/Lib/site-packages/sklearn/impute/_knn.py
e995c37a08e2e2b058641a63cec0e3dbf58a4303
[ "MIT" ]
permissive
YaminiHP/SimilitudeApp
8cbde52caec3c19d5fa73508fc005f38f79b8418
005c59894d8788c97be16ec420c0a43aaec99b80
refs/heads/master
2023-06-27T00:03:00.404080
2021-07-25T17:51:27
2021-07-25T17:51:27
389,390,951
0
0
null
null
null
null
UTF-8
Python
false
false
130
py
version https://git-lfs.github.com/spec/v1
oid sha256:890c089e7000acacdf6a7598f456bcc2d31f0331adb33d1b18c7cb84120d3758
size 11662
781e20ee60bcb21fe4e6068d568ba9a9e5393d25
8e311f8f94c9d218bd37f81c0badc906d78d6b33
/env/Lib/site-packages/openpyxl/utils/escape.py
52ef33e0a97ffe61274bfc2b8c8686fd8d78d159
[ "MIT" ]
permissive
htwenhe/DJOA
d76307ff8752c1e2a89101de1f74094b94bf9b18
3c2d384a983e42dedfd72561353ecf9370a02115
refs/heads/master
2021-09-03T21:49:28.267986
2018-01-12T08:12:55
2018-01-12T08:12:55
108,937,324
0
1
MIT
2018-01-12T08:06:50
2017-10-31T02:59:26
Python
UTF-8
Python
false
false
828
py
from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl

"""
OOXML has non-standard escaping for characters < \031
"""

import re


def escape(value):
    r"""
    Convert ASCII < 31 to OOXML: \n == _x + hex(ord(\n)) + _
    """

    CHAR_REGEX = re.compile(r"[\001-\031]")

    def _sub(match):
        """
        Callback to escape chars
        """
        return "_x%04x_" % ord(match.group(0))  # py 2.6

    return CHAR_REGEX.sub(_sub, value)


def unescape(value):
    r"""
    Convert escaped strings to ASCII: _x000a_ == \n
    """

    ESCAPED_REGEX = re.compile("_x([0-9A-Fa-f]{4})_")

    def _sub(match):
        """
        Callback to unescape chars
        """
        return chr(int(match.group(1), 16))

    if "_x" in value:
        value = ESCAPED_REGEX.sub(_sub, value)

    return value
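# A round-trip sketch (editor addition): control characters below 0x20 are
# written out as _xHHHH_ escapes and restored on the way back in.
assert escape("line1\nline2") == "line1_x000a_line2"
assert unescape("line1_x000a_line2") == "line1\nline2"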
e4832b743c96639a2e1820a18dea177fd0b25045
54da94dce244ab659c8036cafcdc1b326fbfe490
/datoteke-s-predavanj/2015-16/10-nakljucna-stevila/matematiki/ocenipi.py
a001630b6569c0a30461ff211502ce246eb54d9d
[]
no_license
jakamrak/uvod-v-programiranje
640b2738164e2026308d7e60f1478659df79cc40
3c05290f4f23b384ad9063880fffe208c08fc599
refs/heads/master
2022-07-17T16:50:18.563453
2020-05-18T13:54:13
2020-05-18T13:54:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,149
py
import random


def oceni_pi(stevilo_poskusov):
    v_krogu = 0
    for _ in range(stevilo_poskusov):
        x, y = random.random(), random.random()
        if x ** 2 + y ** 2 <= 1:
            v_krogu += 1
    return 4 * v_krogu / stevilo_poskusov


def oceni_eno_kocko(stevilo_poskusov):
    pogostosti = {}
    for _ in range(stevilo_poskusov):
        met = random.randint(1, 6)
        pogostosti[met] = pogostosti.get(met, 0) + 1
    return {met: pogostost / stevilo_poskusov for met, pogostost in pogostosti.items()}


def oceni_dve_kocki(stevilo_poskusov):
    pogostosti = {}
    for _ in range(stevilo_poskusov):
        met = random.randint(1, 6) + random.randint(1, 6)
        pogostosti[met] = pogostosti.get(met, 0) + 1
    return {met: pogostost / stevilo_poskusov for met, pogostost in pogostosti.items()}


def oceni_max_tri_kocke(stevilo_poskusov):
    pogostosti = {}
    for _ in range(stevilo_poskusov):
        met = max(random.randint(1, 6), random.randint(1, 6), random.randint(1, 6))
        pogostosti[met] = pogostosti.get(met, 0) + 1
    return {met: pogostost / stevilo_poskusov for met, pogostost in pogostosti.items()}
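# A quick demonstration (editor addition): with enough samples oceni_pi
# approaches pi, and the two-dice distribution peaks at 7 (6/36 ~ 0.167).
if __name__ == "__main__":
    print(oceni_pi(100000))         # roughly 3.14
    print(oceni_dve_kocki(100000))  # the value for key 7 is roughly 0.167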
9ff1408db7be8a1136af5560f389a255626ebc8d
f1614f3531701a29a33d90c31ab9dd6211c60c6b
/menu_sun_integration/infrastructure/pernod/translators/pernod_product_translator.py
310121443153e34684dfcbcfcb6ea8f758c223f6
[]
no_license
pfpacheco/menu-sun-api
8a1e11543b65db91d606b2f3098847e3cc5f2092
9bf2885f219b8f75d39e26fd61bebcaddcd2528b
refs/heads/master
2022-12-29T13:59:11.644409
2020-10-16T03:41:54
2020-10-16T03:41:54
304,511,679
0
0
null
null
null
null
UTF-8
Python
false
false
1,581
py
from menu_sun_api.domain.model.product.product import Product, ProductStatus
from menu_sun_api.domain.model.seller.seller import Seller
from menu_sun_integration.application.translators.interfaces.abstract_product_translator import \
    AbstractProductTranslator
from menu_sun_integration.infrastructure.pernod.presentations.product.pernod_product_get_request import \
    PernodProductGetRequest
from menu_sun_integration.infrastructure.pernod.presentations.product.pernod_product_response import \
    PernodProductResponse
from menu_sun_integration.presentations.interfaces.abstract_platform import AbstractPlatform
from menu_sun_integration.presentations.interfaces.abstract_request import AbstractRequest


class PernodProductTranslator(AbstractProductTranslator):

    def bind_product(self, product: PernodProductResponse) -> Product:
        status = ProductStatus.ENABLED if product.active else ProductStatus.DISABLED

        return Product(status=status,
                       sku=product.sku,
                       name=product.name,
                       description=product.description,
                       weight=product.weight,
                       ean=product.ean,
                       brand=product.brand,
                       width=product.width,
                       height=product.height,
                       length=product.length)

    def to_seller_send_format(self, entity: AbstractPlatform) -> AbstractRequest:
        pass

    def to_seller_get_format(self, seller: Seller, **kwargs) -> PernodProductGetRequest:
        return PernodProductGetRequest()

    def to_domain_format(self, response: PernodProductResponse) -> [Product]:
        return self.bind_product(response)
f94f884ae27d3ed75f4d10e846d2b7d48fa3ebc8
169f134442ce7f040e8a1878258c90ef9f0cbcc1
/skivvy.py
0ff0ad391b9f1781aa56ece1702caab15635ef0e
[]
no_license
Kazade/skivvy
9d41b0acc934e6059d60b386cc54a0638b14f7cc
41278fa66b559ee507862c729ad89028ccd5ae52
refs/heads/master
2016-09-11T04:41:31.998555
2014-12-05T16:36:10
2014-12-05T16:36:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,832
py
import os
import sys
import argparse
import glob
import json
import re
import tempfile
import subprocess

ENV_REGEX = r"(.+)\[(.+)\]"

parser = argparse.ArgumentParser(description="underling task worker")
parser.add_argument("--config", type=unicode, default="underling.json")
parser.add_argument("--env", type=unicode, default="")
parser.add_argument("--watch", type=bool, default=False)
parser.add_argument("command", type=unicode)


def locate_task(task_dirs, task_name):
    for task_dir in task_dirs:
        full_path = os.path.join(task_dir, task_name)
        if os.path.exists(full_path):
            return full_path
    return ""


def expand_globs(inputs):
    # Expand any glob patterns in the input list; patterns that match nothing
    # are passed through unchanged. (This was an unimplemented stub that
    # returned None and broke the task loop below.)
    expanded = []
    for pattern in inputs:
        expanded.extend(glob.glob(pattern) or [pattern])
    return expanded


def replace_constants(string, constants):
    for constant in constants:
        if isinstance(constants[constant], list):
            string = string.replace("{" + constant + "}", " ".join(constants[constant]))
        else:
            string = string.replace("{" + constant + "}", constants[constant])
    return string


def run_command(config, command, namespace=""):
    task_roots = config.get("__task_dirs__", [])

    constants = {}
    constants["PROJECT_ROOT"] = os.getcwd()

    new_constants = config.get("__constants__", {})
    for const in new_constants:
        if isinstance(new_constants[const], list):
            new_constants[const] = [replace_constants(x, constants) for x in new_constants[const]]
        else:
            new_constants[const] = replace_constants(new_constants[const], constants)
    constants.update(new_constants)

    task_roots = [replace_constants(x, constants) for x in task_roots]

    commands = config.get("__commands__", {})
    if command not in commands:
        print("Unrecognized command: %s" % command)
        print("Available commands: \n%s" % "\n\n".join(commands.keys()))
        sys.exit(1)

    for dependency in commands[command]:
        if dependency.startswith("file://"):
            print("Unhandled dependency type: %s" % dependency)
            sys.exit(1)

        dependency_name = dependency
        dependency = config.get(dependency_name)
        if not dependency:
            print("Unrecognized dependency: %s" % dependency_name)
            sys.exit(1)

        inputs = ""
        for task in dependency:
            if not isinstance(task, dict):
                print("Pipeline %s should be a list of tasks (dictionaries)" % dependency_name)

            task_name = task.get("task")
            has_env = re.match(ENV_REGEX, task_name)
            if has_env and not has_env.group(2).startswith(namespace):
                continue  # Ignore tasks that don't have this namespace

            if has_env:
                task_name = has_env.group(1)

            inputs = replace_constants(task.get("input", ""), constants).split(" ") or inputs
            inputs = expand_globs(inputs)
            if inputs:
                constants["INPUT_FILES"] = inputs

            output_file = task.get("output")
            if not output_file:
                # No explicit output? Then generate a temporary file
                _, output_file = tempfile.mkstemp()

            task = locate_task(task_roots, task_name)
            if not task:
                print("Unable to find task: %s" % task_name)
                sys.exit(1)

            final_command = [task, "--output=%s" % output_file]
            for input_file in inputs:
                final_command.append("--input=%s" % input_file)

            print subprocess.check_output(final_command)

            # Each task's output becomes the next task's input.
            inputs = output_file


def load_config(config):
    with open(config, "r") as f:
        config = json.loads(f.read())
    return config


def main():
    args = parser.parse_args()
    config = load_config(args.config)
    run_command(config, args.command)
    return 0


if __name__ == '__main__':
    sys.exit(main())
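# A minimal configuration sketch (editor addition; the layout is inferred from
# run_command above and every name and value here is made up):
#
# {
#     "__task_dirs__": ["{PROJECT_ROOT}/tasks"],
#     "__constants__": {"BUILD_DIR": "{PROJECT_ROOT}/build"},
#     "__commands__": {"build": ["compile"]},
#     "compile": [
#         {"task": "cc", "input": "src/*.c", "output": "{BUILD_DIR}/app"}
#     ]
# }
#
# Invocation: python skivvy.py --config underling.json build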
6a761ba24b8b47cb5af1cfae4382ae695c5c1676
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/otherforms/_sultanates.py
18564bf3d0e82aa07511fead77ff8b845972a870
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
234
py
# class header
class _SULTANATES():

    def __init__(self,):
        self.name = "SULTANATES"
        self.definitions = "sultanate"
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['sultanate']
1ccac1f41ac29343126ed052cb5f0f3e98e6ec18
4909bdc40f9d606e336a077908ca16933989f66f
/tensorflow/python/distribute/multi_worker_continuous_run_test.py
19790a0d69fb2eb2bdeee043f96d30c1f8e66767
[ "Apache-2.0" ]
permissive
xhook/tensorflow
9532fcf1b466f65f938aa95aba290d99d2004ad0
978d3b37393a4dd2411e2e6657dff1bbcac81a66
refs/heads/master
2020-09-06T21:00:35.848217
2019-11-08T18:03:54
2019-11-08T18:59:48
220,535,874
0
0
Apache-2.0
2019-11-08T19:39:19
2019-11-08T19:39:18
null
UTF-8
Python
false
false
3,472
py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for continuous runs using cross-worker collective ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os from absl.testing import parameterized import numpy as np from tensorflow.python.distribute import collective_all_reduce_strategy from tensorflow.python.distribute import combinations from tensorflow.python.distribute import multi_process_runner from tensorflow.python.distribute import multi_process_runner_util from tensorflow.python.distribute import multi_worker_test_base as test_base from tensorflow.python.distribute import reduce_util from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import test from tensorflow.python.framework import config from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops # TODO(b/143286947): expand the test to cover fault tolerance and elasticity class MultiWorkerContinuousRunTest(test.TestCase, parameterized.TestCase): @combinations.generate(combinations.combine(mode=['eager'])) def testAllReduceContinuousRun(self, mode): num_workers = 5 tensor_shape = [2, 2] local_device = '/device:CPU:0' if config.list_physical_devices('GPU'): local_device = '/device:GPU:0' def worker_step_fn(): strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy() tf_config = json.loads(os.environ['TF_CONFIG']) worker_id = tf_config['task']['index'] @def_function.function def run_reduce(): with ops.device(local_device): t_in = array_ops.ones(tensor_shape) * worker_id return strategy.reduce(reduce_util.ReduceOp.MEAN, t_in, axis=None) t_out = run_reduce() # Element values from the workers are # 0, 1, ..., (num_workers - 1) expected_mean = (num_workers - 1) / 2 expected_out = np.ones(tensor_shape) * expected_mean self.assertAllClose(t_out, expected_out) def worker_fn(): gpus = config.list_physical_devices('GPU') if gpus: # Set virtual GPU with memory limit of 64MB so that multiple worker # processes can share the physical GPU config.set_logical_device_configuration( gpus[0], [context.LogicalDeviceConfiguration(64)]) for _ in range(100): worker_step_fn() # TODO(b/141948186): Remove this `with` block once b/141948186 is resolved. with multi_process_runner_util.try_run_and_except_connection_error(self): multi_process_runner.MultiProcessRunner().run( worker_fn, cluster_spec=test_base.create_cluster_spec(num_workers=num_workers)) if __name__ == '__main__': multi_process_runner.test_main()
59a16006343239f0fce22a8359cb005c9a563593
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03363/s899204324.py
4e166a6e2b65758d9abb5e6794647c95abe254e6
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
429
py
import bisect
import math

n = int(input())
a = list(map(int, input().split()))

s = [0]*(n+1)
for i, e in enumerate(a):
    s[i+1] = s[i] + e

def comb(n, m):
    return math.factorial(n)//math.factorial(m)//math.factorial(n-m)

ans = 0
s = sorted(s)
l = 0
r = 0
# print(s)
while l < n+1:
    while r < n+1 and s[l] == s[r]:
        # print(r, s[r])
        r += 1
    if r-l >= 2:
        ans += comb(r-l, 2)
    l = r
print(ans)
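# Worked example (editor addition): s[j] == s[i] means a[i:j] sums to zero, so
# the answer counts pairs of equal prefix sums. For a = [1, 3, -4, 2, 2, -2],
# s = [0, 1, 4, 0, 2, 4, 2]; the values 0, 4 and 2 each occur twice, giving
# comb(2, 2) * 3 = 3 zero-sum subarrays: [1, 3, -4], [-4, 2, 2] and [2, -2].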
dcb8a80e5b04d0964118e6cb6917ac60f0a9857e
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
/place_and_point/week_and_next_day/work_or_high_way/take_good_year_with_next_hand/woman/get_small_way.py
5a4a7c6086c5ee148aac418594f1d5b2fa66fbc4
[]
no_license
JingkaiTang/github-play
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
51b550425a91a97480714fe9bc63cb5112f6f729
refs/heads/master
2021-01-20T20:18:21.249162
2016-08-19T07:20:12
2016-08-19T07:20:12
60,834,519
0
0
null
null
null
null
UTF-8
Python
false
false
246
py
#! /usr/bin/env python


def place_and_few_case(str_arg):
    go_next_thing_at_great_way(str_arg)
    print('old_work')


def go_next_thing_at_great_way(str_arg):
    print(str_arg)


if __name__ == '__main__':
    place_and_few_case('seem_point')
3dccb0ced631f2774613aa58ddedc938d556cee2
083b758356821647b6a2db3f4ae32b355ebd28c4
/Question_91_100/answers/answer_98.py
e98d41c8e6551a43cc8e51166b79b4fd2f798cbf
[ "MIT" ]
permissive
litiangu/Gasyori100knock
0b2e2844748acecba383500f56a25afad5a22973
38305760a4db4c6f8addf176630197960e7fc6a9
refs/heads/master
2020-07-16T18:00:05.756714
2019-09-01T22:58:39
2019-09-01T22:58:39
205,837,921
1
0
MIT
2019-09-02T11:02:30
2019-09-02T11:02:30
null
UTF-8
Python
false
false
5,894
py
import cv2 import numpy as np np.random.seed(0) # read image img = cv2.imread("imori_1.jpg") H, W, C = img.shape # Grayscale gray = 0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0] gt = np.array((47, 41, 129, 103), dtype=np.float32) cv2.rectangle(img, (gt[0], gt[1]), (gt[2], gt[3]), (0,255,255), 1) def iou(a, b): area_a = (a[2] - a[0]) * (a[3] - a[1]) area_b = (b[2] - b[0]) * (b[3] - b[1]) iou_x1 = np.maximum(a[0], b[0]) iou_y1 = np.maximum(a[1], b[1]) iou_x2 = np.minimum(a[2], b[2]) iou_y2 = np.minimum(a[3], b[3]) iou_w = max(iou_x2 - iou_x1, 0) iou_h = max(iou_y2 - iou_y1, 0) area_iou = iou_w * iou_h iou = area_iou / (area_a + area_b - area_iou) return iou def hog(gray): h, w = gray.shape # Magnitude and gradient gray = np.pad(gray, (1, 1), 'edge') gx = gray[1:h+1, 2:] - gray[1:h+1, :w] gy = gray[2:, 1:w+1] - gray[:h, 1:w+1] gx[gx == 0] = 0.000001 mag = np.sqrt(gx ** 2 + gy ** 2) gra = np.arctan(gy / gx) gra[gra<0] = np.pi / 2 + gra[gra < 0] + np.pi / 2 # Gradient histogram gra_n = np.zeros_like(gra, dtype=np.int) d = np.pi / 9 for i in range(9): gra_n[np.where((gra >= d * i) & (gra <= d * (i+1)))] = i N = 8 HH = h // N HW = w // N Hist = np.zeros((HH, HW, 9), dtype=np.float32) for y in range(HH): for x in range(HW): for j in range(N): for i in range(N): Hist[y, x, gra_n[y*4+j, x*4+i]] += mag[y*4+j, x*4+i] ## Normalization C = 3 eps = 1 for y in range(HH): for x in range(HW): #for i in range(9): Hist[y, x] /= np.sqrt(np.sum(Hist[max(y-1,0):min(y+2, HH), max(x-1,0):min(x+2, HW)] ** 2) + eps) return Hist def resize(img, h, w): _h, _w = img.shape ah = 1. * h / _h aw = 1. * w / _w y = np.arange(h).repeat(w).reshape(w, -1) x = np.tile(np.arange(w), (h, 1)) y = (y / ah) x = (x / aw) ix = np.floor(x).astype(np.int32) iy = np.floor(y).astype(np.int32) ix = np.minimum(ix, _w-2) iy = np.minimum(iy, _h-2) dx = x - ix dy = y - iy out = (1-dx) * (1-dy) * img[iy, ix] + dx * (1 - dy) * img[iy, ix+1] + (1 - dx) * dy * img[iy+1, ix] + dx * dy * img[iy+1, ix+1] out[out>255] = 255 return out class NN: def __init__(self, ind=2, w=64, w2=64, outd=1, lr=0.1): self.w1 = np.random.normal(0, 1, [ind, w]) self.b1 = np.random.normal(0, 1, [w]) self.w2 = np.random.normal(0, 1, [w, w2]) self.b2 = np.random.normal(0, 1, [w2]) self.wout = np.random.normal(0, 1, [w2, outd]) self.bout = np.random.normal(0, 1, [outd]) self.lr = lr def forward(self, x): self.z1 = x self.z2 = sigmoid(np.dot(self.z1, self.w1) + self.b1) self.z3 = sigmoid(np.dot(self.z2, self.w2) + self.b2) self.out = sigmoid(np.dot(self.z3, self.wout) + self.bout) return self.out def train(self, x, t): # backpropagation output layer #En = t * np.log(self.out) + (1-t) * np.log(1-self.out) En = (self.out - t) * self.out * (1 - self.out) grad_wout = np.dot(self.z3.T, En) grad_bout = np.dot(np.ones([En.shape[0]]), En) self.wout -= self.lr * grad_wout self.bout -= self.lr * grad_bout # backpropagation inter layer grad_u2 = np.dot(En, self.wout.T) * self.z3 * (1 - self.z3) grad_w2 = np.dot(self.z2.T, grad_u2) grad_b2 = np.dot(np.ones([grad_u2.shape[0]]), grad_u2) self.w2 -= self.lr * grad_w2 self.b2 -= self.lr * grad_b2 grad_u1 = np.dot(grad_u2, self.w2.T) * self.z2 * (1 - self.z2) grad_w1 = np.dot(self.z1.T, grad_u1) grad_b1 = np.dot(np.ones([grad_u1.shape[0]]), grad_u1) self.w1 -= self.lr * grad_w1 self.b1 -= self.lr * grad_b1 def sigmoid(x): return 1. / (1. 
+ np.exp(-x)) # crop and create database Crop_num = 200 L = 60 H_size = 32 F_n = ((H_size // 8) ** 2) * 9 db = np.zeros((Crop_num, F_n+1)) for i in range(Crop_num): x1 = np.random.randint(W-L) y1 = np.random.randint(H-L) x2 = x1 + L y2 = y1 + L crop = np.array((x1, y1, x2, y2)) _iou = iou(gt, crop) if _iou >= 0.5: cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 1) label = 1 else: cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,0), 1) label = 0 crop_area = gray[y1:y2, x1:x2] crop_area = resize(crop_area, H_size, H_size) _hog = hog(crop_area) db[i, :F_n] = _hog.ravel() db[i, -1] = label ## train neural network nn = NN(ind=F_n, lr=0.01) for i in range(10000): nn.forward(db[:, :F_n]) nn.train(db[:, :F_n], db[:, -1][..., None]) # read detect target image img2 = cv2.imread("imori_many.jpg") H2, W2, C2 = img2.shape # Grayscale gray2 = 0.2126 * img2[..., 2] + 0.7152 * img2[..., 1] + 0.0722 * img2[..., 0] # [h, w] recs = np.array(((42, 42), (56, 56), (70, 70)), dtype=np.float32) detects = np.ndarray((0, 5), dtype=np.float32) # sliding window for y in range(0, H2, 4): for x in range(0, W2, 4): for rec in recs: dh = int(rec[0] // 2) dw = int(rec[1] // 2) x1 = max(x-dw, 0) x2 = min(x+dw, W2) y1 = max(y-dh, 0) y2 = min(y+dh, H2) region = gray2[max(y-dh,0):min(y+dh,H2), max(x-dw,0):min(x+dw,W2)] region = resize(region, H_size, H_size) region_hog = hog(region).ravel() score = nn.forward(region_hog) if score >= 0.7: cv2.rectangle(img2, (x1, y1), (x2, y2), (0,0,255), 1) detects = np.vstack((detects, np.array((x1, y1, x2, y2, score)))) print(detects) cv2.imwrite("out.jpg", img2) cv2.imshow("result", img2) cv2.waitKey(0)
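# Worked IoU check (editor addition) for the iou() helper defined above:
# iou((0, 0, 2, 2), (1, 1, 3, 3)) -> intersection 1*1 = 1, union 4 + 4 - 1 = 7,
# so the result is 1/7 ~ 0.143.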
31b1d495a4fa66ef1fc0ef77615b43c7dd5e44f3
ef54d37f8a3303013ca7469871a320d303957ed7
/robo4.2/fusion/tests/RIST/OVST/DDQual-API-HW/resources/variables.py
99de24a1b639071c60d76b95b0eded7fbd23e980
[]
no_license
richa92/Jenkin_Regression_Testing
d18badfcf16bda682dfe7bcbbd66f54a9a27a58d
24a74926170cbdfafa47e972644e2fe5b627d8ff
refs/heads/master
2020-07-12T10:01:59.099137
2019-08-27T12:14:53
2019-08-27T12:14:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
16,698
py
""" resources/variables.py - Enclosure variables used for AM-DVT Fusion testing = Usage = | *** Settings *** | | variables | resources/variables.py | | variables | resources/variables.py | ${ENCLOSURE} | | pybot --variablefile resources/variables.py | | pybot --variablefile resources/variables.py:<EnclosureName> | """ import paramiko import re from RoboGalaxyLibrary.utilitylib import logging as logger enclosure_defaults = { "ILO_PASSWORD": "", "GATEWAY_IP": "", "NETMASK_IP": "", "PRIMARY_DNS": "", "ALTERNATE_DNS": "", "DCS": False, "FUSION_SSH_USERNAME": "", "FUSION_SSH_PASSWORD": "", "STANDBY_CIM": "", "ACTIVE_CIM": "", "FUSION_IP": "" # # See resources\defaults.txt for additional variables. # } enclosure_configurations = { "dcs": { "EM_IP": "172.18.8.101", "EM_IPV4": "172.18.8.101", "ENC_SERIAL_NUMBER": "0000A66101", "ENC_UUID": "8c031050-4b30-40b5-8ddd-e55d7a093f2e", "BLADE_DATA": {"1": {"SerialNumber": "2M220101SL", "Model": ""}, "2": {"SerialNumber": "2M220101SL", "Model": ""}, "4": {"SerialNumber": "2M201100GR", "Model": ""}, "5": {"SerialNumber": "2M220103SL", "Model": ""}, "6": {"SerialNumber": "2M220103SL", "Model": ""}, }, "INTERCONNECT_DATA": {"1": {"SerialNumber": "100010010100a", "Model": "Natasha SAS 12Gb Switch"}, "3": {"SerialNumber": "100010010100a", "Model": "HP FlexFabric 40/40Gb Module"}, "4": {"SerialNumber": "100010010101a", "Model": "Natasha SAS 12Gb Switch"}, "6": {"SerialNumber": "100010010100a", "Model": "HP FlexFabric 10GbE Expansion Module"} }, "FUSION_NIC_SUFFIX": "", "DCS": True, "HW_DESCRIPTION_FILE": "hw_tbird_demo.js", "DISCOVER_TRUTH_FILE": "discover_tbird_demo.js" }, "Ring1ActiveCIM": { "ILO_PASSWORD": "", "GATEWAY_IP": "", "NETMASK_IP": "", "PRIMARY_DNS": "", "ALTERNATE_DNS": "", "DCS": False, "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "16.114.210.202", "ACTIVE_CIM": "16.114.210.201", "FUSION_IP": "16.114.211.32", "MAINTANENCE_IP": "16.114.210.201", # Management IP "FUSION_FQDN": "wpst-tbird-1-oneview.vse.rdlabs.hpecorp.net", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::2347:92b:e77:d65b", "EM1_IP": "fe80::3a63:bbff:fe2b:7348", "EM2_IP": "fe80::3a63:bbff:fe2b:f2c0", "EM_IPV4": "16.114.178.166", "ENC_SERIAL_NUMBER": "00HPMPC01A", "BLADE_DATA": {"1": {"SerialNumber": "CN74250H94", "Model": "ProLiant BL460t Gen9", "BLADE_ILO_IP6": "fe80::fe15:b4ff:fe12:adfe", "BLADE_ILO_IP4": "16.114.179.64", "BLADE_ILO_FQDN": "mustang01-ilo.rsn.hp.com", "BLADE_ILO_USER": "Administrator", "BLADE_ILO_PW": "hpvse123", "BLADE_TYPE": "BL460t Gen9", "MEZZ_1": "HP FlexFabric Bronco Gen3 2p 20GbE CNA BCM57840", "MEZZ_2": "", "MEZZ_3": "", }, }, }, "Ring1StandbyCIM": { "ILO_PASSWORD": "", "GATEWAY_IP": "", "NETMASK_IP": "", "PRIMARY_DNS": "", "ALTERNATE_DNS": "", "DCS": False, "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "16.114.210.202", "ACTIVE_CIM": "16.114.210.201", "FUSION_IP": "16.114.211.32", "MAINTANENCE_IP": "16.114.210.202", # Management IP "FUSION_FQDN": "wpst-tbird-1-oneview.vse.rdlabs.hpecorp.net", "FUSION_IPV6": "fe80::7291:e52e:3554:d067", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::c634:6bff:fec9:c7b8", "EM1_IP": "fe80::3a63:bbff:fe2b:2318", "EM2_IP": "fe80::3a63:bbff:fe2b:9398", "EM_IPV4": "16.114.178.166", "ENC_SERIAL_NUMBER": "00HPMPC01A", "BLADE_DATA": {"1": {"SerialNumber": "CN74250H94", "Model": "ProLiant BL460t Gen9", "BLADE_ILO_IP6": "fe80::fe15:b4ff:fe12:adfe", "BLADE_ILO_IP4": "16.114.179.64", "BLADE_ILO_FQDN": "mustang01-ilo.rsn.hp.com", "BLADE_ILO_USER": 
"Administrator", "BLADE_ILO_PW": "hpvse123", "BLADE_TYPE": "BL460t Gen9", "MEZZ_1": "HP FlexFabric Bronco Gen3 2p 20GbE CNA BCM57840", "MEZZ_2": "", "MEZZ_3": "", }, }, }, "Ring2ActiveCIM": { "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "16.114.210.206", "ACTIVE_CIM": "16.114.210.205", "FUSION_IP": "16.114.211.46", "MAINTANENCE_IP": "16.114.210.205", # Management IP "FUSION_FQDN": "", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::ced3:589c:ae89:ebcd" }, "Ring2StandbyCIM": { "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "16.114.210.206", "ACTIVE_CIM": "16.114.210.205", "FUSION_IP": "16.114.211.46", "MAINTANENCE_IP": "16.114.210.206", # Management IP "FUSION_FQDN": "", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::e40a:1958:dd12:3b79" }, "Ring7ActiveCIM": { "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "16.114.217.118", "ACTIVE_CIM": "16.114.217.117", "FUSION_IP": "16.114.217.116", "MAINTANENCE_IP": "16.114.217.117", # Management IP "FUSION_FQDN": "", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::e207:1bff:feef:a680" }, "Ring7StandbyCIM": { "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "16.114.217.118", "ACTIVE_CIM": "16.114.217.117", "FUSION_IP": "16.114.217.116", "MAINTANENCE_IP": "16.114.217.118", # Management IP "FUSION_FQDN": "", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::1602:ecff:fe44:6d78" }, "Ring8ActiveCIM": { "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "16.114.208.100", "ACTIVE_CIM": "16.114.208.99", "FUSION_IP": "16.114.208.98", "MAINTANENCE_IP": "16.114.208.99", # Management IP "FUSION_FQDN": "", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::e207:1bff:feef:27f8" }, "Ring8StandbyCIM": { "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "16.114.208.100", "ACTIVE_CIM": "16.114.208.99", "FUSION_IP": "16.114.208.98", "MAINTANENCE_IP": "16.114.208.100", # Management IP "FUSION_FQDN": "", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::e207:1bff:feef:a678" }, "Ring9ActiveCIM": { "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "16.114.216.235", "ACTIVE_CIM": "16.114.216.234", "FUSION_IP": "16.114.216.233", "MAINTANENCE_IP": "16.114.216.234", # Management IP "FUSION_FQDN": "", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::e207:1bff:fef1:ea50" }, "Ring9StandbyCIM": { "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "16.114.216.235", "ACTIVE_CIM": "16.114.216.234", "FUSION_IP": "16.114.216.233", "MAINTANENCE_IP": "16.114.216.235", # Management IP "FUSION_FQDN": "", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::e207:1bff:fef1:6b90" }, "BFSActiveCIM": { "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "10.133.1.17", "ACTIVE_CIM": "10.133.1.16", "FUSION_IP": "10.133.1.15", "MAINTANENCE_IP": "10.133.1.16", # Management IP "FUSION_FQDN": "", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": "fe80::649c:5603:ec39:9b28" }, "BFSStandbyCIM": { "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "10.133.1.17", "ACTIVE_CIM": "10.133.1.16", "FUSION_IP": "10.133.1.15", "MAINTANENCE_IP": "10.133.1.17", # Management IP "FUSION_FQDN": "", "FUSION_IPV6": "", "ILO_IP": "", "ILO_PASSWORD": "", "EM_IP": 
"fe80::6c7:c7cf:644a:4e1c" }, "mustang": { "ILO_PASSWORD": "AcmeAcme", "GATEWAY_IP": "10.87.0.1", "NETMASK_IP": "255.255.0.0", "PRIMARY_DNS": "10.87.0.11", "ALTERNATE_DNS": "", "DCS": False, "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "10.87.1.19", "ACTIVE_CIM": "10.87.1.18", "FUSION_IP": "10.87.1.17", "MAINTANENCE_IP": "10.87.1.19", # Management IP "FUSION_FQDN": "mustang-cim1.rsn.hp.com", "FUSION_IPV6": "fe80::9eb6:54ff:fe97:5cc8", "ILO_IP": "16.114.179.124", "ILO_PASSWORD": "hpvse123", "EM_IP": "fe80::c634:6bff:fec9:c7b8", "EM1_IP": "fe80::c634:6bff:fec9:c7b8", "EM2_IP": "fe80::c634:6bff:fec9:b7f0", "EM_IPV4": "16.114.178.166", "ENC_SERIAL_NUMBER": "00HPMPC01A", "BLADE_DATA": {"1": {"SerialNumber": "CN74250H94", "Model": "ProLiant BL460t Gen9", "BLADE_ILO_IP6": "fe80::fe15:b4ff:fe12:adfe", "BLADE_ILO_IP4": "16.114.179.64", "BLADE_ILO_FQDN": "mustang01-ilo.rsn.hp.com", "BLADE_ILO_USER": "Administrator", "BLADE_ILO_PW": "hpvse123", "BLADE_TYPE": "BL460t Gen9", "MEZZ_1": "HP FlexFabric Bronco Gen3 2p 20GbE CNA BCM57840", "MEZZ_2": "", "MEZZ_3": "", }, }, }, "tesla": { "ILO_PASSWORD": "AcmeAcme", "GATEWAY_IP": "10.87.0.1", "NETMASK_IP": "255.255.0.0", "PRIMARY_DNS": "10.87.0.11", "ALTERNATE_DNS": "", "DCS": False, "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "10.87.1.19", "ACTIVE_CIM": "10.87.1.18", "FUSION_IP": "10.87.1.17", "MAINTANENCE_IP": "10.87.1.18", "FUSION_FQDN": "tesla-cim1.rsn.hp.com", "FUSION_IPV6": "fe80::9eb6:54ff:fe97:bc98", "ILO_IP": "tesla-cim1-ilo.rsn.hp.com", "ILO_PASSWORD": "hpvse123", "EM_IP": "fe80::23a9:c127:ede6:7258", "EM1_IP": "fe80::c634:6bff:feb0:3f70", "EM2_IP": "", "EM_IPV4": "16.114.179.116", "HW_DESCRIPTION_FILE": "hw_tesla.js", "ENC_SERIAL_NUMBER": "CN75450625", "ENC_UUID": "00000000HPMP0E7E", "BLADE_DATA": {"1": {"SerialNumber": "CN74250H66", "Model": "Synergy 480 Gen9", "BLADE_ILO_IP6": "FE80::FE15:B4FF:FE12:BD30", "BLADE_ILO_IP4": "16.114.179.176", "BLADE_ILO_FQDN": "tesla01-ilo.rsn.hp.com", "BLADE_ILO_USER": "Administrator", "BLADE_ILO_PW": "hpvse123", "BLADE_TYPE": "BL460t Gen9", "MEZZ_1": "HP FlexFabric Bronco Gen3 2p 20GbE CNA BCM57840", "MEZZ_2": "", "MEZZ_3": "", }, }, "INTERCONNECT_DATA": {"1": {"SerialNumber": "TWA4280042", "Model": "HP VC SE 40Gb F8 Module"}, }, "DISCOVER_TRUTH_FILE": "discover_tesla.js" }, "jeep": { "ILO_PASSWORD": "AcmeAcme", "GATEWAY_IP": "10.93.0.1", "NETMASK_IP": "255.255.0.0", "PRIMARY_DNS": "10.93.0.11", "ALTERNATE_DNS": '', "DCS": False, "FUSION_SSH_USERNAME": "root", "FUSION_SSH_PASSWORD": "hpvse1", "STANDBY_CIM": "10.93.1.17", "ACTIVE_CIM": "10.93.1.16", "FUSION_IP": "10.93.1.15", "EM_IP": "fe80:0:0:0:18a8:7e13:c9d5:1152", "EM1_IP": "fe80:0:0:0:18a8:7e13:c9d5:1152", "EM2_IP": "fe80::b21c:b47e:1ead:d433", }, } def get_variables(enclosure_name=None): """ Variable files can have a special get_variables method that returns variables as a mapping. """ variables = enclosure_defaults # Get enclosure configuration if enclosure_name is not None: print "enclosure name: %s" % enclosure_name enclosure_configuration = get_enclosure_configuration(enclosure_name) if enclosure_configuration is not None: for key in enclosure_configuration: variables[key] = enclosure_configuration[key] origIP = variables['EM_IP'] print "EM_IP is Static: %s." % variables['EM_IP'] variables['EM_IP'] = get_enclosure_manager_ip(variables) if variables['EM_IP'] is None: variables['EM_IP'] = origIP print "EM_IP is FloatingIp: %s." 
% variables['EM_IP'] else: print "WARNING: Enclosure '%s' is not known configuration." % enclosure_name return variables def get_enclosure_configuration(enclosure_name): """ Returns Enclosure Manager configuration information from specified enclosure name. Example: get_serial_dl_configuration("tesla-em.rsn.hp.com") """ for name in enclosure_configurations: if enclosure_name == name: return enclosure_configurations[name] return None def get_enclosure_manager_ip(variables): """ Get the floating IPv6 address of the active EM by logging into the CI and extracting the lldp data. """ if 'FUSION_IP' in variables: try: # Connect to the CI Manager. ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(variables['FUSION_IP'], username=variables['FUSION_SSH_USERNAME'], password=variables['FUSION_SSH_PASSWORD']) # We're connected. Let's run the command and get the output. print "SSH to CiMgr succeeded." stdin, stdout, stderr = ssh.exec_command("lldpcli show neighbor") output = stdout.read() # Find 'MgmtIP' followed by the IPv6 address. matches = re.search(r'MgmtIP:\s*(\S*:\S*:\S*:\S*:\S*:\S*)', output, re.MULTILINE) if matches: print "lldpcli call and regex match succeeded." return matches.group(1) except paramiko.BadHostKeyException: logger._warn("Could not connect to %s because of BadKeyException. Need to clean up .ssh directory?" % variables['FUSION_IP']) except Exception as e: logger._warn("Could not connect to %s to determine EM_IP address. \n%s" % (variables['FUSION_IP'], e)) return None if __name__ == "__main__": """ Test Program """ import pprint import sys enclosure_name = "" # Default to no name if len(sys.argv) > 1: enclosure_name = sys.argv[1] variables = get_variables(enclosure_name) print "\nVariables: %s\n" % pprint.pformat(variables)
0c12edb269940c014aef162b5d110b539b105f38
165e706d485e90f4e4f63cfb9f2c35acda14cfc0
/solver1d/setup.py
43d90808245998eaf72f1a296070e72c855444fc
[ "Apache-2.0" ]
permissive
Tarkiyah/googleResearch
65581f3bbbe2ffe248c9e613c0ea7eac336d5372
dea327aa9e7ef7f7bca5a6c225dbdca1077a06e9
refs/heads/master
2022-12-07T12:04:44.153221
2019-11-21T16:03:48
2019-11-21T16:18:28
223,229,888
11
2
Apache-2.0
2022-11-21T21:39:10
2019-11-21T17:38:31
Jupyter Notebook
UTF-8
Python
false
false
1,148
py
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Install solver1d."""

import os
import setuptools

# Read in requirements
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f:
  requirements = [r.strip() for r in f]

setuptools.setup(
    name='solver1d',
    version='0.0.0',
    license='Apache 2.0',
    author='Google LLC',
    author_email='[email protected]',
    install_requires=requirements,
    url='https://github.com/google-research/google-research/'
        'tree/master/solver1d',
    packages=setuptools.find_packages(),
    python_requires='>=2.7')
0146a04104dc8ddd02af43bba75341ee40c82792
e73e318c099c5c71b750269ee84f1bbe09fffac2
/promis/admin.py
9e1f17c2da686b0bb73e82d7a11c462b1f97fda6
[]
no_license
ArietNyshanbaev/deputat
f304f2deb0807241d691a8838d2d11f3e7328c46
6a744969a2ee7a811f31b03994eced9d571c4be2
refs/heads/master
2021-03-23T23:47:39.893956
2016-09-12T16:46:15
2016-09-12T16:46:15
66,534,121
0
0
null
null
null
null
UTF-8
Python
false
false
769
py
# Import standard Django packages
from django.contrib import admin

# Import models from the database
from .models import Promis, PromisRank, Result, Comments


class PromisAdmin(admin.ModelAdmin):
    """
    Admin registration class for promises
    """
    list_display = ('title', 'person', 'date', 'is_approved')
    list_filter = ('date', 'is_approved')

admin.site.register(Promis, PromisAdmin)


class PromisRankAdmin(admin.ModelAdmin):
    """
    Admin registration class for promise ratings
    """
    list_display = ('promis', 'user')
    list_filter = ('promis',)

admin.site.register(PromisRank, PromisRankAdmin)
admin.site.register(Result)
admin.site.register(Comments)
6f74581e0d8602c2f47bea02cff088e3ce4eadb6
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
/python/python_11059.py
9815daed4b4f8901097b703a4e2b8c9c3ac7fe0a
[]
no_license
AK-1121/code_extraction
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
5297a4a3aab3bb37efa24a89636935da04a1f8b6
refs/heads/master
2020-05-23T08:04:11.789141
2015-10-22T19:19:40
2015-10-22T19:19:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
142
py
# How to Handle JSON with escaped Unicode characters using python json module?
data = jsDat.get('data')
data = data.encode('ascii', 'ignore')
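A self-contained sketch of the same idea; the `jsDat` object above is assumed to come from `json.loads`, which already decodes `\uXXXX` escapes:

import json

raw = '{"data": "caf\\u00e9"}'   # JSON with an escaped Unicode character
jsDat = json.loads(raw)          # json.loads decodes \u00e9 to 'é'
data = jsDat.get('data')         # 'café'
ascii_only = data.encode('ascii', 'ignore')  # drops non-ASCII: b'caf'
print(ascii_only)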
17968e3caa7ee7355836b9479e64c6bc5051f7d8
2bd8fbe6e2ee2511d00479440aa589249234c2d8
/01-Supervised/17-neuralNetwork/day19/day19-1-neuralNetwork-2-MLPClassifier.py
23e1e91ceac3ac54f709ffc568dc722516ce7512
[]
no_license
LeenonGo/sklearn-learn
71d21f9b26cfb5cc6d65a22883127db873a31091
460d6e75e82943c802f7c025a03c821d02b5d232
refs/heads/master
2023-07-13T18:42:17.510938
2021-08-18T11:34:06
2021-08-18T11:34:06
371,628,997
0
0
null
null
null
null
UTF-8
Python
false
false
1,497
py
# -*- coding: utf-8 -*-
# @Author  : Lee
# @Time    : 2021/7/23 9:20
# @Function: https://www.scikitlearn.com.cn/0.21.3/18/#1172
#
from sklearn.neural_network import MLPClassifier

X = [[0., 0.], [1., 1.]]
y = [0, 1]
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X, y)

print(clf.predict([[2., 2.], [-1., -2.]]))
print([coef.shape for coef in clf.coefs_])  # clf.coefs_ holds the model's weight matrices

# Currently MLPClassifier supports only the cross-entropy loss; probability
# estimates come from the predict_proba method.
# Training uses gradients computed by backpropagation together with some form
# of gradient descent.
# Minimizing the cross-entropy loss yields, for each sample x, a vector of
# probability estimates P(y|x).
print(clf.predict_proba([[2., 2.], [1., 2.]]))
# [[1.96718015e-04 9.99803282e-01]
#  [1.96718015e-04 9.99803282e-01]]
# i.e. for [2., 2.] the predicted probability of label 0 is 1.96718015e-04 and
# of label 1 is 9.99803282e-01.

# The model also supports multi-label classification, where one sample may
# belong to several classes at once.
# For each class, the raw output passes through the logistic function; values
# greater than or equal to 0.5 are rounded to 1, otherwise to 0.
# In a sample's predicted output, the indices holding a 1 are the classes
# assigned to that sample.
X = [[0., 0.], [1., 1.]]
y = [[0, 1], [1, 1]]
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(15,), random_state=1)
clf.fit(X, y)
print(clf.predict([[1., 2.]]))
print(clf.predict([[0., 0.]]))
e60c0abdc0b7ac4f9ffc02f61ca16a77fee99345
61ba1d073fdbb34ad2ae4e9d0ae1b88083faeb02
/Sem I/Programarea Algoritmilor/laboratoare/lab2/problema6.py
82197831572d9292a03a2e658d6479b36dae91a4
[]
no_license
CosminHorjea/fmi
d899b639a085203963413918d2c508307cb9ba60
6b4911cdec64929710d984223385c1e8e36d5c7c
refs/heads/master
2023-07-09T01:04:24.418286
2023-06-22T21:50:00
2023-06-22T21:50:00
213,064,779
7
1
null
null
null
null
UTF-8
Python
false
false
1,120
py
'''
6. Ana's electronic journal contains, for every day, one sentence with
information about the expenses she made that day. Write a program that reads
such a sentence from Ana's journal and then prints the total amount she spent
that day. For example, for the sentence "Astăzi am cumpărat pâine de 5 RON,
pe lapte am dat 10 RON, iar de 15 RON am cumpărat niște cașcaval. De asemenea,
mi-am cumpărat și niște papuci cu 50 RON!", the program must print a total of
80 RON. The sentence is assumed to be well formed: every number appearing in
it is a natural number representing an amount Ana spent that day!
'''

s = "Astăzi am cumpărat pâine de 5 RON, pe lapte am dat 10 RON, iar de 15 RON am cumpărat niște cașcaval. De asemenea, mi-am cumpărat și niște papuci cu 50 RON!"

poz = s.find("RON")
suma = 0
for i in range(poz):
    # advance i until s[i:poz-1] is exactly the number before "RON"
    while not s[i:poz - 1].isnumeric():
        i += 1
    # print(s[i:poz])
    suma += int(s[i:poz])
    s = s[poz + 3:]
    poz = s.find("RON")
    if poz == -1:  # was `poz is -1`, which only works by CPython accident
        break
print(suma)
5ae0ec49d2a08f1c7a2d6df28e40d28902ff9950
1d450f4655ae63240e88b474b8a17c1e711e1a81
/AttributesAndMethods/DocumentManagement/project/topic.py
1244ef4688a709dd1f29d1fbdede9e2fc76badc3
[]
no_license
RuzhaK/PythonOOP
f2cb396390349e2aac605c90fd7a18039653cf5e
68cbf321b5947b376459d7397aed36554347d256
refs/heads/master
2023-05-30T06:17:42.618984
2021-06-17T08:04:46
2021-06-17T08:04:46
371,284,629
0
0
null
null
null
null
UTF-8
Python
false
false
381
py
class Topic:
    def __init__(self, id, topic, storage_folder):
        self.id = id
        self.topic = topic
        self.storage_folder = storage_folder

    def edit(self, new_topic: str, new_storage_folder: str):
        self.topic = new_topic
        self.storage_folder = new_storage_folder

    def __repr__(self):
        return f"Topic {self.id}: {self.topic} in {self.storage_folder}"
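A short usage sketch for the class above (the values are invented for illustration):

topic = Topic(1, "Finance", "/docs/finance")
topic.edit("Budget", "/docs/budget")
print(topic)  # -> Topic 1: Budget in /docs/budget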
9aaf56b91989e82c66fa113d95de30112ec46571
bf2d60eaa66d20f4a463999aca15bc43a026db22
/app/email.py
6f81dd2a55cde5d2582978572a2654ce5caa7d47
[]
no_license
kfinn6561/covid_map
2c60fdea3787b73b5f754eed51c56d3e276473fb
52d3f36375f0d610cc9b852b485f5397cb2ab642
refs/heads/main
2023-03-24T19:54:35.251710
2021-03-23T10:18:23
2021-03-23T10:18:23
350,666,986
1
0
null
null
null
null
UTF-8
Python
false
false
458
py
'''
Created on 7 Jun 2020

@author: kieran
'''
from flask_mail import Message
from app import app, mail
from threading import Thread


def send_async_email(app, msg):
    with app.app_context():
        mail.send(msg)


def send_email(subject, sender, recipients, text_body, html_body):
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    Thread(target=send_async_email, args=(app, msg)).start()
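A hedged usage sketch for send_email above; it assumes the Flask app is configured with valid mail settings, and the addresses are hypothetical:

send_email(
    subject="Daily report",
    sender="[email protected]",        # hypothetical configured sender
    recipients=["[email protected]"],    # hypothetical recipient
    text_body="Plain-text body",
    html_body="<p>HTML body</p>",
)
# The message is sent on a background Thread so the request is not blocked.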
2dbbce0ffe811caa9084b2700bb291b04f1da200
b372bc13b4715e87f39f80c1c2465fc6d93f3609
/register/tipmac.py
56092803f1532810fb9f495adae1b494bb9538a3
[ "WTFPL" ]
permissive
Retenodus/Maiznet
4dff822f0ab3d8c08196d09f08ef169357e37c2f
f7fa2c4ee964cab1cc2e33feadeed826f18489b7
refs/heads/master
2021-01-24T00:02:49.168595
2012-06-25T23:13:02
2012-06-25T23:13:02
2,130,383
1
0
null
null
null
null
UTF-8
Python
false
false
1,864
py
########################################################################
# vim: fileencoding=utf-8 ts=8 noexpandtab :
#
# ~~~~ Maiznet.fr ~~~~
#
#   -> register/tipmac.py
#
#
# Copyright 2011 Grégoire Leroy <[email protected]>
# Copyright 2011 Rémy Sanchez <[email protected]>
#
# This file is distributed under the terms of the WTFPL. For more
# information, see http://sam.zoy.org/wtfpl/COPYING
########################################################################

import socket
import re

from django.conf import settings


def isMac(mac):
	"""
	Return True if the value is a MAC address, False otherwise.
	"""
	X = '([a-fA-F0-9]{2}[:\-]){5}[a-fA-F0-9]{2}'  # MAC address regex
	if re.compile(X).search(mac):
		return True
	return False


def ip_to_mac(ip):
	"""
	Queries the tipmac server and returns the MAC address matching the
	IP. If the MAC is invalid, an exception is raised.

	This function depends on settings to be defined in settings.py:

	- **MAIZ_IP_GUEST**, the guest IP range. If the address is not in
	  this range, the function immediately raises an exception. As of
	  this writing, that range is 172.17.192.0/18.

	- **TIPMAC_SERVER**, the IP address of the tipmac server.

	- **TIPMAC_PORT**, the server's port.
	"""
	# Check whether the IP address belongs to a Maiznet guest
	import IPy
	if ip is None or IPy.IPint(settings.MAIZ_IP_GUEST).overlaps(ip) != 1:
		raise Exception("IP not in guest subnet")

	ip_server = settings.TIPMAC_SERVER
	port = settings.TIPMAC_PORT

	s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
	s.settimeout(3)
	s.connect((ip_server, port))
	s.send(ip)

	try:
		mac = s.recv(17)
	except:
		raise Exception("No data received")

	if not isMac(mac):
		raise Exception("Error : invalid MAC")

	s.close()
	return mac
1d6a50cd2453611e0a4132a1cb10f5c1d4fbffc1
7bededcada9271d92f34da6dae7088f3faf61c02
/pypureclient/flasharray/FA_2_23/models/network_interface_eth.py
3190b8ae65d39d8a5813962e70c371dac0b1d779
[ "BSD-2-Clause" ]
permissive
PureStorage-OpenConnect/py-pure-client
a5348c6a153f8c809d6e3cf734d95d6946c5f659
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
refs/heads/master
2023-09-04T10:59:03.009972
2023-08-25T07:40:41
2023-08-25T07:40:41
160,391,444
18
29
BSD-2-Clause
2023-09-08T09:08:30
2018-12-04T17:02:51
Python
UTF-8
Python
false
false
6,348
py
# coding: utf-8 """ FlashArray REST API No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: 2.23 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re import six import typing from ....properties import Property if typing.TYPE_CHECKING: from pypureclient.flasharray.FA_2_23 import models class NetworkInterfaceEth(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'address': 'str', 'gateway': 'str', 'mac_address': 'str', 'mtu': 'int', 'netmask': 'str', 'subinterfaces': 'list[FixedReferenceNoId]', 'subnet': 'ReferenceNoId', 'subtype': 'str', 'vlan': 'int' } attribute_map = { 'address': 'address', 'gateway': 'gateway', 'mac_address': 'mac_address', 'mtu': 'mtu', 'netmask': 'netmask', 'subinterfaces': 'subinterfaces', 'subnet': 'subnet', 'subtype': 'subtype', 'vlan': 'vlan' } required_args = { } def __init__( self, address=None, # type: str gateway=None, # type: str mac_address=None, # type: str mtu=None, # type: int netmask=None, # type: str subinterfaces=None, # type: List[models.FixedReferenceNoId] subnet=None, # type: models.ReferenceNoId subtype=None, # type: str vlan=None, # type: int ): """ Keyword args: address (str): The IPv4 or IPv6 address to be associated with the specified network interface. gateway (str): The IPv4 or IPv6 address of the gateway through which the specified network interface is to communicate with the network. mac_address (str): The media access control address associated with the specified network interface. mtu (int): Maximum message transfer unit (packet) size for the network interface, in bytes. MTU setting cannot exceed the MTU of the corresponding physical interface. netmask (str): Netmask of the specified network interface that, when combined with the address of the interface, determines the network address of the interface. subinterfaces (list[FixedReferenceNoId]): List of network interfaces configured to be a subinterface of the specified network interface. subnet (ReferenceNoId): Subnet that is associated with the specified network interface. subtype (str): The subtype of the specified network interface. Only interfaces of subtype `virtual` can be created. Configurable on POST only. Valid values are `failover_bond`, `lacp_bond`, `physical`, and `virtual`. 
vlan (int): VLAN ID """ if address is not None: self.address = address if gateway is not None: self.gateway = gateway if mac_address is not None: self.mac_address = mac_address if mtu is not None: self.mtu = mtu if netmask is not None: self.netmask = netmask if subinterfaces is not None: self.subinterfaces = subinterfaces if subnet is not None: self.subnet = subnet if subtype is not None: self.subtype = subtype if vlan is not None: self.vlan = vlan def __setattr__(self, key, value): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `NetworkInterfaceEth`".format(key)) self.__dict__[key] = value def __getattribute__(self, item): value = object.__getattribute__(self, item) if isinstance(value, Property): raise AttributeError else: return value def __getitem__(self, key): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `NetworkInterfaceEth`".format(key)) return object.__getattribute__(self, key) def __setitem__(self, key, value): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `NetworkInterfaceEth`".format(key)) object.__setattr__(self, key, value) def __delitem__(self, key): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `NetworkInterfaceEth`".format(key)) object.__delattr__(self, key) def keys(self): return self.attribute_map.keys() def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): if hasattr(self, attr): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(NetworkInterfaceEth, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, NetworkInterfaceEth): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
ab2bda27234228c0eddad94ca062b9b2d0cf30ea
53fab060fa262e5d5026e0807d93c75fb81e67b9
/backup/user_242/ch16_2020_03_23_19_51_17_162358.py
4c14a0bbe8dc96d55dae4bea543ccd289b62c78f
[]
no_license
gabriellaec/desoft-analise-exercicios
b77c6999424c5ce7e44086a12589a0ad43d6adca
01940ab0897aa6005764fc220b900e4d6161d36b
refs/heads/main
2023-01-31T17:19:42.050628
2020-12-16T05:21:31
2020-12-16T05:21:31
306,735,108
0
0
null
null
null
null
UTF-8
Python
false
false
185
py
v = float(input('What is the bill total?: '))


def valor_da_conta(v):
    resultado = v * 1.1
    return resultado


print('Bill total with 10% added: R$ {0:.2f}'.format(valor_da_conta(v)))
1a5fe7c6d9e954247f9388fd4b723a23fe944586
5286255a93db21ea9defc1f8f6fc71990c3c2fa9
/testing/scripts/.svn/text-base/xclean_unit_tests.py.svn-base
5c8066c963806e62aba75180468dffb41a007d7e
[]
no_license
brynmathias/AnalysisV2
1367767dbf22eef6924700c4b0a00581ea8ed965
ee17c019bb04243876a51c7ef7719cc58a52adea
refs/heads/master
2021-01-01T19:20:27.277628
2012-04-17T13:34:26
2012-04-17T13:34:26
2,600,415
0
2
null
null
null
null
UTF-8
Python
false
false
436
#!/usr/bin/env python
import setupSUSY
import unittest

#import xclean.clonevector_tests
#import xclean.basic_tests

if __name__ == '__main__':
    testcases = ["xclean.clonevector_tests.TestCloneVector",
                 "xclean.basic_tests.TestBasic"]
    suite = unittest.TestLoader().loadTestsFromNames(testcases)
    print "="*50
    print "Cross Cleaner Unit Tests"
    print "="*50
    unittest.TextTestRunner(verbosity=2).run(suite)
65062af86c81873c11336b2bac695d78088412cc
f08f09b23dbf3ce3b967c46c5f109b792c5c8993
/visual_dynamics/envs/env_spec.py
4145f5c2af567b74f0d3173f709f5f31f27471e0
[ "MIT" ]
permissive
saadmahboob/visual_dynamics
5df6ea0f53144f0de8fdc991c9f00d78ac98c680
90227bb0d0aebb1989117b5c25ca311655ca7cc7
refs/heads/master
2021-06-17T05:50:22.661375
2017-05-12T02:53:47
2017-05-12T02:53:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
990
py
from visual_dynamics.spaces.base import Space
from visual_dynamics.utils.config import ConfigObject


class EnvSpec(ConfigObject):
    def __init__(self, action_space, observation_space):
        self._action_space = action_space
        self._observation_space = observation_space

    @property
    def observation_space(self):
        return self._observation_space

    @property
    def action_space(self):
        return self._action_space

    def _get_config(self):
        config = super(EnvSpec, self)._get_config()
        action_space = self.action_space
        if not isinstance(action_space, ConfigObject):
            action_space = Space.create(action_space)
        observation_space = self.observation_space
        if not isinstance(observation_space, ConfigObject):
            observation_space = Space.create(observation_space)
        config.update({'action_space': action_space,
                       'observation_space': observation_space})
        return config
42fa0316fcea9a59c8394e31d472a79f370cde1f
d4fe2607c25e514df42831ddae3f9509057c2d46
/USBApplication/tasks/serial_task.py
17f72ecaadd8834d1c0fcb55d28f2c8e6acdfd52
[]
no_license
bxm156/EECS398
8cdbb1057f8d7d2fd8764df4309dd4712799d766
aa638d81fea008d467118691882cee73cefde147
refs/heads/master
2021-01-01T05:36:00.159758
2013-12-05T17:11:09
2013-12-05T17:11:09
12,497,895
0
1
null
null
null
null
UTF-8
Python
false
false
142
py
from base_task import BaseTask


class SerialTask(BaseTask):
    def set_parameters(self, param_dict):
        self.parameters = param_dict
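A minimal, self-contained sketch of how this class is meant to be used; the real BaseTask lives elsewhere in the repo, so a stand-in stub is used here and the parameter keys are hypothetical:

class BaseTask(object):  # stand-in for the real base class
    pass

class SerialTask(BaseTask):
    def set_parameters(self, param_dict):
        self.parameters = param_dict

task = SerialTask()
task.set_parameters({"port": "/dev/ttyUSB0", "baudrate": 9600})  # hypothetical keys
print(task.parameters["baudrate"])  # 9600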
2b7259f9c38153470debef2a76db963f8b051486
660c72411c148507b0b04c517f154df7d0396281
/wiki/middleware.py
7ad75cf3cf8b65314f7ee3a31ae1213d0886b9e0
[]
no_license
xuguangzong/WIKI
e38dc3d434470c3238ebdf552768e42a7becb292
ac6b573ff6a658977fc97508ff90f004df3169a1
refs/heads/main
2023-06-26T15:29:55.675667
2021-07-23T08:39:50
2021-07-23T08:39:50
386,569,512
0
0
null
null
null
null
UTF-8
Python
false
false
1,417
py
import time
import logging

from wiki.documents import ELASTICSEARCH_ENABLED, ElaspedTimeDocumentManager

logger = logging.getLogger(__name__)


class OnlineMiddleware(object):
    def __init__(self, get_response=None):
        self.get_response = get_response
        super().__init__()

    def __call__(self, request):
        """
        page render time
        :param request:
        :return:
        """
        start_time = time.time()
        response = self.get_response(request)
        http_user_agent = request.META.get('HTTP_USER_AGENT', '')
        if 'spider'.upper() not in http_user_agent.upper():
            try:
                cast_time = time.time() - start_time
                if ELASTICSEARCH_ENABLED:
                    time_taken = round(cast_time * 1000, 2)
                    url = request.path
                    from django.utils import timezone
                    ElaspedTimeDocumentManager.create(
                        url=url,
                        time_taken=time_taken,
                        log_datetime=timezone.now(),
                        type='wiki',
                        useragent=http_user_agent)
                response.content = response.content.replace(
                    b'<!!LOAD_TIMES!!>', str.encode(str(cast_time)[:5]))
            except Exception as e:
                logger.error("Error OnlineMiddleware: %s" % e)
        return response
d7a1d6e57acc930c4bd99451bd5df4c5e3bc6cfe
fb8ee3a962f6d690badd02409206be0724e6a659
/examples/bScript.py
a85a71e0fb6d3ca27b6c1f220cfa141ea50ff6fa
[]
no_license
chonlei/SanPy
3efbfcfd46b5223c52724a57d1640a241cbde82b
fb9f399527adcdf01dad49011d7ecfd47ac77139
refs/heads/master
2023-07-11T03:46:37.249897
2021-08-11T01:54:44
2021-08-11T01:54:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,015
py
import sys
sys.path.append("..")  # Adds higher directory to python modules path.

import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt

from sanpy import bAnalysis

# open an abf file into a bAnalysis object
myFile = '../data/19114001.abf'
ba = bAnalysis.bAnalysis(myFile)

# detect spikes
myThreshold = 100
myMedianFilter = 3
halfHeights = [20, 50, 80]
ba.spikeDetect(dVthresholdPos=myThreshold, medianFilter=myMedianFilter, halfHeights=halfHeights)

# ba now has a number of spikes, they are all in a list called ba.spikeDict
print('number of spikes detected:', len(ba.spikeDict))

# each spike in the list is a python dictionary
# lets look at one spike
mySpikeNumber = 5
print(ba.spikeDict[mySpikeNumber])

# each spike has a number of keys (e.g. the name of the stat) and for each of those a 'value'
for key, value in ba.spikeDict[mySpikeNumber].items():
    print(key, value)

for spike in ba.spikeDict:
    print(spike['thresholdVal'])

# plot spike threshold (mV) versus spike time (seconds)
# (a sketch completing the step promised above; 'thresholdSec' is assumed to
#  be the spike-time key in each spike dictionary)
plt.plot([spike['thresholdSec'] for spike in ba.spikeDict],
         [spike['thresholdVal'] for spike in ba.spikeDict], 'o')
plt.xlabel('Spike time (s)')
plt.ylabel('Threshold (mV)')
plt.show()
b407fac8430611e58e38951ad808096c8da0eb7f
3f394cd47a1aaf0ae2f8de5ab9854f52341e017a
/clay/helpers.py
df83597113a8b027b38f28f310309ccb1a9a6517
[ "MIT" ]
permissive
devildeveloper/Clay
e3771d97d23ae3ba7d866d8921102d50e95a6562
ca419ee4cfe191724ed68e3507515a5b258bb4bb
refs/heads/master
2021-01-18T02:27:22.094481
2013-11-18T20:24:02
2013-11-18T20:24:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,628
py
# -*- coding: utf-8 -*-
from datetime import datetime
import errno
from fnmatch import fnmatch
import io
import os
import shutil
import unicodedata


def to_unicode(txt, encoding='utf8'):
    if not isinstance(txt, basestring):
        txt = str(txt)
    if isinstance(txt, unicode):
        return txt
    return unicode(txt, encoding)


def unormalize(text, form='NFD'):
    return unicodedata.normalize(form, text)


def fullmatch(path, pattern):
    path = unormalize(path)
    name = os.path.basename(path)
    return fnmatch(name, pattern) or fnmatch(path, pattern)


def read_content(path, **kwargs):
    kwargs.setdefault('mode', 'rt')
    with io.open(path, **kwargs) as f:
        return f.read()


def make_dirs(*lpath):
    path = os.path.join(*lpath)
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    return path


def create_file(path, content, encoding='utf8'):
    if not isinstance(content, unicode):
        content = unicode(content, encoding)
    with io.open(path, 'w+t', encoding=encoding) as f:
        f.write(content)


def copy_if_updated(path_in, path_out):
    if os.path.exists(path_out):
        newt = os.path.getmtime(path_in)
        currt = os.path.getmtime(path_out)
        if currt >= newt:
            return
    shutil.copy2(path_in, path_out)


def get_updated_datetime(path):
    ut = os.path.getmtime(path)
    return datetime.fromtimestamp(ut)


def sort_paths_dirs_last(paths):
    def dirs_last(a, b):
        return cmp(a[0].count('/'), b[0].count('/')) or cmp(a[0], b[0])
    return sorted(paths, cmp=dirs_last)
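A quick sketch of how fullmatch above behaves: it matches the pattern against either the basename or the full path (paths are hypothetical):

print(fullmatch(u'pages/index.html', '*.html'))   # True: matches the basename
print(fullmatch(u'pages/index.html', 'pages/*'))  # True: matches the full path
print(fullmatch(u'pages/index.html', '*.md'))     # False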
849c3e365db5121e9b999f5684403461c40b7bfd
2467b5d4a6d8d6ffeff547478a8dd7fa3d4d9234
/chapter04/demo_4.4.py
efcf5f6dae4cb9cfb3a2035c834d797028778720
[ "MIT" ]
permissive
NetworkRanger/tensorflow-ml-exercise
6c92ec3cf87a6def0c1d7818e59c83585cc1aebe
d0c46c10bfc3ee06c211ebe2f25489f8407c369f
refs/heads/master
2020-04-02T22:54:57.268384
2018-12-22T13:52:01
2018-12-22T13:52:01
154,848,809
3
2
null
null
null
null
UTF-8
Python
false
false
5,629
py
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2018/11/5 10:44 PM

# 4.4 Using kernel functions with TensorFlow

# 1. Import the required libraries and create a graph session
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets

sess = tf.Session()

# 2. Generate the toy data: two concentric rings, where each ring is a
# different class and the labels are restricted to -1 or 1. For easier
# plotting, split each class into its x and y values.
(x_vals, y_vals) = datasets.make_circles(n_samples=500, factor=.5, noise=.1)
y_vals = np.array([1 if y == 1 else -1 for y in y_vals])
class1_x = [x[0] for i, x in enumerate(x_vals) if y_vals[i] == 1]
class1_y = [x[1] for i, x in enumerate(x_vals) if y_vals[i] == 1]
class2_x = [x[0] for i, x in enumerate(x_vals) if y_vals[i] == -1]
class2_y = [x[1] for i, x in enumerate(x_vals) if y_vals[i] == -1]

# 3. Declare the batch size and placeholders, and create the model variable b.
# For the SVM, a larger batch size keeps training from fluctuating between
# iterations and yields a stable model. Note the extra placeholder for
# prediction points; a colored grid will later visualize the region belonging
# to each class.
batch_size = 250
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32)
b = tf.Variable(tf.random_normal(shape=[1, batch_size]))

# 4. Create the Gaussian (RBF) kernel, expressed with matrix operations
gamma = tf.constant(-50.0)
dist = tf.reduce_mean(tf.square(x_data), 1)
dist = tf.reshape(dist, [-1, 1])
sq_dists = tf.add(tf.subtract(dist, tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))), tf.transpose(dist))
my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))
"""
Note the broadcast add and subtract used to build sq_dists. A linear kernel
can be expressed as: my_kernel = tf.matmul(x_data, tf.transpose(x_data)).
"""

# 5. Declare the dual problem. To maximize it, we minimize the negative of
# the loss function with tf.negative()
model_output = tf.matmul(b, my_kernel)
first_term = tf.reduce_mean(b)
b_vec_cross = tf.matmul(tf.transpose(b), b)
y_target_cross = tf.matmul(y_target, tf.transpose(y_target))
second_term = tf.reduce_mean(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)))
loss = tf.negative(tf.subtract(first_term, second_term))

# 6. Create the prediction and accuracy functions. First build a prediction
# kernel, this time between the prediction points and the training points.
# The prediction is the sign of the model output.
rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))

prediction_output = tf.matmul(tf.multiply(tf.transpose(y_target), b), pred_kernel)
prediction = tf.sign(prediction_output - tf.reduce_mean(prediction_output))
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.squeeze(prediction), tf.squeeze(y_target)), tf.float32))
"""
For a linear prediction kernel, change the prediction kernel to:
pred_kernel = tf.matmul(x_data, tf.transpose(prediction_grid)).
"""

# 7. Create the optimizer and initialize all variables
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)
init = tf.initialize_all_variables()
sess.run(init)

# 8. Run the training loop
loss_vec = []
batch_accuracy = []
for i in range(500):
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = x_vals[rand_index]
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})

    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(temp_loss)

    acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x, y_target: rand_y, prediction_grid: rand_x})
    batch_accuracy.append(acc_temp)

    if (i+1) % 100 == 0:
        print('Step #' + str(i+1))
        print('Loss = ' + str(temp_loss))

# 9. The output looks like this
"""
Step #100
Loss = -0.040872738
Step #200
Loss = -0.04066868
Step #300
Loss = -0.04294016
Step #400
Loss = -0.042239938
Step #500
Loss = -0.043024104
"""

# 10. To visualize the classification over the whole data space, create a grid
# of prediction points and predict on all of them
x_min, x_max = x_vals[:, 0].min() - 1, x_vals[:, 0].max() + 1
y_min, y_max = x_vals[:, 1].min() - 1, x_vals[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                     np.arange(y_min, y_max, 0.02))
grid_points = np.c_[xx.ravel(), yy.ravel()]
[grid_prediction] = sess.run(prediction, feed_dict={x_data: rand_x, y_target: rand_y, prediction_grid: grid_points})
grid_prediction = grid_prediction.reshape(xx.shape)

# 11. Plot the predictions, the batch accuracy, and the loss
plt.contourf(xx, yy, grid_prediction, cmap=plt.cm.Paired, alpha=0.8)
plt.plot(class1_x, class1_y, 'ro', label='Class 1')
plt.plot(class2_x, class2_y, 'kx', label='Class -1')
plt.legend(loc='lower right')
plt.ylim([-1.5, 1.5])
plt.xlim([-1.5, 1.5])
plt.show()

plt.plot(batch_accuracy, 'k-', label='Accuracy')
plt.title('Batch Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()

plt.plot(loss_vec, 'k-')
plt.title('Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()

# 12. For brevity only these training-result plots are produced here; the
# plotting code can also be run piece by piece to inspect each result.
6f9704f52133fd74a784d3d12df74871c7595eff
70538979b952b8afc380bd19ac565b3967178b87
/docker_odoo_env/commands/command.py
e95e7d0446d250b93fb8a576afb260b1bd0a060c
[ "MIT" ]
permissive
sebatista/docker_odoo_env
b693b9111e68f4162784ee77910c034b5dcb0b21
57963fb677257a71c2d9bfbf0400a78eaa62fd10
refs/heads/master
2020-04-25T06:59:31.635170
2019-02-25T01:42:22
2019-02-25T01:42:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
392
py
# -*- coding: utf-8 -*-
# For copyright and license notices, see __manifest__.py file in module root

from docker_odoo_env.messages import Msg

msg = Msg()


class Command(object):
    def __init__(self, config):
        self._config = config

    def execute(self):
        raise NotImplementedError

    def show_doc(self):
        msg.text(self._config.args.get('command'))
        exit()
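A sketch of the intended subclassing pattern, reusing the Command class above; the PullCommand name, its body, and the FakeConfig stub are hypothetical, not part of the package:

class FakeConfig(object):  # stand-in for the real config object
    args = {'command': 'pull', 'environment': 'dev'}

class PullCommand(Command):  # hypothetical subclass
    def execute(self):
        # a real subclass would act on self._config here
        print('pulling images for %s' % self._config.args.get('environment'))

PullCommand(FakeConfig()).execute()  # -> pulling images for dev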
3b55ba6c605dd0fc783bf9b32b031d064c3a3e25
32bbbd6dbd100bbb9a2282f69ac3b7b34516347f
/Study/keras/keras40_mnist3_dnn.py
fb46b91e2a7ed5c39bd84d7fe794c8c87742301f
[]
no_license
kimjh1753/AIA_Academy_Study
2162d4d4f1a6b8ca1870f86d540df45a8742f359
6022718ae7f9e5170a19c4786d096c8042894ead
refs/heads/master
2023-05-07T12:29:12.920693
2021-06-05T01:09:33
2021-06-05T01:09:33
324,136,796
0
0
null
null
null
null
UTF-8
Python
false
false
3,065
py
# Weekend assignment
# Build this as a Dense model with input_shape=(28*28,)

# MNIST, often called the "hello world" of AI!!!

import numpy as np
import matplotlib.pyplot as plt

# 1. Data
from tensorflow.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()

print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)    # (10000, 28, 28) (10000,)

print(x_train[0])
print("y_train[0] : ", y_train[0])
print(x_train[0].shape)  # (28, 28)

# plt.imshow(x_train[0], 'gray')
# plt.imshow(x_train[0])
# plt.show()

x_train = x_train.reshape(60000, 28*28)
x_test = x_test.reshape(10000, 28*28)
# (x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1))
print(x_train.shape, x_test.shape)  # (60000, 784) (10000, 784)

# One-hot encoding
# do it yourself!!!!!
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape)  # (60000, 10)
print(y_test.shape)   # (10000, 10)

# 2. Build the model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = Sequential()
model.add(Dense(200, activation='relu', input_shape=(784,)))
model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(10, activation='softmax'))

# Exercise!! Finish the model!!!
# The metric is acc /// aim for 0.985 or higher

# Extension:
# print 10 values of y_test and 10 values of y_pred
# y_test[:10] = (?,?,?,?,?,?,?,?,?,?,?)
# y_pred[:10] = (?,?,?,?,?,?,?,?,?,?,?)

# 3. Compile and train
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='loss', patience=30, mode='auto')
model.fit(x_train, y_train, epochs=2000, validation_split=0.2, batch_size=2000, callbacks=[es])

# 4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test, batch_size=1)
print("loss : ", loss)
print("acc : ", acc)

# NOTE: only a single sample is predicted below, so the [:10] slices still
# print just one row each
y_test = np.array(model.predict(x_train[:1]))
print(y_test[:10])
print("============")
y_pred = np.array(model.predict(x_test[:1]))
print(y_pred[:10])

# keras40_mnist2_cnn
# loss :  0.00260396976955235
# acc :  0.9854999780654907
# [[8.6690171e-08 2.8707976e-08 9.1137373e-09 9.6521189e-06 4.6547077e-09
#   9.9998856e-01 7.6187533e-08 5.5741470e-08 1.3864026e-06 2.0224462e-07]]
# ============
# [[7.0327958e-30 2.2413428e-23 6.9391834e-21 9.2217209e-22 5.1841172e-22
#   8.7506048e-26 2.4799229e-27 1.0000000e+00 8.0364114e-26 3.3208760e-17]]

# keras40_mnist3_dnn
# loss :  0.005172424484044313
# acc :  0.9724000096321106
# [[9.4863184e-15 2.2668929e-19 1.8625454e-22 5.9676188e-07 2.5733180e-25
#   9.9999940e-01 1.5588427e-20 7.8994310e-23 5.6835017e-22 2.6443269e-20]]
# ============
# [[3.0520350e-26 2.7246760e-23 4.5444517e-25 3.6449811e-28 1.3460386e-28
#   2.1042897e-27 6.9805158e-30 1.0000000e+00 1.8761058e-26 2.6409651e-25]]
ea5831164ca916edd5d87547c9867c3506951b19
a46d135ba8fd7bd40f0b7d7a96c72be446025719
/packages/python/plotly/plotly/validators/scattermapbox/marker/colorbar/_ticksuffix.py
f10a7f2ec190f24f5ab3753b5e8bbd093bd710b0
[ "MIT" ]
permissive
hugovk/plotly.py
5e763fe96f225d964c4fcd1dea79dbefa50b4692
cfad7862594b35965c0e000813bd7805e8494a5b
refs/heads/master
2022-05-10T12:17:38.797994
2021-12-21T03:49:19
2021-12-21T03:49:19
234,146,634
0
0
MIT
2020-01-15T18:33:43
2020-01-15T18:33:41
null
UTF-8
Python
false
false
465
py
import _plotly_utils.basevalidators


class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
    def __init__(
        self,
        plotly_name="ticksuffix",
        parent_name="scattermapbox.marker.colorbar",
        **kwargs
    ):
        super(TicksuffixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs
        )
c5e6b35ebc6e4bdbfceaaeb25098ab18f8b9b869
667f153e47aec4ea345ea87591bc4f5d305b10bf
/Solutions/Ch5Ex113.py
c53cc96c6971b3a0a5a3f6418b386e6ee3f1b5fb
[]
no_license
Parshwa-P3/ThePythonWorkbook-Solutions
feb498783d05d0b4e5cbc6cd5961dd1e611f5f52
5694cb52e9e9eac2ab14b1a3dcb462cff8501393
refs/heads/master
2022-11-15T20:18:53.427665
2020-06-28T21:50:48
2020-06-28T21:50:48
275,670,813
1
0
null
2020-06-28T21:50:49
2020-06-28T21:26:01
Python
UTF-8
Python
false
false
583
py
# Ch5Ex113.py
# Author: Parshwa Patil
# ThePythonWorkbook Solutions
# Exercise No. 113
# Title: Formatting a List

from Ch5Ex107 import inputWordList


def formatWordList(words):
    if len(words) == 1:
        return str(words[0])
    elif len(words) == 2:
        return str(words[0]) + " and " + str(words[1])
    elif len(words) >= 3:
        res = ""
        for w in words[:-2]:
            res += str(w) + ", "
        res += formatWordList(words[-2:])
        return res
    else:
        return ""


def main():
    words = inputWordList()
    res = formatWordList(words)
    print(res)


if __name__ == "__main__":
    main()
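A quick check of the formatWordList function above, with invented word lists; the recursion bottoms out in the one- and two-word cases:

print(formatWordList(["apple"]))                  # apple
print(formatWordList(["apple", "pear"]))          # apple and pear
print(formatWordList(["apple", "pear", "plum"]))  # apple, pear and plum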
67cf3b7a3529607eabe8db685af3c5341070b75b
b2a14c4398c3e3917ac0c5e4bf4f80ca0c39c7a4
/backend/manage.py
a0889d37f699f6b1c500e493be1b41aaa0babe05
[]
no_license
crowdbotics-apps/twentyonem-dev-2089
44303b993b32e82b1e0c54ab85b57168513adc59
a33e29a7393f202889dbd6b387621e97689c914d
refs/heads/master
2022-04-09T06:46:32.455007
2020-03-23T13:42:16
2020-03-23T13:42:16
248,944,609
0
0
null
null
null
null
UTF-8
Python
false
false
639
py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twentyonem_dev_2089.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
d1330779a375b8df3f50246f86dbb910325224aa
b441503bcdb484d098885b19a989932b8d053a71
/neural_sp/models/seq2seq/encoders/conformer.py
37c2d75300f54d407e4e33e7a613a214d7d3638d
[ "Apache-2.0" ]
permissive
entn-at/neural_sp
a266594b357b175b0fea18253433e32adc62810c
9dbbb4ab3985b825f8e9120a603a6caa141c8bdd
refs/heads/master
2020-08-28T05:48:28.928667
2020-06-22T19:17:53
2020-06-22T19:17:53
217,611,439
0
0
null
2019-10-25T20:40:18
2019-10-25T20:40:18
null
UTF-8
Python
false
false
21,405
py
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2020 Kyoto University (Hirofumi Inaguma) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Conformer encoder.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import logging import math import random import torch import torch.nn as nn from neural_sp.models.modules.conformer_convolution import ConformerConvBlock from neural_sp.models.modules.positional_embedding import XLPositionalEmbedding from neural_sp.models.modules.positionwise_feed_forward import PositionwiseFeedForward as FFN from neural_sp.models.modules.relative_multihead_attention import RelativeMultiheadAttentionMechanism as RelMHA from neural_sp.models.seq2seq.encoders.conv import ConvEncoder from neural_sp.models.seq2seq.encoders.encoder_base import blockwise from neural_sp.models.seq2seq.encoders.encoder_base import EncoderBase from neural_sp.models.torch_utils import make_pad_mask from neural_sp.models.torch_utils import tensor2np random.seed(1) logger = logging.getLogger(__name__) class ConformerEncoder(EncoderBase): """Conformer encoder. Args: input_dim (int): dimension of input features (freq * channel) enc_type (str): type of encoder n_heads (int): number of heads for multi-head attention kernel_size (int): kernel size for depthwise convolution in convolution module n_layers (int): number of blocks n_layers_sub1 (int): number of layers in the 1st auxiliary task n_layers_sub2 (int): number of layers in the 2nd auxiliary task d_model (int): dimension of MultiheadAttentionMechanism d_ff (int): dimension of PositionwiseFeedForward ffn_bottleneck_dim (int): bottleneck dimension for the light-weight FFN layer last_proj_dim (int): dimension of the last projection layer pe_type (str): type of positional encoding layer_norm_eps (float): epsilon value for layer normalization ffn_activation (str): nonolinear function for PositionwiseFeedForward dropout_in (float): dropout probability for input-hidden connection dropout (float): dropout probabilities for linear layers dropout_att (float): dropout probabilities for attention distributions dropout_layer (float): LayerDrop probability for layers n_stacks (int): number of frames to stack n_splices (int): frames to splice. Default is 1 frame. 
conv_in_channel (int): number of channels of input features conv_channels (int): number of channles in the CNN blocks conv_kernel_sizes (list): size of kernels in the CNN blocks conv_strides (list): number of strides in the CNN blocks conv_poolings (list): size of poolings in the CNN blocks conv_batch_norm (bool): apply batch normalization only in the CNN blocks conv_layer_norm (bool): apply layer normalization only in the CNN blocks conv_bottleneck_dim (int): dimension of the bottleneck layer between CNN and self-attention layers conv_param_init (float): only for CNN layers before Conformer layers task_specific_layer (bool): add a task specific layer for each sub task param_init (str): parameter initialization method chunk_size_left (int): left chunk size for time-restricted Conformer encoder chunk_size_current (int): current chunk size for time-restricted Conformer encoder chunk_size_right (int): right chunk size for time-restricted Conformer encoder """ def __init__(self, input_dim, enc_type, n_heads, kernel_size, n_layers, n_layers_sub1, n_layers_sub2, d_model, d_ff, ffn_bottleneck_dim, last_proj_dim, pe_type, layer_norm_eps, ffn_activation, dropout_in, dropout, dropout_att, dropout_layer, n_stacks, n_splices, conv_in_channel, conv_channels, conv_kernel_sizes, conv_strides, conv_poolings, conv_batch_norm, conv_layer_norm, conv_bottleneck_dim, conv_param_init, task_specific_layer, param_init, chunk_size_left, chunk_size_current, chunk_size_right): super(ConformerEncoder, self).__init__() if n_layers_sub1 < 0 or (n_layers_sub1 > 1 and n_layers < n_layers_sub1): raise ValueError('Set n_layers_sub1 between 1 to n_layers.') if n_layers_sub2 < 0 or (n_layers_sub2 > 1 and n_layers_sub1 < n_layers_sub2): raise ValueError('Set n_layers_sub2 between 1 to n_layers_sub1.') self.d_model = d_model self.n_layers = n_layers self.n_heads = n_heads self.pe_type = pe_type self.scale = math.sqrt(d_model) # for streaming encoder self.chunk_size_left = chunk_size_left self.chunk_size_current = chunk_size_current self.chunk_size_right = chunk_size_right self.latency_controlled = chunk_size_left > 0 or chunk_size_current > 0 or chunk_size_right > 0 # for hierarchical encoder self.n_layers_sub1 = n_layers_sub1 self.n_layers_sub2 = n_layers_sub2 self.task_specific_layer = task_specific_layer # for bridge layers self.bridge = None self.bridge_sub1 = None self.bridge_sub2 = None # for attention plot self.aws_dict = {} self.data_dict = {} # Setting for CNNs if conv_channels: assert n_stacks == 1 and n_splices == 1 self.conv = ConvEncoder(input_dim, in_channel=conv_in_channel, channels=conv_channels, kernel_sizes=conv_kernel_sizes, strides=conv_strides, poolings=conv_poolings, dropout=0., batch_norm=conv_batch_norm, layer_norm=conv_layer_norm, layer_norm_eps=layer_norm_eps, residual=False, bottleneck_dim=d_model, param_init=conv_param_init) self._odim = self.conv.output_dim else: self.conv = None self._odim = input_dim * n_splices * n_stacks self.embed = nn.Linear(self._odim, d_model) # calculate subsampling factor self._factor = 1 if self.conv is not None: self._factor *= self.conv.subsampling_factor self.pos_emb = XLPositionalEmbedding(d_model, dropout) # TODO: dropout_in? 
assert pe_type == 'relative' self.layers = nn.ModuleList([copy.deepcopy(ConformerEncoderBlock( d_model, d_ff, n_heads, kernel_size, dropout, dropout_att, dropout_layer, layer_norm_eps, ffn_activation, param_init, ffn_bottleneck_dim=ffn_bottleneck_dim)) for _ in range(n_layers)]) self.norm_out = nn.LayerNorm(d_model, eps=layer_norm_eps) self._odim = d_model if n_layers_sub1 > 0: if task_specific_layer: self.layer_sub1 = ConformerEncoderBlock( d_model, d_ff, n_heads, kernel_size, dropout, dropout_att, dropout_layer, layer_norm_eps, ffn_activation, param_init, ffn_bottleneck_dim=ffn_bottleneck_dim) self.norm_out_sub1 = nn.LayerNorm(d_model, eps=layer_norm_eps) if last_proj_dim > 0 and last_proj_dim != self.output_dim: self.bridge_sub1 = nn.Linear(self._odim, last_proj_dim) if n_layers_sub2 > 0: if task_specific_layer: self.layer_sub2 = ConformerEncoderBlock( d_model, d_ff, n_heads, kernel_size, dropout, dropout_att, dropout_layer, layer_norm_eps, ffn_activation, param_init, ffn_bottleneck_dim=ffn_bottleneck_dim) self.norm_out_sub2 = nn.LayerNorm(d_model, eps=layer_norm_eps) if last_proj_dim > 0 and last_proj_dim != self.output_dim: self.bridge_sub2 = nn.Linear(self._odim, last_proj_dim) if last_proj_dim > 0 and last_proj_dim != self.output_dim: self.bridge = nn.Linear(self._odim, last_proj_dim) self._odim = last_proj_dim self.reset_parameters(param_init) @staticmethod def add_args(parser, args): """Add arguments.""" group = parser.add_argument_group("Transformer encoder") if 'conv' in args.enc_type: parser = ConvEncoder.add_args(parser, args) # Transformer common if not hasattr(args, 'transformer_d_model'): group.add_argument('--transformer_d_model', type=int, default=256, help='number of units in the MHA layer') group.add_argument('--transformer_d_ff', type=int, default=2048, help='number of units in the FFN layer') group.add_argument('--transformer_d_ff_bottleneck_dim', type=int, default=0, help='bottleneck dimension in the FFN layer') group.add_argument('--transformer_n_heads', type=int, default=4, help='number of heads in the MHA layer') group.add_argument('--transformer_layer_norm_eps', type=float, default=1e-12, help='epsilon value for layer normalization') group.add_argument('--transformer_ffn_activation', type=str, default='relu', choices=['relu', 'gelu', 'gelu_accurate', 'glu', 'swish'], help='nonlinear activation for the FFN layer') group.add_argument('--transformer_param_init', type=str, default='xavier_uniform', choices=['xavier_uniform', 'pytorch'], help='parameter initializatin') # NOTE: These checks are important to avoid conflict with args in Transformer decoder # Conformer encoder specific group.add_argument('--transformer_enc_pe_type', type=str, default='relative', choices=['relative'], help='type of positional encoding for the Transformer encoder') group.add_argument('--conformer_kernel_size', type=int, default=32, help='kernel size for depthwise convolution in convolution module for Conformer encoder layers') group.add_argument('--dropout_enc_layer', type=float, default=0.0, help='LayerDrop probability for Conformer encoder layers') # streaming group.add_argument('--lc_chunk_size_left', type=int, default=0, help='left chunk size for latency-controlled Conformer encoder') group.add_argument('--lc_chunk_size_current', type=int, default=0, help='current chunk size (and hop size) for latency-controlled Conformer encoder') group.add_argument('--lc_chunk_size_right', type=int, default=0, help='right chunk size for latency-controlled Conformer encoder') return parser def 
reset_parameters(self, param_init): """Initialize parameters.""" if param_init == 'xavier_uniform': logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__) if self.conv is None: nn.init.xavier_uniform_(self.embed.weight) nn.init.constant_(self.embed.bias, 0.) if self.bridge is not None: nn.init.xavier_uniform_(self.bridge.weight) nn.init.constant_(self.bridge.bias, 0.) if self.bridge_sub1 is not None: nn.init.xavier_uniform_(self.bridge_sub1.weight) nn.init.constant_(self.bridge_sub1.bias, 0.) if self.bridge_sub2 is not None: nn.init.xavier_uniform_(self.bridge_sub2.weight) nn.init.constant_(self.bridge_sub2.bias, 0.) def forward(self, xs, xlens, task, use_cache=False, streaming=False): """Forward computation. Args: xs (FloatTensor): `[B, T, input_dim]` xlens (list): `[B]` task (str): not supported now use_cache (bool): streaming (bool): streaming encoding Returns: eouts (dict): xs (FloatTensor): `[B, T, d_model]` xlens (list): `[B]` """ eouts = {'ys': {'xs': None, 'xlens': None}, 'ys_sub1': {'xs': None, 'xlens': None}, 'ys_sub2': {'xs': None, 'xlens': None}} N_l = self.chunk_size_left N_c = self.chunk_size_current N_r = self.chunk_size_right bs, xmax, idim = xs.size() if self.latency_controlled: xs = blockwise(xs, N_l, N_c, N_r) if self.conv is None: xs = self.embed(xs) else: # Path through CNN blocks xs, xlens = self.conv(xs, xlens) if not self.training: self.data_dict['elens'] = tensor2np(xlens) if self.latency_controlled: # streaming Conformer encoder _N_l = max(0, N_l // self.subsampling_factor) _N_c = N_c // self.subsampling_factor n_blocks = xs.size(0) // bs emax = xmax // self.subsampling_factor if xmax % self.subsampling_factor != 0: emax += 1 xs = xs * self.scale pos_idxs = torch.arange(xs.size(1) - 1, -1, -1.0, dtype=torch.float) pos_embs = self.pos_emb(pos_idxs, self.device_id) xx_mask = None # NOTE: no mask for lth, layer in enumerate(self.layers): xs = layer(xs, xx_mask, pos_embs=pos_embs) if not self.training: n_heads = layer.xx_aws.size(1) xx_aws = layer.xx_aws[:, :, _N_l:_N_l + _N_c, _N_l:_N_l + _N_c] xx_aws = xx_aws.view(bs, n_blocks, n_heads, _N_c, _N_c) xx_aws_center = xx_aws.new_zeros(bs, n_heads, emax, emax) for blc_id in range(n_blocks): offset = blc_id * _N_c emax_blc = xx_aws_center[:, :, offset:offset + _N_c].size(2) xx_aws_chunk = xx_aws[:, blc_id, :, :emax_blc, :emax_blc] xx_aws_center[:, :, offset:offset + _N_c, offset:offset + _N_c] = xx_aws_chunk self.aws_dict['xx_aws_layer%d' % lth] = tensor2np(xx_aws_center) # Extract the center region xs = xs[:, _N_l:_N_l + _N_c] # `[B * n_blocks, _N_c, d_model]` xs = xs.contiguous().view(bs, -1, xs.size(2)) xs = xs[:, :emax] else: bs, xmax, idim = xs.size() xs = xs * self.scale # Create the self-attention mask xx_mask = make_pad_mask(xlens, self.device_id).unsqueeze(2).repeat([1, 1, xmax]) pos_idxs = torch.arange(xmax - 1, -1, -1.0, dtype=torch.float) pos_embs = self.pos_emb(pos_idxs, self.device_id) for lth, layer in enumerate(self.layers): xs = layer(xs, xx_mask, pos_embs=pos_embs) if not self.training: self.aws_dict['xx_aws_layer%d' % lth] = tensor2np(layer.xx_aws) # Pick up outputs in the sub task before the projection layer if lth == self.n_layers_sub1 - 1: xs_sub1 = self.layer_sub1( xs, xx_mask, pos_embs=pos_embs) if self.task_specific_layer else xs.clone() xs_sub1 = self.norm_out_sub1(xs_sub1) if self.bridge_sub1 is not None: xs_sub1 = self.bridge_sub1(xs_sub1) if task == 'ys_sub1': eouts[task]['xs'], eouts[task]['xlens'] = xs_sub1, xlens return eouts if lth == 
self.n_layers_sub2 - 1: xs_sub2 = self.layer_sub2( xs, xx_mask, pos_embs=pos_embs) if self.task_specific_layer else xs.clone() xs_sub2 = self.norm_out_sub2(xs_sub2) if self.bridge_sub2 is not None: xs_sub2 = self.bridge_sub2(xs_sub2) if task == 'ys_sub2': eouts[task]['xs'], eouts[task]['xlens'] = xs_sub2, xlens return eouts xs = self.norm_out(xs) # Bridge layer if self.bridge is not None: xs = self.bridge(xs) if task in ['all', 'ys']: eouts['ys']['xs'], eouts['ys']['xlens'] = xs, xlens if self.n_layers_sub1 >= 1 and task == 'all': eouts['ys_sub1']['xs'], eouts['ys_sub1']['xlens'] = xs_sub1, xlens if self.n_layers_sub2 >= 1 and task == 'all': eouts['ys_sub2']['xs'], eouts['ys_sub2']['xlens'] = xs_sub2, xlens return eouts class ConformerEncoderBlock(nn.Module): """A single layer of the Conformer encoder. Args: d_model (int): input dimension of MultiheadAttentionMechanism and PositionwiseFeedForward d_ff (int): hidden dimension of PositionwiseFeedForward n_heads (int): number of heads for multi-head attention kernel_size (int): kernel size for depthwise convolution in convolution module dropout (float): dropout probabilities for linear layers dropout_att (float): dropout probabilities for attention distributions dropout_layer (float): LayerDrop probability layer_norm_eps (float): epsilon parameter for layer normalization ffn_activation (str): nonolinear function for PositionwiseFeedForward param_init (str): parameter initialization method ffn_bottleneck_dim (int): bottleneck dimension for the light-weight FFN layer """ def __init__(self, d_model, d_ff, n_heads, kernel_size, dropout, dropout_att, dropout_layer, layer_norm_eps, ffn_activation, param_init, ffn_bottleneck_dim=0): super(ConformerEncoderBlock, self).__init__() self.n_heads = n_heads self.fc_factor = 0.5 # first half position-wise feed-forward self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.feed_forward1 = FFN(d_model, d_ff, dropout, ffn_activation, param_init, ffn_bottleneck_dim) # conv module self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.conv = ConformerConvBlock(d_model, kernel_size, param_init) # self-attention self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.self_attn = RelMHA(kdim=d_model, qdim=d_model, adim=d_model, odim=d_model, n_heads=n_heads, dropout=dropout_att, param_init=param_init) # second half position-wise feed-forward self.norm4 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.feed_forward2 = FFN(d_model, d_ff, dropout, ffn_activation, param_init, ffn_bottleneck_dim) self.dropout = nn.Dropout(dropout) self.dropout_layer = dropout_layer self.reset_visualization() @property def xx_aws(self): return self._xx_aws def reset_visualization(self): self._xx_aws = None def forward(self, xs, xx_mask=None, pos_embs=None, u=None, v=None): """Conformer encoder layer definition. 
Args: xs (FloatTensor): `[B, T, d_model]` xx_mask (ByteTensor): `[B, T, T]` pos_embs (LongTensor): `[L, 1, d_model]` u (FloatTensor): global parameter for relative positinal embedding v (FloatTensor): global parameter for relative positinal embedding Returns: xs (FloatTensor): `[B, T, d_model]` """ self.reset_visualization() # LayerDrop if self.dropout_layer > 0 and self.training and random.random() >= self.dropout_layer: return xs # first half FFN residual = xs xs = self.norm1(xs) xs = self.feed_forward1(xs) xs = self.fc_factor * self.dropout(xs) + residual # Macaron FFN # conv residual = xs xs = self.norm2(xs) xs = self.conv(xs) xs = self.dropout(xs) + residual # self-attention residual = xs xs = self.norm3(xs) # relative positional encoding memory = None xs, self._xx_aws = self.self_attn(xs, xs, memory, pos_embs, xx_mask, u, v) xs = self.dropout(xs) + residual # second half FFN residual = xs xs = self.norm4(xs) xs = self.feed_forward2(xs) xs = self.fc_factor * self.dropout(xs) + residual # Macaron FFN return xs
b2a8c7cfd3bf211f2448c0dc1cf0653aeca34ce4
a37e8dfac12bab3d78567dca9643eb802be24ebe
/stripe/wsgi.py
dd39a606114920521b7cc51b33bf1444e47495e0
[]
no_license
mltuts1998/Django-Stripe
228928cf9b591cda7df3634199278cbd580c43dd
184fde11db61bc3f68b903c010f8883ac609638d
refs/heads/master
2022-09-10T00:08:56.149413
2020-05-31T18:53:53
2020-05-31T18:53:53
268,315,474
0
0
null
null
null
null
UTF-8
Python
false
false
389
py
""" WSGI config for stripe project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stripe.settings') application = get_wsgi_application()
3ad1936f9886833ee45e5a08262baef20d9c7826
8049dd81d52e0659054b574323887cf06dbb03a9
/api/audit/__init__.py
9a6d49f35270ff09a98c36e94c5bf5628c104561
[ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference" ]
permissive
funetes/flagsmith
b6bfdb79e023a6e8832e898b13635e36486e5aa2
97dba4175fac4e723e7bc4208e004d61e748eed7
refs/heads/main
2023-06-21T08:18:53.531171
2021-07-26T16:47:01
2021-07-26T16:47:01
389,799,101
1
0
NOASSERTION
2021-07-26T23:57:23
2021-07-26T23:57:23
null
UTF-8
Python
false
false
46
py
default_app_config = "audit.apps.AuditConfig"
46a4911acf850993eefe9e0829a86a80d6eff49a
65510e35b6908d9365b69d8cc467ce4950bbfb11
/manage.py
041854b26ef3a1e4c42fffec3cfbe3e8d30c5ca7
[]
no_license
Jolin-blank/vsphere_cobbler
7824d68da903c88ea03624687b601f5b32057609
dc49674e98c19b7a3d7bf87c3319e5571a5ec0c0
refs/heads/master
2022-11-20T02:46:32.176336
2020-07-26T06:25:27
2020-07-26T06:25:27
282,589,937
0
0
null
null
null
null
UTF-8
Python
false
false
647
py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ippool.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
40ac2a92a23c0cf29aaa67a5f202967f7527b8dc
62e58c051128baef9452e7e0eb0b5a83367add26
/x12/4040/494004040.py
8b2f5f83eb14d0b75d277c1ae37a8190e141fa8f
[]
no_license
dougvanhorn/bots-grammars
2eb6c0a6b5231c14a6faf194b932aa614809076c
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
refs/heads/master
2021-05-16T12:55:58.022904
2019-05-17T15:22:23
2019-05-17T15:22:23
105,274,633
0
0
null
2017-09-29T13:21:21
2017-09-29T13:21:21
null
UTF-8
Python
false
false
998
py
from bots.botsconfig import *
from records004040 import recorddefs

syntax = {
    'version': '00403',  # version of ISA to send
    'functionalgroup': 'TP',
}

structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'REN', MIN: 1, MAX: 1},
        {ID: 'DK', MIN: 1, MAX: 1},
        {ID: 'PI', MIN: 1, MAX: 8},
        {ID: 'PR', MIN: 0, MAX: 200},
        {ID: 'SS', MIN: 0, MAX: 1},
        {ID: 'SA', MIN: 1, MAX: 1},
        {ID: 'CD', MIN: 0, MAX: 150},
        {ID: 'GY', MIN: 0, MAX: 150},
        {ID: 'RAB', MIN: 0, MAX: 12},
        {ID: 'PT', MIN: 0, MAX: 50},
        {ID: 'LX', MIN: 0, MAX: 1, LEVEL: [
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'PI', MIN: 0, MAX: 15},
        ]},
        {ID: 'R9', MIN: 0, MAX: 10, LEVEL: [
            {ID: 'R2B', MIN: 0, MAX: 10, LEVEL: [
                {ID: 'R2C', MIN: 0, MAX: 10},
            ]},
        ]},
        {ID: 'SCL', MIN: 0, MAX: 999, LEVEL: [
            {ID: 'RD', MIN: 0, MAX: 6},
        ]},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
19dad8cafa255111037ec4d564a616e8fe94fe5d
163bbb4e0920dedd5941e3edfb2d8706ba75627d
/Code/CodeRecords/2819/60580/293103.py
a5bcdd091887b7679ef45973eb5fbcfc0d5f124a
[]
no_license
AdamZhouSE/pythonHomework
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
ffc5606817a666aa6241cfab27364326f5c066ff
refs/heads/master
2022-11-24T08:05:22.122011
2020-07-28T16:21:24
2020-07-28T16:21:24
259,576,640
2
1
null
null
null
null
UTF-8
Python
false
false
842
py
import math

size = int(input())
tempList = input().split()
intList = []
for var in tempList:
    intList.append(int(var))

# count the occurrences of each group size
d = {}
for var in intList:
    if var in d.keys():
        d[var] += 1
    else:
        d[var] = 1
l = sorted(d.keys())
realD = {}
for i in l:
    realD[i] = d[i]

result = 0
resultD = {}
resultD[1] = 0
resultD[2] = 0
resultD[3] = 0
for key, value in realD.items():
    if key == 4:
        result += value
    if key == 2:
        result += value // 2
        resultD[2] = value % 2
    if key == 3 or key == 1:
        resultD[key] = value

# each group of 3 rides with one group of 1; handle the leftovers
result += resultD[3]
resultD[1] = resultD[1] - resultD[3]
if resultD[1] <= 0:
    if resultD[2] == 1:
        result += 1
else:
    if resultD[2] == 1:
        resultD[1] -= 2
        result += 1
    if resultD[1] > 0:
        result += math.ceil(resultD[1] / 4)
print(result)
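# --- Illustrative check, added for clarity (not part of the original file). ---
# This script appears to implement the classic "taxi" packing problem: groups of
# sizes 1..4 are packed into cars of capacity 4. For the sample input lines
#     5
#     1 2 4 3 3
# it should print 4 (one car for the 4; each 3 paired with a 1 where possible;
# the leftover 2 takes its own car).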
1d8c1e36a64b2846dae7a9d01729e5c129613ed3
c84d807bd359ae58ed5e115a51fb85be5ac93262
/11_binary_search/9_search_rotated_array_duplicates.py
e650cdc1c68f500010263519962cbcf78ade74c2
[]
no_license
terrifyzhao/educative
bc35d1d10ea280ddc50b1a2708c0e22a7a5cd6d0
7a5c82abeb7853a9a1262e28b2fe58a20f547802
refs/heads/master
2020-11-28T04:25:30.425519
2020-03-05T10:23:09
2020-03-05T10:23:09
229,702,388
0
0
null
null
null
null
UTF-8
Python
false
false
1,708
py
def search_rotated_with_duplicates(arr, key):
    start, end = 0, len(arr) - 1
    while start <= end:
        mid = start + (end - start) // 2
        if arr[mid] == key:
            return mid

        # the only difference from the previous solution:
        # if numbers at indexes start, mid, and end are the same, we can't choose a side;
        # the best we can do is to skip one number from both ends, as key != arr[mid]
        if arr[start] == arr[mid] and arr[end] == arr[mid]:
            start += 1
            end -= 1
        elif arr[start] <= arr[mid]:  # left side is sorted in ascending order
            if key >= arr[start] and key < arr[mid]:
                end = mid - 1
            else:  # key > arr[mid]
                start = mid + 1
        else:  # right side is sorted in ascending order
            if key > arr[mid] and key <= arr[end]:
                start = mid + 1
            else:
                end = mid - 1

    # we are not able to find the element in the given array
    return -1


def search_rotated_with_duplicates2(arr, key):
    start, end = 0, len(arr) - 1
    while start <= end:
        mid = start + (end - start) // 2
        if arr[mid] == key:
            return mid
        # skip duplicates from both ends; the start < end bound keeps the loop
        # from running past the array when every remaining element is identical
        while start < end and arr[start] == arr[mid] == arr[end]:
            start += 1
            end -= 1
        if arr[start] <= arr[mid]:
            if arr[start] <= key < arr[mid]:
                end = mid - 1
            else:
                start = mid + 1  # exclude mid, otherwise the search can loop forever
        else:
            if arr[mid] < key <= arr[end]:
                start = mid + 1
            else:
                end = mid - 1  # exclude mid here as well
    return -1


def main():
    print(search_rotated_with_duplicates([3, 7, 3, 3, 3], 7))


main()
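# --- Illustrative checks, added for clarity (not part of the original course
# file); any index of a matching element would be an acceptable answer. ---
assert search_rotated_with_duplicates([3, 7, 3, 3, 3], 7) == 1
assert search_rotated_with_duplicates2([10, 15, 1, 3, 8], 15) == 1
assert search_rotated_with_duplicates2([2, 2, 2, 2], 5) == -1  # key absent, all duplicates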
8a3938da7e8df9e31d0c8249ff2d5bd9dcdbeb84
80c8d4e84f2ea188a375ff920a4adbd9edaed3a1
/third/opencv/gaussian_mix.py
f7aebe86c7fe07415fabff67ac7d4fa8ccebf641
[ "MIT" ]
permissive
Birkid/penter
3a4b67801d366db15ca887c31f545c8cda2b0766
0200f40c9d01a84c758ddcb6a9c84871d6f628c0
refs/heads/master
2023-08-22T14:05:43.106499
2021-10-20T07:10:10
2021-10-20T07:10:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,141
py
#!/usr/bin/env python

# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3

if PY3:
    xrange = range

import numpy as np
import cv2 as cv

from numpy import random


def make_gaussians(cluster_n, img_size):
    points = []
    ref_distrs = []
    for _i in range(cluster_n):
        mean = (0.1 + 0.8*random.rand(2)) * img_size
        a = (random.rand(2, 2)-0.5)*img_size*0.1
        cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
        n = 100 + random.randint(900)
        pts = random.multivariate_normal(mean, cov, n)
        points.append( pts )
        ref_distrs.append( (mean, cov) )
    points = np.float32( np.vstack(points) )
    return points, ref_distrs


def draw_gaussain(img, mean, cov, color):
    x, y = np.int32(mean)
    w, u, _vt = cv.SVDecomp(cov)
    ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
    s1, s2 = np.sqrt(w)*3.0
    cv.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv.LINE_AA)


def main():
    cluster_n = 5
    img_size = 512

    print('press any key to update distributions, ESC - exit\n')

    while True:
        print('sampling distributions...')
        points, ref_distrs = make_gaussians(cluster_n, img_size)

        print('EM (opencv) ...')
        em = cv.ml.EM_create()
        em.setClustersNumber(cluster_n)
        em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC)
        em.trainEM(points)
        means = em.getMeans()
        covs = em.getCovs()  # Known bug: https://github.com/opencv/opencv/pull/4232
        found_distrs = zip(means, covs)
        print('ready!\n')

        img = np.zeros((img_size, img_size, 3), np.uint8)
        for x, y in np.int32(points):
            cv.circle(img, (x, y), 1, (255, 255, 255), -1)
        for m, cov in ref_distrs:
            draw_gaussain(img, m, cov, (0, 255, 0))
        for m, cov in found_distrs:
            draw_gaussain(img, m, cov, (0, 0, 255))
        cv.imshow('gaussian mixture', img)
        ch = cv.waitKey(0)
        if ch == 27:
            break

    print('Done')


if __name__ == '__main__':
    print(__doc__)
    main()
    cv.destroyAllWindows()
f7cc3625a1915ecc9b75645903de41058b2871d8
41c0d29efcb3ac0e22237bd3fadc5cdf550698cd
/homeassistant/helpers/check_config.py
4052a94b9de9b74054a64a28de168d2b2c8172b9
[ "Apache-2.0" ]
permissive
EthanW1215/home-assistant
7c19ce668821f3063b3d46f9e9a0ef5a6e102689
a48ac4d18fab253572780671f896b3a417322699
refs/heads/master
2020-09-05T09:02:59.513681
2019-11-05T18:57:08
2019-11-05T18:57:08
220,050,094
2
0
Apache-2.0
2019-11-06T17:13:33
2019-11-06T17:13:32
null
UTF-8
Python
false
false
6,303
py
"""Helper to check the configuration file.""" from collections import OrderedDict, namedtuple from typing import List import attr import voluptuous as vol from homeassistant import loader from homeassistant.core import HomeAssistant from homeassistant.config import ( CONF_CORE, CORE_CONFIG_SCHEMA, CONF_PACKAGES, merge_packages_config, _format_config_error, find_config_file, load_yaml_config_file, extract_domain_configs, config_per_platform, ) from homeassistant.requirements import ( async_get_integration_with_requirements, RequirementsNotFound, ) import homeassistant.util.yaml.loader as yaml_loader from homeassistant.exceptions import HomeAssistantError # mypy: allow-untyped-calls, allow-untyped-defs, no-warn-return-any CheckConfigError = namedtuple("CheckConfigError", "message domain config") @attr.s class HomeAssistantConfig(OrderedDict): """Configuration result with errors attribute.""" errors: List[CheckConfigError] = attr.ib(default=attr.Factory(list)) def add_error(self, message, domain=None, config=None): """Add a single error.""" self.errors.append(CheckConfigError(str(message), domain, config)) return self @property def error_str(self) -> str: """Return errors as a string.""" return "\n".join([err.message for err in self.errors]) async def async_check_ha_config_file(hass: HomeAssistant) -> HomeAssistantConfig: """Load and check if Home Assistant configuration file is valid. This method is a coroutine. """ config_dir = hass.config.config_dir result = HomeAssistantConfig() def _pack_error(package, component, config, message): """Handle errors from packages: _log_pkg_error.""" message = "Package {} setup failed. Component {} {}".format( package, component, message ) domain = f"homeassistant.packages.{package}.{component}" pack_config = core_config[CONF_PACKAGES].get(package, config) result.add_error(message, domain, pack_config) def _comp_error(ex, domain, config): """Handle errors from components: async_log_exception.""" result.add_error(_format_config_error(ex, domain, config), domain, config) # Load configuration.yaml try: config_path = await hass.async_add_executor_job(find_config_file, config_dir) if not config_path: return result.add_error("File configuration.yaml not found.") config = await hass.async_add_executor_job(load_yaml_config_file, config_path) except FileNotFoundError: return result.add_error(f"File not found: {config_path}") except HomeAssistantError as err: return result.add_error(f"Error loading {config_path}: {err}") finally: yaml_loader.clear_secret_cache() # Extract and validate core [homeassistant] config try: core_config = config.pop(CONF_CORE, {}) core_config = CORE_CONFIG_SCHEMA(core_config) result[CONF_CORE] = core_config except vol.Invalid as err: result.add_error(err, CONF_CORE, core_config) core_config = {} # Merge packages await merge_packages_config( hass, config, core_config.get(CONF_PACKAGES, {}), _pack_error ) core_config.pop(CONF_PACKAGES, None) # Filter out repeating config sections components = set(key.split(" ")[0] for key in config.keys()) # Process and validate config for domain in components: try: integration = await async_get_integration_with_requirements(hass, domain) except (RequirementsNotFound, loader.IntegrationNotFound) as ex: result.add_error(f"Component error: {domain} - {ex}") continue try: component = integration.get_component() except ImportError as ex: result.add_error(f"Component error: {domain} - {ex}") continue config_schema = getattr(component, "CONFIG_SCHEMA", None) if config_schema is not None: try: config = 
config_schema(config) result[domain] = config[domain] except vol.Invalid as ex: _comp_error(ex, domain, config) continue component_platform_schema = getattr( component, "PLATFORM_SCHEMA_BASE", getattr(component, "PLATFORM_SCHEMA", None), ) if component_platform_schema is None: continue platforms = [] for p_name, p_config in config_per_platform(config, domain): # Validate component specific platform schema try: p_validated = component_platform_schema(p_config) except vol.Invalid as ex: _comp_error(ex, domain, config) continue # Not all platform components follow same pattern for platforms # So if p_name is None we are not going to validate platform # (the automation component is one of them) if p_name is None: platforms.append(p_validated) continue try: p_integration = await async_get_integration_with_requirements( hass, p_name ) platform = p_integration.get_platform(domain) except ( loader.IntegrationNotFound, RequirementsNotFound, ImportError, ) as ex: result.add_error(f"Platform error {domain}.{p_name} - {ex}") continue # Validate platform specific schema platform_schema = getattr(platform, "PLATFORM_SCHEMA", None) if platform_schema is not None: try: p_validated = platform_schema(p_validated) except vol.Invalid as ex: _comp_error(ex, f"{domain}.{p_name}", p_validated) continue platforms.append(p_validated) # Remove config for current component and add validated config back in. for filter_comp in extract_domain_configs(config, domain): del config[filter_comp] result[domain] = platforms return result
43b38e5883d24cf3c27a2a95b78dae01d48f23f0
e0565926a5e94d1ea51e88876d509cff4cf04011
/www/webframe.py
44eb43d907ddbdaee6fa078f75f1988148f47bc4
[]
no_license
MaxWhut2017/awesome-python3-webapp
1bd69d174a415bc1909ef64aaf42ee3c299147d4
a68b84973f1852c19cb85161f0d465e66893a199
refs/heads/master
2020-05-16T10:22:05.342081
2017-10-09T05:42:11
2017-10-09T05:42:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
16,633
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
A web framework built on top of aiohttp that further simplifies web development.
aiohttp is fairly low-level; to write less code when using a framework, the only
option is to wrap a higher-level framework around aiohttp.
This framework is designed entirely from the user's perspective: the goal is to
let framework users write as little code as possible.
"""

__author__ = 'Hk4Fun'

import asyncio
# used to manipulate file/module paths
import os
# used to inspect a function's parameter information
import inspect
import logging
# used to restore attributes of decorated functions, such as __name__
import functools
# used to parse the query parameters of a URL
from urllib import parse

from aiohttp import web
# our own module that detects API call errors; it can be ignored for now
from apis import APIError


# This is a decorator, referenced in the handlers module; it attaches two
# attributes to an HTTP handler: the request method and the request path.
# It is a three-level nested decorator, so that arguments can be passed to the
# decorator itself.
# This decorator maps a function to a URL handler.
def get(path):
    def decorator(func):  # the argument is a function
        # The built-in functools.wraps decorator makes the decorated function's
        # __name__ attribute match the original function's, i.e. func's.
        # Without it, the function's __name__ would be 'wrapper' instead of func's name.
        @functools.wraps(func)
        def wrapper(*args, **kw):
            return func(*args, **kw)
        wrapper.__method__ = 'GET'  # attach the request method "GET" to the original function
        wrapper.__route__ = path  # attach the request path to the original function
        return wrapper
    return decorator
# In this way, a function decorated with @get(path) carries its URL information.


# same as get(path)
def post(path):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            return func(*args, **kw)
        wrapper.__method__ = 'POST'
        wrapper.__route__ = path
        return wrapper
    return decorator


# inspect.Parameter has 5 kinds:
# POSITIONAL_ONLY          positional parameter only
# POSITIONAL_OR_KEYWORD    may be positional or keyword
# VAR_POSITIONAL           equivalent to *args
# KEYWORD_ONLY             equivalent to *, key
# VAR_KEYWORD              equivalent to **kw
# see: http://blog.csdn.net/weixin_35955795/article/details/53053762

# The parameter fn is itself a function; the five functions below examine and
# classify fn's parameters.

# This function returns a tuple of fn's KEYWORD_ONLY parameters that have no default value.
def get_required_kw_args(fn):
    args = []  # an empty list to store the names of fn's qualifying parameters
    params = inspect.signature(fn).parameters  # returns a mapping of the function's parameters
    for name, param in params.items():
        # the parameter kind is KEYWORD_ONLY and no default value is given;
        # inspect.Parameter.empty means the parameter's default value is empty
        if param.kind == inspect.Parameter.KEYWORD_ONLY and param.default == inspect.Parameter.empty:
            args.append(name)  # only the parameter name is added
    return tuple(args)


# Almost identical to the previous function; the only difference is that the
# "no default value" condition is dropped, i.e. this function extracts the
# names of all of fn's KEYWORD_ONLY parameters.
def get_named_kw_args(fn):
    args = []
    params = inspect.signature(fn).parameters
    for name, param in params.items():
        if param.kind == inspect.Parameter.KEYWORD_ONLY:
            args.append(name)
    return tuple(args)


# checks whether fn has any KEYWORD_ONLY parameter
def has_named_kw_args(fn):
    params = inspect.signature(fn).parameters
    for name, param in params.items():
        if param.kind == inspect.Parameter.KEYWORD_ONLY:
            return True


# checks whether fn has **kw (a variable-length keyword parameter)
def has_var_kw_arg(fn):
    params = inspect.signature(fn).parameters
    for name, param in params.items():
        if param.kind == inspect.Parameter.VAR_KEYWORD:
            return True


# Checks whether a parameter named request exists and comes after the other
# ordinary positional parameters, i.e.
# fn(POSITIONAL_ONLY, request, VAR_POSITIONAL, KEYWORD_ONLY, VAR_KEYWORD).
# Of course, request itself may also be one of VAR_POSITIONAL, KEYWORD_ONLY, VAR_KEYWORD.
def has_request_arg(fn):
    sig = inspect.signature(fn)  # split into two lines because the raise statement below needs sig
    params = sig.parameters
    found = False  # not found by default
    for name, param in params.items():
        if name == 'request':
            found = True
            # why not break? because the remaining parameters must still be checked
            # to ensure request is the last positional parameter,
            # or one of VAR_POSITIONAL, KEYWORD_ONLY, VAR_KEYWORD
            continue
        if found and (param.kind != inspect.Parameter.VAR_POSITIONAL and
                      param.kind != inspect.Parameter.KEYWORD_ONLY and
                      param.kind != inspect.Parameter.VAR_KEYWORD):
            raise ValueError('request parameter must be the last named parameter in function: %s%s'
                             % (fn.__name__, str(sig)))
    return found


# The purpose of RequestHandler is to analyze which parameters the URL handler
# expects, then pull the necessary values out of the request, build them into a
# dict, and call the URL handler with **kw.
class RequestHandler(object):
    # initialize its own attributes, extracting the required parameter info from fn
    def __init__(self, app, fn):
        self._app = app
        self._func = fn
        self._has_request_arg = has_request_arg(fn)
        self._has_var_kw_arg = has_var_kw_arg(fn)
        self._has_named_kw_args = has_named_kw_args(fn)
        self._named_kw_args = get_named_kw_args(fn)
        self._required_kw_args = get_required_kw_args(fn)

    # With __call__ defined, an instance of this class behaves like a function
    # and can be called directly.
    # Why do this? Because app.router.add_route() below needs a callback, and
    # that callback could simply be the function from handlers directly,
    # but to ease development (and build the framework) we wrap that function
    # in a series of processing steps.
    # This lets framework users simply add business-logic functions (handlers)
    # to the handlers module without touching other modules -- transparency.
    # Logic of __call__:
    # 1. define the kw object to hold the parameters
    # 2. check whether the request carries qualifying parameters; if so, save
    #    them into kw depending on whether the method is POST or GET
    # 3. if kw is empty (the request passed no parameters), assign the resource
    #    mapping in match_info (expressed in the decorator's URL path) to kw;
    #    otherwise fill kw with the keyword-only parameters
    # 4. finish handling _has_request_arg and _required_kw_args
    # app.router.add_route() passes the request argument when invoking this callback
    async def __call__(self, request):
        kw = None
        # If fn has (**kw) or (KEYWORD_ONLY) parameters, fn expects arguments
        # whose values come from the data submitted in the request.
        # POSITIONAL_OR_KEYWORD and VAR_POSITIONAL are not considered here:
        # VAR_POSITIONAL is never used, and apart from match_info and request,
        # the URL handlers in handlers must take KEYWORD_ONLY parameters only.
        if self._has_var_kw_arg or self._has_named_kw_args:
            # parse the data type submitted with POST/GET and extract the data
            # handling for the POST method
            if request.method == 'POST':
                # the type of a POST request is obtained via content_type,
                # see: http://www.cnblogs.com/aaronjs/p/4165049.html
                if not request.content_type:
                    # without Content-Type, the submitted data cannot be parsed by type
                    return web.HTTPBadRequest('Missing Content-Type!')
                ct = request.content_type.lower()  # lower-case for uniform checks
                if ct.startswith('application/json'):
                    # startswith rather than direct comparison, because
                    # 'charset=utf-8' may follow, which we do not care about
                    params = await request.json()  # for JSON data, read it with json()
                    if not isinstance(params, dict):
                        # after deserialization this should be a dict; otherwise
                        # the submitted JSON itself is malformed
                        return web.HTTPBadRequest('JSON body must be object!')
                    kw = params  # if correct, hand the request parameters to kw (already a dict)
                elif ct.startswith('application/x-www-form-urlencoded') or ct.startswith('multipart/form-data'):
                    # a traditional browser form submission
                    params = await request.post()  # browser form data is read with post()
                    kw = dict(**params)  # convert the form data into a dict for kw
                else:
                    # the submitted data is neither a JSON object nor a browser
                    # form, so report the body type as unsupported (effectively,
                    # XML is not supported)
                    return web.HTTPBadRequest('Unsupported Content-Type: %s' % request.content_type)
            # handling for the GET method
            if request.method == 'GET':
                # GET is simpler: query parameters are appended to the URL to
                # request a resource on the server.
                # request.query_string is the query string in the URL;
                # e.g. searching Baidu for ReedSun gives
                # https://www.baidu.com/s?ie=UTF-8&wd=ReedSun
                # where 'ie=UTF-8&wd=ReedSun' is the query string
                qs = request.query_string
                if qs:  # if a query string exists
                    kw = dict()
                    # parse.parse_qs(qs, keep_blank_values=False, strict_parsing=False)
                    # parses a given string.
                    # keep_blank_values (default False) says whether to keep blank
                    # values: True keeps them, False drops them.
                    # strict_parsing=True raises ValueError on errors; False ignores them.
                    # The function returns a dict whose keys are the strings before '='
                    # and whose values are the strings after '=' -- but as lists.
                    # The example above returns {'ie': ['UTF-8'], 'wd': ['ReedSun']}
                    for k, v in parse.parse_qs(qs, True).items():
                        kw[k] = v[0]
        # If kw is still empty after the above, no data was obtained from the
        # request or fn has no matching parameter kinds; assign the resource
        # mapping from match_info (expressed in the decorator's URL path) to kw.
        if kw is None:
            # Resource may have variable path also. For instance, a resource
            # with the path '/a/{name}/c' would match all incoming requests
            # with paths such as '/a/b/c', '/a/1/c', and '/a/etc/c'.
            # A variable part is specified in the form {identifier}, where the
            # identifier can be used later in a request handler to access the
            # matched value for that part. This is done by looking up the
            # identifier in the Request.match_info mapping:
            kw = dict(**request.match_info)
        # kw is not empty: process it further
        else:
            # when fn has no **kw but has KEYWORD_ONLY parameters, keep only the
            # KEYWORD_ONLY parameters in kw and drop the rest, to avoid passing
            # too many arguments
            if (not self._has_var_kw_arg) and self._has_named_kw_args:
                copy = dict()
                for name in self._named_kw_args:  # iterate over each KEYWORD_ONLY parameter of fn
                    if name in kw:  # if the parameter is also in kw, copy it over
                        copy[name] = kw[name]
                kw = copy  # overwrite kw with the filtered KEYWORD_ONLY parameters
            # also put the match_info data into kw, checking for name clashes
            # with kw's data; match_info takes precedence here
            for k, v in request.match_info.items():
                if k in kw:
                    logging.warning('Duplicate arg name in kw args (choose match_info\'s): %s' % k)
                kw[k] = v
        # do not forget request: if fn has a request parameter, add it to kw.
        # Note that this means request can be omitted when fn does not need it,
        # whereas without this framework the URL handler must take a request
        # parameter, because app.router.add_route() forcibly passes it --
        # again showing the framework's shielding and transparency.
        if self._has_request_arg:
            kw['request'] = request
        # KEYWORD_ONLY parameters without defaults must be given a value, otherwise it is an error
        if self._required_kw_args:
            for name in self._required_kw_args:
                if name not in kw:
                    return web.HTTPBadRequest('Missing argument: %s' % name)
        logging.info('call with args: %s' % str(kw))  # log the final arguments passed to fn
        try:
            return (await self._func(**kw))
        except APIError as e:  # catch API errors and return them to the user
            return dict(error=e.error, data=e.data, message=e.message)


# add the static-file path to the app
def add_static(app):
    # os.path.abspath(__file__) returns this script's absolute path (including the file name)
    # os.path.dirname() strips the file name and returns the directory path
    # os.path.join() joins the separate parts into one path name
    # so the following adds the static directory, next to this file, to the app's router
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')
    app.router.add_static('/static/', path)
    logging.info('add static %s => %s' % ('/static/', path))


# register a URL handler
def add_route(app, fn):
    # read the '__method__' and '__route__' attributes; raise if either is missing
    method = getattr(fn, '__method__', None)
    path = getattr(fn, '__route__', None)
    if path is None or method is None:
        raise ValueError('@get or @post not defined in %s.' % str(fn))
    # check whether fn is a coroutine (decorated with @asyncio.coroutine) or a generator function
    if not asyncio.iscoroutine(fn) and not inspect.isgeneratorfunction(fn):
        fn = asyncio.coroutine(fn)  # if it is neither, convert it into a coroutine
    logging.info('add route : method = %s, path = %s, fn = %s (%s)' % (
        method, path, fn.__name__, ', '.join(inspect.signature(fn).parameters.keys())))
    # register it as the handler (callback) for the URL; the callback is
    # RequestHandler's special method '__call__'
    app.router.add_route(method, path, RequestHandler(app, fn))


def add_routes(app, module_name):
    # automatically search the module named module_name for URL handlers
    # check whether module_name contains a '.'
    # Python rfind() returns the index of the last '.' in the string (searching
    # from the right), or -1 if there is no match
    n = module_name.rfind('.')
    # no '.': the module is in the current directory, import it directly
    if n == (-1):
        # __import__ works like import, except that import binds a module in the
        # current namespace while __import__ returns an object
        # __import__(name, globals=None, locals=None, fromlist=(), level=0)
        # name -- module name
        # globals, locals -- determine how to interpret the name in package context
        # fromlist -- list of submodules or object names of the module named by name
        # level -- absolute vs. relative import; the default 0 means absolute import,
        #          a positive value gives the number of parent directories for a relative import
        mod = __import__(module_name, globals(), locals())
        logging.info('globals = %s', globals()['__name__'])
    else:
        name = module_name[n+1:]  # take the submodule name
        # the statement below first imports the module and its submodule with
        # __import__, then obtains the submodule via getattr(), e.g. handlers.handler
        mod = getattr(__import__(module_name[:n], globals(), locals(), [name]), name)
    for attr in dir(mod):  # iterate over mod's methods and attributes
        if attr.startswith('_'):  # skip anything starting with '_'; our handlers do not start with '_'
            continue
        fn = getattr(mod, attr)  # fetch the attribute or method not starting with '_'
        if callable(fn):  # if it is callable, it is a method
            # check the '__method__' and '__route__' attributes
            method = getattr(fn, '__method__', None)
            path = getattr(fn, '__route__', None)
            if method and path:
                # if both exist, it is one of our URL handlers; register it in the app's routes
                add_route(app, fn)
7ee2477300774d84db1d5704b72267bb02468b6a
ca44cdd205d27fc5cfabaaa349e93afddd7c902b
/dyskretka/try.py
fd2fb9dcd0fb84b7f9e1f87a82a8fe90afe00cbc
[]
no_license
SOFIAshyn/BaseProgramming_course_Basic_Python
8402b7c2eff570e7102ba1f9b0b6636a6f0b881a
cf4d0d204a836367ee51e329828a53072aef20e9
refs/heads/master
2021-10-21T08:02:35.611635
2019-03-03T15:46:59
2019-03-03T15:46:59
173,553,760
0
0
null
null
null
null
UTF-8
Python
false
false
97
py
n = input("here1: ")
lst = []
while n != "":
    n = input("here: ")
    lst.append(n)
print(lst)
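# --- A corrected sketch, added for illustration (not part of the original
# exercise): appending before re-prompting keeps the first entry and drops the
# terminating empty string.
# lst = []
# n = input("here1: ")
# while n != "":
#     lst.append(n)
#     n = input("here: ")
# print(lst)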
d7272c54660fa14d98ac4d2516403bfb2e29ff54
1284718203be50b23dcd1f6159746cfa42a04163
/python_visual_mpc/visual_mpc_core/agent/create_configs_agent.py
a2b76acca59fe3533af98db31afeec8a2b65ee17
[]
no_license
febert/robustness_via_retrying
8fe4106d7705228ff339f9643518a80c0a243d36
1def282dc22f24b72c51ff1ef9ea1a7a83291369
refs/heads/master
2020-03-31T19:33:39.664525
2018-11-07T21:52:56
2018-11-07T21:52:56
152,502,702
17
2
null
null
null
null
UTF-8
Python
false
false
1,042
py
""" This agent is responsible for creating experiment configurations for benchmarks """ from .general_agent import GeneralAgent class CreateConfigAgent(GeneralAgent): def __init__(self, hyperparams): super().__init__(hyperparams) def rollout(self, policy, i_trial, i_traj): # Take the sample. self._init() agent_data, policy_outputs = {}, [] agent_data['traj_ok'] = True initial_env_obs, reset_state = self.env.reset() agent_data['reset_state'] = reset_state obs = self._post_process_obs(initial_env_obs, agent_data, initial_obs=True) for t in range(self._hyperparams['T']): self.env.move_arm() # should look into creating one "generate task" function for long term.... self.env.move_objects() try: obs = self._post_process_obs(self.env.current_obs(), agent_data) except ValueError: return {'traj_ok': False}, None, None return agent_data, obs, policy_outputs
4c751b204ddfbb53faf33d7dc3ac55f1264ffed0
66276325d623c894c9e6344bb161f3c25974a838
/LeetCode/1000.Minimum-Cost-To-Merge-Stones/Minimum-Cost-To-Merge-Stones.py
28e75801dcc20fa763cdee38273e25d1f9a94f16
[]
no_license
htingwang/HandsOnAlgoDS
034b5199b394ca82fd4fb16614ddabb45f3325e2
5b14b6f42baf59b04cbcc8e115df4272029b64c8
refs/heads/master
2021-07-11T15:50:30.944794
2020-09-27T05:08:02
2020-09-27T05:08:02
192,391,446
12
2
null
2019-07-03T04:09:35
2019-06-17T17:36:01
Jupyter Notebook
UTF-8
Python
false
false
874
py
import heapq


class Solution(object):
    def mergeStones(self, stones, K):
        """
        :type stones: List[int]
        :type K: int
        :rtype: int
        """
        n = len(stones)
        if (n - 1) % (K - 1):
            return -1

        pre_sum = [0] * (n + 1)
        for i in range(1, n + 1):
            pre_sum[i] = pre_sum[i - 1] + stones[i - 1]

        dp = [[0] * n for _ in range(n)]
        for m in range(K, n + 1):
            for i in range(n - m + 1):
                j = i + m - 1
                dp[i][j] = float('inf')
                for mid in range(i, j, K - 1):
                    dp[i][j] = min(dp[i][j], dp[i][mid] + dp[mid + 1][j])
                if (j - i) % (K - 1) == 0:
                    dp[i][j] += (pre_sum[j + 1] - pre_sum[i])
        return dp[0][n - 1]
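# --- Illustrative usage, added for clarity (not part of the original solution);
# the values follow LeetCode 1000's published sample cases. ---
if __name__ == '__main__':
    s = Solution()
    print(s.mergeStones([3, 2, 4, 1], 2))     # expected 20
    print(s.mergeStones([3, 2, 4, 1], 3))     # expected -1: (n-1) % (K-1) != 0
    print(s.mergeStones([3, 5, 1, 2, 6], 3))  # expected 25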
48131bd43563d096580819b13c5c6e9caf79dc10
a2ac73af04a07bb070cd85c88778608b561dd3e4
/addons/hr_recruitment/__openerp__.py
aea1ab4cf0fdbb0eb6b423829fb9ec35600c7615
[]
no_license
sannareddy/openerp-heimai
c849586d6099cc7548dec8b3f1cc7ba8be49594a
58255ecbcea7bf9780948287cf4551ed6494832a
refs/heads/master
2021-01-15T21:34:46.162550
2014-05-13T09:20:37
2014-05-13T09:20:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
64
py
/usr/share/pyshared/openerp/addons/hr_recruitment/__openerp__.py
6f0e65141279b2df4349a3b475b32f0b180420c6
41f960a830752877bf2248bb2c620491752ccfe5
/python_grammar/time.py
3b5ee777c9a9abb2e41eda0e63a623296b6c51ca
[]
no_license
drakhero/python_base
368617032d2d3d5388f22de2cb2ca0af81de00ec
90848ef630ab607a1b5563f773e1b4ca7eaef08f
refs/heads/master
2020-04-24T09:51:09.596139
2019-02-23T03:52:35
2019-02-23T03:52:35
171,875,042
0
0
null
null
null
null
UTF-8
Python
false
false
122
py
import time

old = time.mktime((2019, 1, 15, 0, 0, 0, 0, 0, 0))
now = time.time()
days = (now - old)/3600/24
print(days)
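# --- For comparison, an added sketch (not in the original file): roughly the
# same interval computed with the datetime module, which avoids the 9-tuple
# that time.mktime() requires.
# from datetime import datetime
# delta = datetime.now() - datetime(2019, 1, 15)
# print(delta.total_seconds() / 86400)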
0665059974ce35e023c58e61975a130e51d29396
6d24fb1c67771e7285dea61840f9766013589dd1
/manage.py
d7031ecdad34e4d427ed940b884e8d8c8477c930
[]
no_license
PandaBalu/gglobal
b045fb66f7daea8eeb8d6c62f5dc872ff0b1b246
c063c44c30d023bf562c0b4b39d10161540e7a92
refs/heads/master
2020-12-10T03:14:00.710720
2017-06-26T12:22:05
2017-06-26T12:22:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,028
py
#!/usr/bin/env python
import os
import sys

if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')

    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # noqa
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise

    # This allows easy placement of apps within the interior
    # gglobal directory.
    current_path = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(current_path, 'gglobal'))

    execute_from_command_line(sys.argv)
3738ae3586b803368bac15fdb7da8a6778e9353d
473645b727129e33ab12b42ecece255db73dfcfc
/PatObjectOwnRefProducer/patobjectownrefproducer_cfg.py
bcd95a65ab955d338995758f308c9b88bc41d278
[]
no_license
jpata/AnalysisModules
b3c17ff60ec31b76798ff8a473397b5728e96ca7
02d9d3e28f937c683616c7be4efeddf8874f571c
refs/heads/master
2021-01-21T07:39:34.632912
2014-07-02T07:47:48
2014-07-02T07:47:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,644
py
import FWCore.ParameterSet.Config as cms
import os

inFile = os.environ["TESTING_FILE"]

process = cms.Process("OWNPARTICLES")

process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger = cms.Service("MessageLogger",
    destinations=cms.untracked.vstring('cout'),
    debugModules=cms.untracked.vstring('patJetsPuCleaned'),
    cout=cms.untracked.PSet(threshold=cms.untracked.string('DEBUG'))
)

process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))

process.source = cms.Source("PoolSource",
    # replace 'myfile.root' with the source file you want to use
    fileNames=cms.untracked.vstring(inFile)
)

process.patJetsWithOwnRef = cms.EDProducer('PatObjectOwnRefProducer<pat::Jet>',
    src=cms.InputTag("selectedPatJets")
)

process.patJetsPuCleaned = cms.EDProducer('CleanNoPUJetProducer',
    # jetSrc = cms.InputTag("patJetsWithOwnRef"),
    jetSrc=cms.InputTag("selectedPatJets"),
    PUidMVA=cms.InputTag("puJetMva", "fullDiscriminant", "PAT"),
    PUidFlag=cms.InputTag("puJetMva", "fullId", "PAT"),
    PUidVars=cms.InputTag("puJetId", "", "PAT"),
    isOriginal=cms.bool(True)
)

process.simpleAnalyzer = cms.EDAnalyzer('SimpleEventAnalyzer',
    interestingCollections=cms.untracked.VInputTag([
        "selectedPatJets",
        "patJetsWithOwnRef",
    ]),
    maxObjects=cms.untracked.uint32(1)
)

process.out = cms.OutputModule("PoolOutputModule",
    fileName=cms.untracked.string('test.root')
)

process.p = cms.Path(
    process.patJetsWithOwnRef *
    process.simpleAnalyzer *
    process.patJetsPuCleaned
)

process.e = cms.EndPath(process.out)
dfe1d9dd9dfdd2d164927e535fef2fab31bc1835
5e8342e4f6e48688f4a0079310e8f0b5e5386044
/POO/Factura/factura.py
9f45e3faba5e32f086d57287d8428a3e6b3f8bb6
[]
no_license
fernado1981/python_
27a154406b5fba7e18da418bc5f75c58f3ccc24f
7d846cd332405464fa14707ea3f2286a918fc9de
refs/heads/master
2023-02-15T19:30:02.257345
2021-01-21T10:35:46
2021-01-21T10:35:46
277,186,729
0
0
null
null
null
null
UTF-8
Python
false
false
978
py
from InteractuarFactura import InteractuarFactura

# Write a program that manages a company's invoices pending collection.
# The invoices are stored in a dictionary where each invoice's key is the invoice
# number and the value is the invoice amount. The program must ask the user whether
# they want to add a new invoice, pay an existing one, or finish. To add a new
# invoice, it asks for the invoice number and its amount and adds it to the
# dictionary. To pay an invoice, it asks for the invoice number and removes it from
# the dictionary. After each operation the program must display the amount collected
# so far and the amount still pending collection.


class factura:
    clave = int(input("Invoice number: "))
    valor = float(input("Invoice amount: "))
    fact = InteractuarFactura(clave, valor)
    fact.anadirOrden()
    fact.pagarOrden()
    fact.terminar()
6c2097075bea78e867fd80fe82401f7083691e3f
e8defe94b483fb29fe42563876cf36587cb351b0
/lib/sqlalchemy/testing/plugin/pytestplugin.py
f2e7d706f6c37756b3b14d88192dab3c07585418
[ "MIT" ]
permissive
graingert/sqlalchemy
e5a453065614996f351d9a5f137b26bf2b3ac7f6
b289fb032ead703eb131ad1b0d0e0dcba8617ca3
refs/heads/master
2022-02-20T13:57:37.483599
2020-04-13T22:39:47
2020-04-13T22:39:47
255,667,496
0
0
MIT
2021-08-02T18:45:04
2020-04-14T16:46:31
null
UTF-8
Python
false
false
16,122
py
try:
    # installed by bootstrap.py
    import sqla_plugin_base as plugin_base
except ImportError:
    # assume we're a package, use traditional import
    from . import plugin_base

import argparse
import collections
from functools import update_wrapper
import inspect
import itertools
import operator
import os
import re
import sys

import pytest

try:
    import typing
except ImportError:
    pass
else:
    if typing.TYPE_CHECKING:
        from typing import Sequence

try:
    import xdist  # noqa

    has_xdist = True
except ImportError:
    has_xdist = False


def pytest_addoption(parser):
    group = parser.getgroup("sqlalchemy")

    def make_option(name, **kw):
        callback_ = kw.pop("callback", None)
        if callback_:

            class CallableAction(argparse.Action):
                def __call__(
                    self, parser, namespace, values, option_string=None
                ):
                    callback_(option_string, values, parser)

            kw["action"] = CallableAction

        zeroarg_callback = kw.pop("zeroarg_callback", None)
        if zeroarg_callback:

            class CallableAction(argparse.Action):
                def __init__(
                    self,
                    option_strings,
                    dest,
                    default=False,
                    required=False,
                    help=None,  # noqa
                ):
                    super(CallableAction, self).__init__(
                        option_strings=option_strings,
                        dest=dest,
                        nargs=0,
                        const=True,
                        default=default,
                        required=required,
                        help=help,
                    )

                def __call__(
                    self, parser, namespace, values, option_string=None
                ):
                    zeroarg_callback(option_string, values, parser)

            kw["action"] = CallableAction

        group.addoption(name, **kw)

    plugin_base.setup_options(make_option)
    plugin_base.read_config()


def pytest_configure(config):
    if hasattr(config, "slaveinput"):
        plugin_base.restore_important_follower_config(config.slaveinput)
        plugin_base.configure_follower(config.slaveinput["follower_ident"])
    else:
        if config.option.write_idents and os.path.exists(
            config.option.write_idents
        ):
            os.remove(config.option.write_idents)

    plugin_base.pre_begin(config.option)

    plugin_base.set_coverage_flag(
        bool(getattr(config.option, "cov_source", False))
    )

    plugin_base.set_fixture_functions(PytestFixtureFunctions)


def pytest_sessionstart(session):
    plugin_base.post_begin()


def pytest_sessionfinish(session):
    plugin_base.final_process_cleanup()


if has_xdist:
    import uuid

    def pytest_configure_node(node):
        # the master for each node fills slaveinput dictionary
        # which pytest-xdist will transfer to the subprocess
        plugin_base.memoize_important_follower_config(node.slaveinput)

        node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]
        from sqlalchemy.testing import provision

        provision.create_follower_db(node.slaveinput["follower_ident"])

    def pytest_testnodedown(node, error):
        from sqlalchemy.testing import provision

        provision.drop_follower_db(node.slaveinput["follower_ident"])


def pytest_collection_modifyitems(session, config, items):
    # look for all those classes that specify __backend__ and
    # expand them out into per-database test cases.

    # this is much easier to do within pytest_pycollect_makeitem, however
    # pytest is iterating through cls.__dict__ as makeitem is
    # called which causes a "dictionary changed size" error on py3k.
    # I'd submit a pullreq for them to turn it into a list first, but
    # it's to suit the rather odd use case here which is that we are adding
    # new classes to a module on the fly.

    rebuilt_items = collections.defaultdict(
        lambda: collections.defaultdict(list)
    )

    items[:] = [
        item
        for item in items
        if isinstance(item.parent, pytest.Instance)
        and not item.parent.parent.name.startswith("_")
    ]

    test_classes = set(item.parent for item in items)
    for test_class in test_classes:
        for sub_cls in plugin_base.generate_sub_tests(
            test_class.cls, test_class.parent.module
        ):
            if sub_cls is not test_class.cls:
                per_cls_dict = rebuilt_items[test_class.cls]

                # in pytest 5.4.0
                # for inst in pytest.Class.from_parent(
                #     test_class.parent.parent, name=sub_cls.__name__
                # ).collect():

                for inst in pytest.Class(
                    sub_cls.__name__, parent=test_class.parent.parent
                ).collect():
                    for t in inst.collect():
                        per_cls_dict[t.name].append(t)

    newitems = []
    for item in items:
        if item.parent.cls in rebuilt_items:
            newitems.extend(rebuilt_items[item.parent.cls][item.name])
        else:
            newitems.append(item)

    # seems like the functions attached to a test class aren't sorted already?
    # is that true and why's that? (when using unittest, they're sorted)
    items[:] = sorted(
        newitems,
        key=lambda item: (
            item.parent.parent.parent.name,
            item.parent.parent.name,
            item.name,
        ),
    )


def pytest_pycollect_makeitem(collector, name, obj):
    if inspect.isclass(obj) and plugin_base.want_class(name, obj):
        # in pytest 5.4.0
        # return [
        #     pytest.Class.from_parent(collector,
        #         name=parametrize_cls.__name__)
        #     for parametrize_cls in _parametrize_cls(collector.module, obj)
        # ]
        return [
            pytest.Class(parametrize_cls.__name__, parent=collector)
            for parametrize_cls in _parametrize_cls(collector.module, obj)
        ]
    elif (
        inspect.isfunction(obj)
        and isinstance(collector, pytest.Instance)
        and plugin_base.want_method(collector.cls, obj)
    ):
        # None means, fall back to default logic, which includes
        # method-level parametrize
        return None
    else:
        # empty list means skip this item
        return []


_current_class = None


def _parametrize_cls(module, cls):
    """implement a class-based version of pytest parametrize."""

    if "_sa_parametrize" not in cls.__dict__:
        return [cls]

    _sa_parametrize = cls._sa_parametrize
    classes = []
    for full_param_set in itertools.product(
        *[params for argname, params in _sa_parametrize]
    ):
        cls_variables = {}

        for argname, param in zip(
            [_sa_param[0] for _sa_param in _sa_parametrize], full_param_set
        ):
            if not argname:
                raise TypeError("need argnames for class-based combinations")
            argname_split = re.split(r",\s*", argname)
            for arg, val in zip(argname_split, param.values):
                cls_variables[arg] = val
        parametrized_name = "_".join(
            # token is a string, but in py2k py.test is giving us a unicode,
            # so call str() on it.
            str(re.sub(r"\W", "", token))
            for param in full_param_set
            for token in param.id.split("-")
        )
        name = "%s_%s" % (cls.__name__, parametrized_name)
        newcls = type.__new__(type, name, (cls,), cls_variables)
        setattr(module, name, newcls)
        classes.append(newcls)
    return classes


def pytest_runtest_setup(item):
    # here we seem to get called only based on what we collected
    # in pytest_collection_modifyitems.   So to do class-based stuff
    # we have to tear that out.
    global _current_class

    if not isinstance(item, pytest.Function):
        return

    # ... so we're doing a little dance here to figure it out...
    if _current_class is None:
        class_setup(item.parent.parent)
        _current_class = item.parent.parent

        # this is needed for the class-level, to ensure that the
        # teardown runs after the class is completed with its own
        # class-level teardown...
        def finalize():
            global _current_class
            class_teardown(item.parent.parent)
            _current_class = None

        item.parent.parent.addfinalizer(finalize)

    test_setup(item)


def pytest_runtest_teardown(item):
    # ...but this works better as the hook here rather than
    # using a finalizer, as the finalizer seems to get in the way
    # of the test reporting failures correctly (you get a bunch of
    # py.test assertion stuff instead)
    test_teardown(item)


def test_setup(item):
    plugin_base.before_test(
        item, item.parent.module.__name__, item.parent.cls, item.name
    )


def test_teardown(item):
    plugin_base.after_test(item)


def class_setup(item):
    plugin_base.start_test_class(item.cls)


def class_teardown(item):
    plugin_base.stop_test_class(item.cls)


def getargspec(fn):
    if sys.version_info.major == 3:
        return inspect.getfullargspec(fn)
    else:
        return inspect.getargspec(fn)


def _pytest_fn_decorator(target):
    """Port of langhelpers.decorator with pytest-specific tricks."""

    from sqlalchemy.util.langhelpers import format_argspec_plus
    from sqlalchemy.util.compat import inspect_getfullargspec

    def _exec_code_in_env(code, env, fn_name):
        exec(code, env)
        return env[fn_name]

    def decorate(fn, add_positional_parameters=()):

        spec = inspect_getfullargspec(fn)
        if add_positional_parameters:
            spec.args.extend(add_positional_parameters)

        metadata = dict(target="target", fn="fn", name=fn.__name__)
        metadata.update(format_argspec_plus(spec, grouped=False))
        code = (
            """\
def %(name)s(%(args)s):
    return %(target)s(%(fn)s, %(apply_kw)s)
"""
            % metadata
        )
        decorated = _exec_code_in_env(
            code, {"target": target, "fn": fn}, fn.__name__
        )
        if not add_positional_parameters:
            decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__
            decorated.__wrapped__ = fn
            return update_wrapper(decorated, fn)
        else:
            # this is the pytest hacky part.  don't do a full update wrapper
            # because pytest is really being sneaky about finding the args
            # for the wrapped function
            decorated.__module__ = fn.__module__
            decorated.__name__ = fn.__name__
            return decorated

    return decorate


class PytestFixtureFunctions(plugin_base.FixtureFunctions):
    def skip_test_exception(self, *arg, **kw):
        return pytest.skip.Exception(*arg, **kw)

    _combination_id_fns = {
        "i": lambda obj: obj,
        "r": repr,
        "s": str,
        "n": operator.attrgetter("__name__"),
    }

    def combinations(self, *arg_sets, **kw):
        """facade for pytest.mark.parametrize.

        Automatically derives argument names from the callable which in our
        case is always a method on a class with positional arguments.

        ids for parameter sets are derived using an optional template.

        """
        from sqlalchemy.testing import exclusions

        if sys.version_info.major == 3:
            if len(arg_sets) == 1 and hasattr(arg_sets[0], "__next__"):
                arg_sets = list(arg_sets[0])
        else:
            if len(arg_sets) == 1 and hasattr(arg_sets[0], "next"):
                arg_sets = list(arg_sets[0])

        argnames = kw.pop("argnames", None)

        def _filter_exclusions(args):
            result = []
            gathered_exclusions = []
            for a in args:
                if isinstance(a, exclusions.compound):
                    gathered_exclusions.append(a)
                else:
                    result.append(a)

            return result, gathered_exclusions

        id_ = kw.pop("id_", None)

        tobuild_pytest_params = []
        has_exclusions = False
        if id_:
            _combination_id_fns = self._combination_id_fns

            # because itemgetter is not consistent for one argument vs.
            # multiple, make it multiple in all cases and use a slice
            # to omit the first argument
            _arg_getter = operator.itemgetter(
                0,
                *[
                    idx
                    for idx, char in enumerate(id_)
                    if char in ("n", "r", "s", "a")
                ]
            )
            fns = [
                (operator.itemgetter(idx), _combination_id_fns[char])
                for idx, char in enumerate(id_)
                if char in _combination_id_fns
            ]

            for arg in arg_sets:
                if not isinstance(arg, tuple):
                    arg = (arg,)

                fn_params, param_exclusions = _filter_exclusions(arg)

                parameters = _arg_getter(fn_params)[1:]

                if param_exclusions:
                    has_exclusions = True

                tobuild_pytest_params.append(
                    (
                        parameters,
                        param_exclusions,
                        "-".join(
                            comb_fn(getter(arg)) for getter, comb_fn in fns
                        ),
                    )
                )

        else:

            for arg in arg_sets:
                if not isinstance(arg, tuple):
                    arg = (arg,)

                fn_params, param_exclusions = _filter_exclusions(arg)

                if param_exclusions:
                    has_exclusions = True

                tobuild_pytest_params.append(
                    (fn_params, param_exclusions, None)
                )

        pytest_params = []
        for parameters, param_exclusions, id_ in tobuild_pytest_params:
            if has_exclusions:
                parameters += (param_exclusions,)

            param = pytest.param(*parameters, id=id_)
            pytest_params.append(param)

        def decorate(fn):
            if inspect.isclass(fn):
                if has_exclusions:
                    raise NotImplementedError(
                        "exclusions not supported for class level combinations"
                    )
                if "_sa_parametrize" not in fn.__dict__:
                    fn._sa_parametrize = []
                fn._sa_parametrize.append((argnames, pytest_params))
                return fn
            else:
                if argnames is None:
                    _argnames = getargspec(fn).args[1:]  # type: Sequence(str)
                else:
                    _argnames = re.split(
                        r", *", argnames
                    )  # type: Sequence(str)

                if has_exclusions:
                    _argnames += ["_exclusions"]

                    @_pytest_fn_decorator
                    def check_exclusions(fn, *args, **kw):
                        _exclusions = args[-1]
                        if _exclusions:
                            exlu = exclusions.compound().add(*_exclusions)
                            fn = exlu(fn)
                        return fn(*args[0:-1], **kw)

                    def process_metadata(spec):
                        spec.args.append("_exclusions")

                    fn = check_exclusions(
                        fn, add_positional_parameters=("_exclusions",)
                    )

                return pytest.mark.parametrize(_argnames, pytest_params)(fn)

        return decorate

    def param_ident(self, *parameters):
        ident = parameters[0]
        return pytest.param(*parameters[1:], id=ident)

    def fixture(self, *arg, **kw):
        return pytest.fixture(*arg, **kw)

    def get_current_test_name(self):
        return os.environ.get("PYTEST_CURRENT_TEST")
0f5bee18ae85a0d5b0a6a43a1130f89838d0cf3c
49d419d657d4fc29b486fb97c4409b904fe43012
/pytorch/synaptic/gan.py
0efae754f48b139ef3a96e9884a969c73b9f460a
[]
no_license
anantguptadbl/python
660101e7284fb24bd269659bb8f461f7f13d47b6
4954efbe52ff3190201a8c0836d80015d13d4d15
refs/heads/master
2022-03-23T03:52:47.956875
2022-02-24T12:26:29
2022-02-24T12:26:29
114,909,673
3
3
null
null
null
null
UTF-8
Python
false
false
5,581
py
import os
import numpy as np
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms, datasets

# FIRST SHAPE GAN
learning_rate = 0.001
n_classes = 4
embedding_dim = 10
latent_dim = 10


# CONDITIONAL GAN
class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()

        self.label_conditioned_generator = nn.Sequential(
            nn.Embedding(n_classes, embedding_dim),
            nn.Linear(embedding_dim, 16)
        )

        self.latent = nn.Sequential(
            nn.Linear(latent_dim, 4*4*512),
            nn.LeakyReLU(0.2, inplace=True)
        )

        self.model = nn.Sequential(
            nn.ConvTranspose2d(513, 64*8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64*8, momentum=0.1, eps=0.8),
            nn.ReLU(True),
            nn.ConvTranspose2d(64*8, 64*4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64*4, momentum=0.1, eps=0.8),
            nn.ReLU(True),
            nn.ConvTranspose2d(64*4, 64*2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64*2, momentum=0.1, eps=0.8),
            nn.ReLU(True),
            nn.ConvTranspose2d(64*2, 64*1, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64*1, momentum=0.1, eps=0.8),
            nn.ReLU(True),
            nn.ConvTranspose2d(64*1, 3, 4, 2, 1, bias=False),
            nn.Tanh()
        )

    def forward(self, inputs):
        noise_vector, label = inputs
        label_output = self.label_conditioned_generator(label)
        label_output = label_output.view(-1, 1, 4, 4)
        latent_output = self.latent(noise_vector)
        latent_output = latent_output.view(-1, 512, 4, 4)
        concat = torch.cat((latent_output, label_output), dim=1)
        image = self.model(concat)
        # print(image.size())
        return image


class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()

        self.label_condition_disc = nn.Sequential(
            nn.Embedding(n_classes, embedding_dim),
            nn.Linear(embedding_dim, 3*128*128)
        )

        self.model = nn.Sequential(
            nn.Conv2d(6, 64, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 64*2, 4, 3, 2, bias=False),
            nn.BatchNorm2d(64*2, momentum=0.1, eps=0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64*2, 64*4, 4, 3, 2, bias=False),
            nn.BatchNorm2d(64*4, momentum=0.1, eps=0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64*4, 64*8, 4, 3, 2, bias=False),
            nn.BatchNorm2d(64*8, momentum=0.1, eps=0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Flatten(),
            nn.Dropout(0.4),
            nn.Linear(4608, 1),
            nn.Sigmoid()
        )

    def forward(self, inputs):
        img, label = inputs
        label_output = self.label_condition_disc(label)
        label_output = label_output.view(-1, 3, 128, 128)
        concat = torch.cat((img, label_output), dim=1)
        output = self.model(concat)
        return output


batch_size = 16
train_transform = transforms.Compose([
    transforms.Resize(128),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])

train_dataset = datasets.ImageFolder(root='rps', transform=train_transform)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

# Models
discriminator = Discriminator()
generator = Generator()

# Loss
discriminator_loss = nn.BCELoss()
generator_loss = nn.MSELoss()

# Optimizers
D_optimizer = torch.optim.Adam(discriminator.parameters(), lr=learning_rate, weight_decay=1e-5)
G_optimizer = torch.optim.Adam(generator.parameters(), lr=learning_rate, weight_decay=1e-5)

num_epochs = 200
device = 'cpu'
for epoch in range(1, num_epochs + 1):
    D_loss_list, G_loss_list = [], []
    for index, (real_images, labels) in enumerate(train_loader):
        D_optimizer.zero_grad()
        real_images = real_images.to(device)
        labels = labels.to(device)
        labels = labels.unsqueeze(1).long()

        real_target = Variable(torch.ones(real_images.size(0), 1).to(device))
        fake_target = Variable(torch.zeros(real_images.size(0), 1).to(device))

        D_real_loss = discriminator_loss(discriminator((real_images, labels)), real_target)
        # print(discriminator(real_images))
        # D_real_loss.backward()

        noise_vector = torch.randn(real_images.size(0), latent_dim, device=device)
        noise_vector = noise_vector.to(device)

        generated_image = generator((noise_vector, labels))
        output = discriminator((generated_image.detach(), labels))
        D_fake_loss = discriminator_loss(output, fake_target)

        # train with fake
        # D_fake_loss.backward()

        D_total_loss = (D_real_loss + D_fake_loss) / 2
        D_loss_list.append(D_total_loss)

        D_total_loss.backward()
        D_optimizer.step()

        # Train generator with real labels
        G_optimizer.zero_grad()
        G_loss = generator_loss(discriminator((generated_image, labels)), real_target)
        G_loss_list.append(G_loss)

        G_loss.backward()
        G_optimizer.step()
    print("Epoch {0} Gen loss {1} Discrim loss {2}".format(epoch, G_loss, D_total_loss))
1cc02218743180f0e7cfee9382aa8e4dfc3a14f4
b1018e272ed284ab70ffe6055b90726e879004b3
/MIDI Remote Scripts/Push2/observable_property_alias.py
9425fff2c4cd6cac938da28aa0e083b11d2b675d
[]
no_license
aumhaa/livepy_diff
8e593ffb30b1e7909225352f3a0084d4de2e51e6
266a7380c4d5a162c051c23f534f74cb7eace538
refs/heads/master
2020-04-12T03:17:56.545373
2017-04-24T02:02:07
2017-04-24T02:02:07
13,946,086
3
0
null
null
null
null
UTF-8
Python
false
false
1,299
py
from __future__ import absolute_import, print_function
from ableton.v2.base import EventObject, Slot


class ObservablePropertyAlias(EventObject):

    def __init__(self, alias_host, property_host = None, property_name = '', alias_name = None, getter = None, *a, **k):
        super(ObservablePropertyAlias, self).__init__(*a, **k)
        self._alias_host = alias_host
        self._alias_name = alias_name or property_name
        self._property_host = property_host
        self._property_name = property_name
        self._property_slot = None
        self._setup_alias(getter)

    def _get_property_host(self):
        return self._property_host

    def _set_property_host(self, host):
        self._property_host = host
        self._property_slot.subject = host

    property_host = property(_get_property_host, _set_property_host)

    def _setup_alias(self, getter):
        aliased_prop = property(getter or self._get_property)
        setattr(self._alias_host.__class__, self._alias_name, aliased_prop)
        notifier = getattr(self._alias_host, 'notify_' + self._alias_name)
        self._property_slot = self.register_slot(Slot(self.property_host, notifier, self._property_name))

    def _get_property(self, _):
        return getattr(self.property_host, self._property_name, None)
0cb91498a8fec68c9a95ee15d7048009ec9e4165
ca17757a2c9140a11e4e10e581c4e5f086c92177
/elit/version.py
ece3c23fd05a8776f1d524dba26bfa30286875d2
[ "Apache-2.0" ]
permissive
archanatikayatray19/elit
22aebbfbed3a2b56a1c45faea0f2e6c6d5aa0a0f
f0fd5181ee6613231e5086c82df0241fc3e8e434
refs/heads/main
2023-03-09T17:02:33.731637
2021-02-05T20:14:56
2021-02-05T20:14:56
null
0
0
null
null
null
null
UTF-8
Python
false
false
802
py
# ========================================================================
# Copyright 2020 Emory University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================

# -*- coding:utf-8 -*-
# Author: hankcs

__version__ = '2.0.0-alpha.0'
3988164b11a41d709645f15e7415f92b1cdbc978
ec164c8b16fdca016c03ef24aaa9ac623e873fa6
/zentral/core/events/base.py
d271721c94aa77a2b87f1262c56e3785b7c764ad
[ "Apache-2.0" ]
permissive
coreservice/zentral
a67c2139e7aaadb7bce14938a3e668e867373cd2
24a00b0ff15aee531027c2d0ff39931a318eff9e
refs/heads/master
2021-06-17T12:35:20.260526
2017-05-15T17:57:44
2017-05-15T17:57:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
9,738
py
from datetime import datetime import logging import os.path import re import uuid from dateutil import parser from django.utils.functional import cached_property from django.utils.text import slugify from zentral.contrib.inventory.models import MetaMachine from zentral.core.queues import queues from .template_loader import TemplateLoader from . import register_event_type logger = logging.getLogger('zentral.core.events.base') template_loader = TemplateLoader([os.path.join(os.path.dirname(__file__), 'templates')]) def render_notification_part(ctx, event_type, part): template = template_loader.load(event_type, part) if template: return template.render(ctx) else: msg = 'Missing template event_type: {} part: {}'.format(event_type, part) logger.error(msg) return msg class EventRequest(object): user_agent_str_length = 50 def __init__(self, user_agent, ip): self.user_agent = user_agent self.ip = ip def serialize(self): return {k: v for k, v in (("user_agent", self.user_agent), ("ip", self.ip)) if v} def __str__(self): l = [] if self.ip: l.append(self.ip) if self.user_agent: user_agent = self.user_agent if len(user_agent) > self.user_agent_str_length: user_agent = "{}…".format( user_agent[:self.user_agent_str_length - 1].strip() ) l.append(user_agent) return " - ".join(l) class EventMetadata(object): def __init__(self, event_type, **kwargs): self.event_type = event_type self.uuid = kwargs.pop('uuid', uuid.uuid4()) if isinstance(self.uuid, str): self.uuid = uuid.UUID(self.uuid) self.index = int(kwargs.pop('index', 0)) self.created_at = kwargs.pop('created_at', None) if self.created_at is None: self.created_at = datetime.utcnow() elif isinstance(self.created_at, str): self.created_at = parser.parse(self.created_at) self.machine_serial_number = kwargs.pop('machine_serial_number', None) if self.machine_serial_number: self.machine = MetaMachine(self.machine_serial_number) else: self.machine = None self.request = kwargs.pop('request', None) self.tags = kwargs.pop('tags', []) @classmethod def deserialize(cls, event_d_metadata): kwargs = event_d_metadata.copy() kwargs['event_type'] = kwargs.pop('type') kwargs['uuid'] = kwargs.pop('id') request_d = kwargs.pop('request', None) if request_d: kwargs['request'] = EventRequest(**request_d) return cls(**kwargs) def serialize(self, machine_metadata=True): d = {'created_at': self.created_at.isoformat(), 'id': str(self.uuid), 'index': self.index, 'type': self.event_type, } if self.request: d['request'] = self.request.serialize() if self.tags: d['tags'] = self.tags if self.machine_serial_number: d['machine_serial_number'] = self.machine_serial_number if not machine_metadata or not self.machine: return d machine_d = {} for ms in self.machine.snapshots: source = ms.source ms_d = {'name': ms.get_machine_str()} if ms.business_unit: if not ms.business_unit.is_api_enrollment_business_unit(): ms_d['business_unit'] = {'reference': ms.business_unit.reference, 'key': ms.business_unit.get_short_key(), 'name': ms.business_unit.name} if ms.os_version: ms_d['os_version'] = str(ms.os_version) for group in ms.groups.all(): ms_d.setdefault('groups', []).append({'reference': group.reference, 'key': group.get_short_key(), 'name': group.name}) key = slugify(source.name) if key in ms_d: # TODO: earlier warning in conf check ? 
logger.warning('Inventory source slug %s exists already', key) machine_d[key] = ms_d for tag in self.machine.tags: machine_d.setdefault('tags', []).append({'id': tag.id, 'name': tag.name}) for meta_business_unit in self.machine.meta_business_units: machine_d.setdefault('meta_business_units', []).append({ 'name': meta_business_unit.name, 'id': meta_business_unit.id }) if self.machine.platform: machine_d['platform'] = self.machine.platform if self.machine.type: machine_d['type'] = self.machine.type if machine_d: d['machine'] = machine_d return d class BaseEvent(object): event_type = "base" tags = [] heartbeat_timeout = None payload_aggregations = [] @classmethod def build_from_machine_request_payloads(cls, msn, ua, ip, payloads, get_created_at=None): if ua or ip: request = EventRequest(ua, ip) else: request = None metadata = EventMetadata(cls.event_type, machine_serial_number=msn, request=request, tags=cls.tags) for index, payload in enumerate(payloads): metadata.index = index if get_created_at: try: metadata.created_at = get_created_at(payload) except: logger.exception("Could not extract created_at from payload") yield cls(metadata, payload) @classmethod def post_machine_request_payloads(cls, msn, user_agent, ip, payloads, get_created_at=None): for event in cls.build_from_machine_request_payloads(msn, user_agent, ip, payloads, get_created_at): event.post() def __init__(self, metadata, payload): self.metadata = metadata self.payload = payload def _key(self): return (self.event_type, self.metadata.uuid, self.metadata.index) def __eq__(self, other): return self._key() == other._key() @classmethod def get_event_type_display(cls): return cls.event_type.replace("_", " ") def __str__(self): return self.get_event_type_display() @classmethod def get_app_display(cls): module = cls.__module__ if module.startswith("zentral.core"): return "Zentral" else: try: return module.split(".")[-2].capitalize() except IndexError: return module @classmethod def deserialize(cls, event_d): payload = event_d.copy() metadata = EventMetadata.deserialize(payload.pop('_zentral')) return cls(metadata, payload) def serialize(self, machine_metadata=True): event_d = self.payload.copy() event_d['_zentral'] = self.metadata.serialize(machine_metadata) return event_d def post(self): queues.post_event(self) def extra_probe_checks(self, probe): return True # notification methods @cached_property def base_notification_context(self): return {'event': self, 'metadata': self.metadata, 'payload': self.payload, 'machine': self.metadata.machine} def get_notification_context(self, probe): ctx = self.base_notification_context.copy() ctx["probe"] = probe return ctx def get_notification_subject(self, probe): ctx = self.get_notification_context(probe) return render_notification_part(ctx, self.event_type, 'subject') def get_notification_body(self, probe): ctx = self.get_notification_context(probe) return render_notification_part(ctx, self.event_type, 'body') # aggregations @classmethod def get_payload_aggregations(cls): for _, val in cls.payload_aggregations: if "event_type" not in val: val["event_type"] = cls.event_type return cls.payload_aggregations register_event_type(BaseEvent) # Zentral Commands class CommandEvent(BaseEvent): COMMAND_RE = re.compile(r"^zentral\$(?P<command>[a-zA-Z\-_ ]+)" "(?P<serial_numbers>(?:\$[a-zA-Z0-9\-_]+)+)" "(?P<args>(?:#[a-zA-Z0-9\-_ ]+)+)?$") event_type = "zentral_command" tags = ["zentral"] register_event_type(CommandEvent) def post_command_events(message, source, tags): if not message: return for line in 
message.splitlines(): line = line.strip() m = CommandEvent.COMMAND_RE.match(line) if m: payload = {'command': m.group('command'), 'source': source} args = m.group('args') if args: payload['args'] = [arg for arg in args.split('#') if arg] for serial_number in m.group('serial_numbers').split('$'): if serial_number: metadata = EventMetadata(CommandEvent.event_type, machine_serial_number=serial_number, tags=CommandEvent.tags + tags) event = CommandEvent(metadata, payload.copy()) event.post()
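# A quick illustration of the message format COMMAND_RE accepts (editor sketch;
# the command names and serial numbers below are made up):
#     zentral$reboot$0123456789AB
#     zentral$install_profile$SERIAL1$SERIAL2#arg1#arg2
# i.e. "zentral$<command>" followed by one or more "$<serial_number>" segments
# and optional "#<arg>" suffixes; post_command_events() then posts one
# CommandEvent per serial number, with the args list shared in the payload.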
3dec57ae1f1dbea2ae89aaa75b6bee067092dd1f
865bd0c84d06b53a39943dd6d71857e9cfc6d385
/179-largest-number/largest-number.py
4bc8f0d54937b5992d4b0409c3c534c6cb19937f
[]
no_license
ANDYsGUITAR/leetcode
1fd107946f4df50cadb9bd7189b9f7b7128dc9f1
cbca35396738f1fb750f58424b00b9f10232e574
refs/heads/master
2020-04-01T18:24:01.072127
2019-04-04T08:38:44
2019-04-04T08:38:44
153,473,780
0
0
null
null
null
null
UTF-8
Python
false
false
766
py
# Given a list of non negative integers, arrange them such that they form the largest number.
#
# Example 1:
#
# Input: [10,2]
# Output: "210"
#
# Example 2:
#
# Input: [3,30,34,5,9]
# Output: "9534330"
#
# Note: The result may be very large, so you need to return a string instead of an integer.

from typing import List  # needed for the List[int] annotation outside the LeetCode judge


class Solution:
    def largestNumber(self, nums: List[int]) -> str:
        import functools

        def cmp(a, b):
            # Order two digit-strings by which concatenation yields the larger number.
            if int(a + b) > int(b + a):
                return 1
            elif int(a + b) < int(b + a):
                return -1
            else:
                return 0

        nums = list(map(str, nums))
        nums.sort(key=functools.cmp_to_key(cmp), reverse=True)
        # If the largest digit-string is "0", every entry is zero.
        return ''.join(nums) if nums[0] != '0' else '0'
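# A minimal usage sketch (not part of the original submission); expected
# outputs follow from the problem statement above:
if __name__ == '__main__':
    print(Solution().largestNumber([3, 30, 34, 5, 9]))  # "9534330"
    print(Solution().largestNumber([0, 0]))             # "0"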
51a0b57a4843284c84f38263e5747348b00f6de2
d7016f69993570a1c55974582cda899ff70907ec
/sdk/security/azure-mgmt-security/azure/mgmt/security/v2020_01_01/aio/operations/_external_security_solutions_operations.py
180037abff08cde8f2d7c069462cc835a36ff72c
[ "LicenseRef-scancode-generic-cla", "MIT", "LGPL-2.1-or-later" ]
permissive
kurtzeborn/azure-sdk-for-python
51ca636ad26ca51bc0c9e6865332781787e6f882
b23e71b289c71f179b9cf9b8c75b1922833a542a
refs/heads/main
2023-03-21T14:19:50.299852
2023-02-15T13:30:47
2023-02-15T13:30:47
157,927,277
0
0
MIT
2022-07-19T08:05:23
2018-11-16T22:15:30
Python
UTF-8
Python
false
false
13,540
py
# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import sys from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar import urllib.parse from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, ResourceNotModifiedError, map_error, ) from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.utils import case_insensitive_dict from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models from ..._vendor import _convert_request from ...operations._external_security_solutions_operations import ( build_get_request, build_list_by_home_region_request, build_list_request, ) if sys.version_info >= (3, 8): from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports else: from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class ExternalSecuritySolutionsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.mgmt.security.v2020_01_01.aio.SecurityCenter`'s :attr:`external_security_solutions` attribute. """ models = _models def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client = input_args.pop(0) if input_args else kwargs.pop("client") self._config = input_args.pop(0) if input_args else kwargs.pop("config") self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def list(self, **kwargs: Any) -> AsyncIterable["_models.ExternalSecuritySolution"]: """Gets a list of external security solutions for the subscription. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ExternalSecuritySolution or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.v2020_01_01.models.ExternalSecuritySolution] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: Literal["2020-01-01"] cls = kwargs.pop("cls", None) # type: ClsType[_models.ExternalSecuritySolutionList] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) def prepare_request(next_link=None): if not next_link: request = build_list_request( subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.list.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore else: # make call to next link with the client's api-version _parsed_next_link = urllib.parse.urlparse(next_link) _next_request_params = case_insensitive_dict( { key: [urllib.parse.quote(v) for v in value] for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) _next_request_params["api-version"] = self._config.api_version request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("ExternalSecuritySolutionList", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data) list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Security/externalSecuritySolutions"} # type: ignore @distributed_trace def list_by_home_region( self, asc_location: str, **kwargs: Any ) -> AsyncIterable["_models.ExternalSecuritySolution"]: """Gets a list of external Security Solutions for the subscription and location. :param asc_location: The location where ASC stores the data of the subscription. can be retrieved from Get locations. Required. 
:type asc_location: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ExternalSecuritySolution or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.v2020_01_01.models.ExternalSecuritySolution] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: Literal["2020-01-01"] cls = kwargs.pop("cls", None) # type: ClsType[_models.ExternalSecuritySolutionList] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) def prepare_request(next_link=None): if not next_link: request = build_list_by_home_region_request( asc_location=asc_location, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.list_by_home_region.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore else: # make call to next link with the client's api-version _parsed_next_link = urllib.parse.urlparse(next_link) _next_request_params = case_insensitive_dict( { key: [urllib.parse.quote(v) for v in value] for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) _next_request_params["api-version"] = self._config.api_version request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("ExternalSecuritySolutionList", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data) list_by_home_region.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Security/locations/{ascLocation}/ExternalSecuritySolutions"} # type: ignore @distributed_trace_async async def get( self, resource_group_name: str, asc_location: str, external_security_solutions_name: str, **kwargs: Any ) -> _models.ExternalSecuritySolution: """Gets a specific external Security Solution. :param resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. Required. :type resource_group_name: str :param asc_location: The location where ASC stores the data of the subscription. can be retrieved from Get locations. Required. :type asc_location: str :param external_security_solutions_name: Name of an external security solution. Required. 
:type external_security_solutions_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ExternalSecuritySolution or the result of cls(response) :rtype: ~azure.mgmt.security.v2020_01_01.models.ExternalSecuritySolution :raises ~azure.core.exceptions.HttpResponseError: """ error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: Literal["2020-01-01"] cls = kwargs.pop("cls", None) # type: ClsType[_models.ExternalSecuritySolution] request = build_get_request( resource_group_name=resource_group_name, asc_location=asc_location, external_security_solutions_name=external_security_solutions_name, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.get.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize("ExternalSecuritySolution", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/locations/{ascLocation}/ExternalSecuritySolutions/{externalSecuritySolutionsName}"} # type: ignore
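# A minimal async usage sketch (editor assumption: azure-identity is installed
# and the top-level SecurityCenter aio client named in the class docstring is
# wired to these operations; exact constructor arguments can vary across
# package versions):
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.security.v2020_01_01.aio import SecurityCenter
#
# async def dump_solutions(subscription_id):
#     async with SecurityCenter(DefaultAzureCredential(), subscription_id) as client:
#         async for solution in client.external_security_solutions.list():
#             print(solution.name)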
48b99421ac4eec624238b6abb45995860b5a7022
fb64776f71eb2a469395a39c3ff33635eb388357
/apps/accounts/tests/factories/user.py
96bbc988f36a6f82a99567ea1bca2505a79beae1
[ "MIT" ]
permissive
jimialex/django-wise
ec79d21c428fd1eea953362890051d2120e19f9e
3fdc01eabdff459b31e016f9f6d1cafc19c5a292
refs/heads/master
2023-04-30T20:59:51.625190
2021-05-10T06:55:40
2021-05-10T06:55:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
981
py
# -*- coding: utf-8 -*-
import factory
from faker import Factory
from faker.providers import misc, person, profile

from apps.accounts.models.user import User

fake = Factory.create()
fake.add_provider(person)
fake.add_provider(profile)
fake.add_provider(misc)


def fake_username():
    return fake.simple_profile()['username']


def generate_user_profile():
    user_profile = fake.simple_profile()
    user_password = fake.uuid4()
    # Caveat: faker can produce names with more than two tokens (prefixes,
    # suffixes); everything past the second token is silently dropped here.
    full_name = fake.name().split(' ')
    return {
        'username': user_profile['username'],
        'email': user_profile['mail'],
        'firstName': full_name[0],
        'lastName': full_name[1],
        'password': user_password,
    }


class UserFactory(factory.django.DjangoModelFactory):
    username = factory.LazyFunction(fake_username)
    email = factory.LazyFunction(fake.email)
    first_name = factory.LazyFunction(fake.first_name)
    last_name = factory.LazyFunction(fake.last_name)

    class Meta:
        model = User
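# A minimal usage sketch (assumes a configured Django test environment;
# build() constructs an unsaved instance, create() would persist a row):
# user = UserFactory.build()
# print(user.username, user.email)
# print(generate_user_profile())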
c75997f1473800d28b0da9e07ecc158b5a2a2585
1a30cff1f2660ba5cef26898bac56cbc9269e5d4
/server/src/test/unit/weblab/core/test_reservation_processor.py
550556c9dfdbcc0ab2170984cfede4aad59b64cc
[ "BSD-2-Clause", "BSD-3-Clause" ]
permissive
fiodarhancharou/weblabdeusto
0bcfc7628bc793a7fe7364f68461eaba3eb45997
d5a268cbb83ff193b0639db717b11ec448638df8
refs/heads/master
2022-05-24T03:31:34.946845
2020-03-05T16:55:18
2020-03-05T16:55:18
null
0
0
null
null
null
null
UTF-8
Python
false
false
23,654
py
#!/usr/bin/env python #-*-*- encoding: utf-8 -*-*- # # Copyright (C) 2005 onwards University of Deusto # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # # This software consists of contributions made by many individuals, # listed below: # # Author: Pablo Orduña <[email protected]> # Luis Rodriguez <[email protected]> # from __future__ import print_function, unicode_literals import unittest import time import datetime import mocker from voodoo.gen import CoordAddress import voodoo.sessions.session_id as SessionId from test.util.module_disposer import case_uses_module import weblab.core.user_processor as UserProcessor from weblab.core.reservation_processor import ReservationProcessor, EXPERIMENT_POLL_TIME import weblab.core.reservations as Reservation import weblab.core.coordinator.confirmer as Confirmer import weblab.core.coordinator.store as TemporalInformationStore import weblab.data.server_type as ServerType from weblab.core.coordinator.gateway import create as coordinator_create, SQLALCHEMY import weblab.data.command as Command import weblab.data.dto.users as Group from weblab.data.experiments import ExperimentInstanceId, ExperimentId import weblab.data.dto.experiments as Category import weblab.data.dto.experiments as Experiment import weblab.data.dto.experiments as ExperimentAllowed import weblab.data.dto.experiments as ExperimentUse import weblab.data.dto.users as User import weblab.data.dto.users as Role from weblab.data import ValidDatabaseSessionId from weblab.core.coordinator.resource import Resource from weblab.core.coordinator.config_parser import COORDINATOR_LABORATORY_SERVERS import weblab.core.exc as coreExc import weblab.lab.exc as LaboratoryErrors import test.unit.configuration as configuration_module import voodoo.configuration as ConfigurationManager laboratory_coordaddr = CoordAddress.translate( "server:laboratoryserver@labmachine" ) @case_uses_module(Confirmer) class ReservationProcessorTestCase(unittest.TestCase): def setUp(self): self.mocker = mocker.Mocker() self.lab_mock = self.mocker.mock() self.locator = FakeLocator( lab = self.lab_mock ) self.db = FakeDatabase() self.cfg_manager = ConfigurationManager.ConfigurationManager() self.cfg_manager.append_module(configuration_module) self.cfg_manager._set_value(COORDINATOR_LABORATORY_SERVERS, { 'server:laboratoryserver@labmachine' : { 'inst|ud-dummy|Dummy experiments' : 'res_inst@res_type' } }) self.commands_store = TemporalInformationStore.CommandsTemporalInformationStore() self.coordinator = coordinator_create(SQLALCHEMY, self.locator, self.cfg_manager) self.coordinator._clean() self.coordinator.add_experiment_instance_id("server:laboratoryserver@labmachine", ExperimentInstanceId('inst','ud-dummy','Dummy experiments'), Resource("res_type", "res_inst")) self.user_processor = UserProcessor.UserProcessor( self.locator, { 'db_session_id' : ValidDatabaseSessionId('my_db_session_id') }, self.cfg_manager, self.coordinator, self.db, self.commands_store ) def create_reservation_processor(self, faking_response = False): if faking_response: self._fake_simple_lab_response() status = self.user_processor.reserve_experiment( ExperimentId('ud-dummy', 'Dummy experiments'), "{}", "{}", "127.0.0.1", 'uuid') self.reservation_processor = ReservationProcessor( self.cfg_manager, SessionId.SessionId(status.reservation_id.split(';')[0]), { 'session_polling' : (time.time(), ReservationProcessor.EXPIRATION_TIME_NOT_SET), 'latest_timestamp' : 0, 
'experiment_id' : ExperimentId('ud-dummy', 'Dummy experiments'), 'creator_session_id' : '', 'reservation_id' : SessionId.SessionId(status.reservation_id.split(';')[0]), }, self.coordinator, self.locator, self.commands_store ) def tearDown(self): self.coordinator.stop() def test_get_info(self): self.create_reservation_processor(True) self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) reservation_info = self.reservation_processor.get_info() self.assertEquals('ud-dummy', reservation_info.exp_name) self.assertEquals('Dummy experiments', reservation_info.cat_name) def test_is_polling(self): self.create_reservation_processor(True) self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.assertTrue( self.reservation_processor.is_polling() ) self.reservation_processor.finish() self.assertFalse( self.reservation_processor.is_polling() ) def test_is_expired_didnt_expire(self): self.create_reservation_processor(True) self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.assertFalse( self.reservation_processor.is_expired() ) self.reservation_processor.finish() self.assertTrue( self.reservation_processor.is_expired() ) def test_is_expired_expired_without_expiration_time_set(self): time_mock = self.mocker.mock() time_mock.time() poll_time = self.cfg_manager.get_value(EXPERIMENT_POLL_TIME) added = poll_time + 5 self.mocker.result(time.time() + added) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.time_module = time_mock self.assertTrue( self.reservation_processor.is_expired() ) def test_is_expired_expired_due_to_expiration_time(self): self._return_reserved() poll_time = self.cfg_manager.get_value(EXPERIMENT_POLL_TIME) added = poll_time - 5 # for example self.db.experiments_allowed[0].time_allowed = poll_time - 10 self.assertTrue( added > 0 ) time_mock = self.mocker.mock() time_mock.time() self.mocker.result(time.time() + added) self.mocker.replay() # # Reserve the experiment self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) reservation_status = self.reservation_processor.get_status() self.assertTrue( isinstance(reservation_status, Reservation.ConfirmedReservation) ) self.reservation_processor.time_module = time_mock self.assertTrue( self.reservation_processor.is_expired() ) def test_finished_experiment_ok(self): self.create_reservation_processor(True) self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.finish() def test_finished_experiment_coordinator_error(self): self.create_reservation_processor(True) self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) # Force the coordinator to fail when invoking finish_reservation self.coordinator.finish_reservation = lambda *args: 10 / 0 self.assertRaises( coreExc.FailedToFreeReservationError, self.reservation_processor.finish) def test_send_async_file_ok(self): file_content = "SAMPLE CONTENT" lab_response = Command.Command("LAB RESPONSE") file_info = 'program' self._return_reserved() 
self.lab_mock.send_async_file(SessionId.SessionId('my_lab_session_id'), file_content, file_info) self.mocker.result(lab_response) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) response = self.reservation_processor.send_async_file(file_content, file_info) self.assertEquals(lab_response, response) self.assertFalse( self.reservation_processor.is_expired() ) self.reservation_processor.finish() self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_file_ok(self): file_content = "SAMPLE CONTENT" lab_response = Command.Command("LAB RESPONSE") file_info = 'program' self._return_reserved() self.lab_mock.send_file(SessionId.SessionId('my_lab_session_id'), file_content, file_info) self.mocker.result(lab_response) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) response = self.reservation_processor.send_file(file_content, file_info) self.assertEquals(lab_response, response) self.assertFalse( self.reservation_processor.is_expired() ) self.reservation_processor.finish() self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_file_session_not_found_in_lab(self): self._return_reserved() file_content = "SAMPLE CONTENT" file_info = "program" self.lab_mock.send_file(SessionId.SessionId('my_lab_session_id'), file_content, file_info) self.mocker.throw( LaboratoryErrors.SessionNotFoundInLaboratoryServerError("problem@laboratory") ) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) self.assertRaises( coreExc.NoCurrentReservationError, self.reservation_processor.send_file, file_content, file_info ) self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_async_file_session_not_found_in_lab(self): self._return_reserved() file_content = "SAMPLE CONTENT" file_info = "program" self.lab_mock.send_async_file(SessionId.SessionId('my_lab_session_id'), file_content, file_info) self.mocker.throw( LaboratoryErrors.SessionNotFoundInLaboratoryServerError("problem@laboratory") ) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) self.assertRaises( coreExc.NoCurrentReservationError, self.reservation_processor.send_async_file, file_content, file_info ) self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_async_file_failed_to_send(self): self._return_reserved() file_content = "SAMPLE CONTENT" file_info = "program" self.lab_mock.send_async_file(SessionId.SessionId('my_lab_session_id'), file_content, file_info) self.mocker.throw( 
LaboratoryErrors.FailedToInteractError("problem@laboratory") ) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) self.assertRaises( coreExc.FailedToInteractError, self.reservation_processor.send_async_file, file_content, file_info ) self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_file_failed_to_send(self): self._return_reserved() file_content = "SAMPLE CONTENT" file_info = "program" self.lab_mock.send_file(SessionId.SessionId('my_lab_session_id'), file_content, file_info) self.mocker.throw( LaboratoryErrors.FailedToInteractError("problem@laboratory") ) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) self.assertRaises( coreExc.FailedToInteractError, self.reservation_processor.send_file, file_content, file_info ) self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_async_command_ok(self): self._return_reserved() command = Command.Command("Your command") lab_response = Command.Command("LAB RESPONSE") self.lab_mock.send_async_command(SessionId.SessionId('my_lab_session_id'), command) self.mocker.result(lab_response) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) response = self.reservation_processor.send_async_command(command) self.assertEquals(lab_response, response) self.assertFalse( self.reservation_processor.is_expired() ) self.reservation_processor.finish() self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_command_ok(self): self._return_reserved() command = Command.Command("Your command") lab_response = Command.Command("LAB RESPONSE") self.lab_mock.send_command(SessionId.SessionId('my_lab_session_id'), command) self.mocker.result(lab_response) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) response = self.reservation_processor.send_command(command) self.assertEquals(lab_response, response) self.assertFalse( self.reservation_processor.is_expired() ) self.reservation_processor.finish() self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_command_session_not_found_in_lab(self): self._return_reserved() command = Command.Command("Your command") self.lab_mock.send_command(SessionId.SessionId('my_lab_session_id'), command) self.mocker.throw( LaboratoryErrors.SessionNotFoundInLaboratoryServerError("problem@laboratory") ) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) 
self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) self.assertRaises( coreExc.NoCurrentReservationError, self.reservation_processor.send_command, command ) self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_async_command_session_not_found_in_lab(self): self._return_reserved() command = Command.Command("Your command") self.lab_mock.send_async_command(SessionId.SessionId('my_lab_session_id'), command) self.mocker.throw( LaboratoryErrors.SessionNotFoundInLaboratoryServerError("problem@laboratory") ) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) self.assertRaises( coreExc.NoCurrentReservationError, self.reservation_processor.send_async_command, command ) self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_async_command_failed_to_send(self): self._return_reserved() command = Command.Command("Your command") self.lab_mock.send_async_command(SessionId.SessionId('my_lab_session_id'), command) self.mocker.throw( LaboratoryErrors.FailedToInteractError("problem@laboratory") ) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) self.assertRaises( coreExc.FailedToInteractError, self.reservation_processor.send_async_command, command ) self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def test_send_command_failed_to_send(self): self._return_reserved() command = Command.Command("Your command") self.lab_mock.send_command(SessionId.SessionId('my_lab_session_id'), command) self.mocker.throw( LaboratoryErrors.FailedToInteractError("problem@laboratory") ) self.mocker.replay() self.create_reservation_processor() self.coordinator.confirmer._confirm_handler.join(10) self.assertFalse(self.coordinator.confirmer._confirm_handler.isAlive()) self.reservation_processor.get_status() self.assertFalse( self.reservation_processor.is_expired() ) self.assertRaises( coreExc.FailedToInteractError, self.reservation_processor.send_command, command ) self.assertEquals( self.reservation_processor.get_status().status, Reservation.Reservation.POST_RESERVATION ) def _fake_simple_lab_response(self): self.lab_mock.reserve_experiment(ExperimentInstanceId('inst','ud-dummy','Dummy experiments'), "{}", mocker.ANY) self.mocker.result((SessionId.SessionId('my_lab_session_id'), 'ok', { 'address' : 'servexp:inst@mach' })) self.lab_mock.resolve_experiment_address('my_lab_session_id') self.mocker.result(CoordAddress("exp","inst","mach")) self.lab_mock.should_experiment_finish(SessionId.SessionId('my_lab_session_id')) self.mocker.result(0) self.mocker.replay() def _return_reserved(self): self.lab_mock.reserve_experiment(ExperimentInstanceId('inst','ud-dummy','Dummy experiments'), "{}", mocker.ANY) self.mocker.result((SessionId.SessionId('my_lab_session_id'), 'ok', { 'address' : 'servexp:inst@mach' })) self.lab_mock.resolve_experiment_address('my_lab_session_id') 
self.mocker.result(CoordAddress("exp","inst","mach")) self.mocker.count(1,2) class FakeDatabase(object): def __init__(self): self.experiments_allowed = [ generate_experiment_allowed( 100, 'ud-dummy', 'Dummy experiments' ) ] self.groups = [ Group.Group("5A") ] self.experiments = [ generate_experiment('ud-dummy', 'Dummy experiments') ] self.experiment_uses = [ generate_experiment_use("student2", self.experiments[0]) ], 1 self.users = [ User.User("admin1", "Admin Test User", "[email protected]", Role.Role("administrator")) ] self.roles = [ Role.Role("student"), Role.Role("instructor"), Role.Role("administrator") ] def is_access_forward(self, db_session_id): return True def store_experiment_usage(self, db_session_id, experiment_usage): pass def list_experiments(self, db_session_id, exp_name = None, cat_name = None): return self.experiments_allowed def get_user_by_name(self, db_session_id): return self.users[0] class FakeLocator(object): def __init__(self, lab): self.lab = lab def __getitem__(self, coord_addr): if laboratory_coordaddr == coord_addr: return self.lab raise Exception("Server not found") def generate_experiment(exp_name,exp_cat_name): cat = Category.ExperimentCategory(exp_cat_name) client = Experiment.ExperimentClient("client", {}) exp = Experiment.Experiment( exp_name, cat, '01/01/2007', '31/12/2007', client) return exp def generate_experiment_allowed(time_allowed, exp_name, exp_cat_name): exp = generate_experiment(exp_name, exp_cat_name) return ExperimentAllowed.ExperimentAllowed(exp, time_allowed, 5, True, '%s::user' % exp_name, 1, 'user') def generate_experiment_use(user_login, exp): exp_use = ExperimentUse.ExperimentUse( datetime.datetime.utcnow(), datetime.datetime.utcnow(), exp, User.User( user_login, "Jaime Irurzun", "[email protected]", Role.Role("student")), "unknown") return exp_use def suite(): return unittest.makeSuite(ReservationProcessorTestCase) if __name__ == '__main__': unittest.main()
96d9a1b535980b5c4821400c772bd1885d87ca2c
a12c090eb57da4c8e1f543a1a9d497abad763ccd
/django-stubs/forms/utils.pyi
0131077285c5d3dbc9de498e79682756eb713275
[ "BSD-3-Clause" ]
permissive
debuggerpk/django-stubs
be12eb6b43354a18675de3f70c491e534d065b78
bbdaebb244bd82544553f4547157e4f694f7ae99
refs/heads/master
2020-04-04T08:33:52.358704
2018-09-26T19:32:19
2018-09-26T19:32:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,779
pyi
from collections import UserList
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union

from django.core.exceptions import ValidationError
from django.utils.safestring import SafeText

def pretty_name(name: str) -> str: ...
def flatatt(attrs: Dict[str, Optional[str]]) -> SafeText: ...

class ErrorDict(dict):
    def as_data(self) -> Dict[str, List[ValidationError]]: ...
    def get_json_data(
        self, escape_html: bool = ...
    ) -> Dict[str, List[Dict[str, str]]]: ...
    def as_json(self, escape_html: bool = ...) -> str: ...
    def as_ul(self) -> str: ...
    def as_text(self) -> str: ...

class ErrorList(UserList, list):
    # The original annotation used the dotted path
    # django.core.exceptions.ValidationError, which is unresolvable here
    # because only the class itself is imported.
    data: List[Union[ValidationError, str]]
    error_class: str = ...
    def __init__(
        self,
        initlist: Optional[
            Union[List[ValidationError], List[str], ErrorList]
        ] = ...,
        error_class: Optional[str] = ...,
    ) -> None: ...
    def as_data(self) -> List[ValidationError]: ...
    def get_json_data(
        self, escape_html: bool = ...
    ) -> List[Dict[str, str]]: ...
    def as_json(self, escape_html: bool = ...) -> str: ...
    def as_ul(self) -> str: ...
    def as_text(self) -> str: ...
    def __contains__(self, item: str) -> bool: ...
    def __eq__(self, other: Union[List[str], ErrorList]) -> bool: ...
    def __getitem__(self, i: Union[int, str]) -> str: ...
    def __reduce_ex__(
        self, *args: Any, **kwargs: Any
    ) -> Tuple[
        Callable,
        Tuple[Type[ErrorList]],
        Dict[str, Union[List[ValidationError], str]],
        None,
        None,
    ]: ...

def from_current_timezone(value: datetime) -> datetime: ...
def to_current_timezone(value: datetime) -> datetime: ...
82e4a2ec6b46eff9f8e2a406b016bbc33060b85f
6ff318a9f67a3191b2a9f1d365b275c2d0e5794f
/python/小练习/socket.py
f649e706ec3dcebf2cd1769b94c7c70fb4f9a757
[]
no_license
lvhanzhi/Python
c1846cb83660d60a55b0f1d2ed299bc0632af4ba
c89f882f601898b5caab25855ffa7d7a1794f9ab
refs/heads/master
2020-03-25T23:34:00.919197
2018-09-13T12:19:51
2018-09-13T12:19:51
144,281,084
0
0
null
null
null
null
UTF-8
Python
false
false
1,429
py
# NOTE: this practice file is named socket.py, which shadows the standard
# library module it imports; rename it before running it for real.

# 1. Buy a phone
import socket

phone = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP is the stream protocol; UDP (SOCK_DGRAM) is the datagram protocol
# print(phone)

# 2. Insert/bind the SIM card
# phone.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
phone.bind(('127.0.0.1', 8080))

# 3. Power on
phone.listen(5)  # backlog (half-connection pool): caps the number of pending connection requests

# 4. Wait for an incoming call
print('start....')
while True:  # connection loop
    conn, client_addr = phone.accept()  # (two-way connection built by the three-way handshake, (client ip, port))
    # print(conn)
    print('A connection has been established', client_addr)

    # 5. Communicate: receive/send messages
    while True:  # communication loop
        try:
            print('Server is receiving data...')
            data = conn.recv(1024)  # max bytes per read; blocks until the peer sends > 0 bytes
            # print('===>')
            if len(data) == 0:
                break  # the server only reads empty data after the client unilaterally closes its end
            print('Data from the client', data)
            conn.send(data.upper())
        except ConnectionResetError:
            break

    # 6. Hang up the connection
    conn.close()

# 7. Power off (unreachable while the accept loop runs forever)
phone.close()
print('aaa')
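# A minimal client-side counterpart (a sketch under the same assumptions:
# localhost, port 8080, the uppercase-echo protocol above); kept commented
# because this server script never reaches code after its accept loop:
# import socket
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(('127.0.0.1', 8080))
# client.send('hello'.encode('utf-8'))
# print(client.recv(1024))  # b'HELLO'
# client.close()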
fcfcac8d51858207750e6f6453a9f9c6478ac802
b3ab2979dd8638b244abdb2dcf8da26d45d7b730
/test/test_related_permission_model.py
990172835c1b8a26c3210da6641997d46a7c88b8
[]
no_license
CU-CommunityApps/ct-cloudcheckr-cmx-client
4b3d9b82c5dfdaf24f8f443526868e971d8d1b15
18ac9fd4d6c4ae799c0d21745eaecd783da68c0c
refs/heads/main
2023-03-03T19:53:57.685925
2021-02-09T13:05:07
2021-02-09T13:05:07
329,308,757
0
1
null
null
null
null
UTF-8
Python
false
false
977
py
# coding: utf-8

"""
    CloudCheckr API

    CloudCheckr API  # noqa: E501

    OpenAPI spec version: v1
    Contact: [email protected]
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import unittest

import cloudcheckr_cmx_client
from cloudcheckr_cmx_client.models.related_permission_model import RelatedPermissionModel  # noqa: E501
from cloudcheckr_cmx_client.rest import ApiException


class TestRelatedPermissionModel(unittest.TestCase):
    """RelatedPermissionModel unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testRelatedPermissionModel(self):
        """Test RelatedPermissionModel"""
        # FIXME: construct object with mandatory attributes with example values
        # model = cloudcheckr_cmx_client.models.related_permission_model.RelatedPermissionModel()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
8567a2b83ce96aee7862a2f85ae3ce4df8398a62
53192abcbb297198128952df6ceed17a32cb5f1f
/pyidml/models/tags.py
1e1fcd76f396d48e9313ac2465021f33ae9f0098
[]
no_license
guardian/pyidml
2e4ba754c6487eb94193db3a74b32a7b58d79384
39afddfee9c432aa5ff12d526aad0eebd2ac66a3
refs/heads/master
2022-07-01T21:03:46.011695
2015-08-10T12:44:42
2015-08-10T12:44:42
987,182
5
8
null
2022-06-17T21:07:33
2010-10-14T14:51:16
Python
UTF-8
Python
false
false
401
py
from pyidml.fields import *
from pyidml.models import Element, Properties


class Tags(Element):
    DOMVersion = StringField()


class XMLTagProperties(Properties):
    TagColor = StringField()  # TODO InDesignUIColorType_TypeDef


class XMLTag(Element):
    Self = StringField(required=True)
    Name = StringField(required=True)
    Properties = EmbeddedDocumentField(XMLTagProperties)
3f7f296b636c36d74be827a2e22cbcf0d2ca042d
028d788c0fa48a8cb0cc6990a471e8cd46f6ec50
/Python-OOP/Iterators-Generators/Exercise/09_permutations.py
0398ec46a79574384144b0fc86190ae53603e0df
[]
no_license
Sheko1/SoftUni
d6b8e79ae545116f4c0e5705ad842f12d77a9c9d
a9fbeec13a30231b6a97c2b22bb35257ac1481c0
refs/heads/main
2023-07-13T15:39:48.826925
2021-08-21T12:51:02
2021-08-21T12:51:02
317,266,200
2
3
null
null
null
null
UTF-8
Python
false
false
199
py
from itertools import permutations


def possible_permutations(data):
    result = permutations(data)
    for el in result:
        yield [*el]


[print(n) for n in possible_permutations([1, 2, 3])]
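# Expected output for [1, 2, 3] (itertools.permutations yields in positional
# lexicographic order, each converted to a list here):
# [1, 2, 3]
# [1, 3, 2]
# [2, 1, 3]
# [2, 3, 1]
# [3, 1, 2]
# [3, 2, 1]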
51d49121dddb8c05767b1d8f43bd424a39fa97d0
552bc626603a1757cf7836401cff5f0332a91504
/django/django-instagram-clone_kindfamily/instaclone-backend/accounts/admin.py
05daffd421368208ed05399d35596a5e4d5b43c7
[]
no_license
anifilm/webapp
85f3d0aae34f46917b3c9fdf8087ec8da5303df1
7ef1a9a8c0dccc125a8c21b22db7db4b9d5c0cda
refs/heads/master
2023-08-29T18:33:00.323248
2023-08-26T07:42:39
2023-08-26T07:42:39
186,593,754
1
0
null
2023-04-21T12:19:59
2019-05-14T09:49:56
JavaScript
UTF-8
Python
false
false
559
py
from django.contrib import admin

from .models import Profile, Follow


class FollowInline(admin.TabularInline):
    model = Follow
    fk_name = 'from_user'


@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    list_display = ['id', 'nickname', 'user']
    list_display_links = ['nickname', 'user']
    search_fields = ['nickname']
    inlines = [FollowInline]


@admin.register(Follow)
class FollowAdmin(admin.ModelAdmin):
    list_display = ['from_user', 'to_user', 'created_at']
    list_display_links = ['from_user', 'to_user', 'created_at']
4c2b2d67d7dc4ec40f448d6cdbeaca6b5577c01c
ef5f369a8fb3978dbb57cdab2c0f83880fa43c36
/amatino/ledger_order.py
048e9ff9e8eb4cb348903040d2c78b4fc6279022
[ "MIT" ]
permissive
pypi-buildability-project/amatino-python
c8a93c849d9e97ea907d411511a0c732ee51b29e
9178e0883b735f882729c19a7a68df68b49e057b
refs/heads/master
2022-07-19T12:24:06.587840
2020-05-21T05:28:08
2020-05-21T05:28:08
null
0
0
null
null
null
null
UTF-8
Python
false
false
180
py
""" Amatino API Python Bindings Ledger Order Module Author: [email protected] """ from enum import Enum class LedgerOrder(Enum): OLDEST_FIRST = True YOUNGEST_FIRST = False
31192339ea0302d32f15714aa4d5d108ec2ff4b1
443416bab5d7c258936dae678feb27de6c537758
/kratos/python_scripts/application_generator/classes/variableCreator.py
27d35d56986e388d58e7fe4caacfbe7e247208c9
[ "BSD-3-Clause" ]
permissive
pyfsi/Kratos
b941e12594ec487eafcd5377b869c6b6a44681f4
726aa15a04d92c958ba10c8941ce074716115ee8
refs/heads/master
2020-04-27T17:10:10.357084
2019-11-22T09:05:35
2019-11-22T09:05:35
174,507,074
2
0
NOASSERTION
2020-03-27T16:38:28
2019-03-08T09:22:47
C++
UTF-8
Python
false
false
2,293
py
from __future__ import print_function, absolute_import, division

from utils.constants import ctab


class VariableCreator(object):
    def __init__(self, name, vtype, is3D=False):
        ''' Creates a variable for an application

        Input
        -----
        - name: string
            name of the variable

        - vtype: string
            type of the variable

        - is3D: boolean
            determines if the variable is vectorial(True) or scalar(False, default)
            NOTE: This could be replaced by VariableCreator3D at some point.
        '''

        self.defineString = 'KRATOS_DEFINE_VARIABLE( {type}, {name} )\n'.format(type=vtype, name=name)
        self.createString = 'KRATOS_CREATE_VARIABLE( {type}, {name} )\n'.format(type=vtype, name=name)
        self.registerString = ctab + 'KRATOS_REGISTER_VARIABLE( {name} )\n'.format(name=name)
        self.registerPythonString = ctab + 'KRATOS_REGISTER_IN_PYTHON_VARIABLE(m, {name} )\n'.format(name=name)

        # Strings change if it is a 3D variable
        if is3D:
            self.defineString = 'KRATOS_DEFINE_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
            self.createString = 'KRATOS_CREATE_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
            self.registerString = ctab + 'KRATOS_REGISTER_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
            self.registerPythonString = ctab + 'KRATOS_REGISTER_IN_PYTHON_3D_VARIABLE_WITH_COMPONENTS(m, {name} )\n'.format(name=name)


class VariableCreator3D(object):
    def __init__(self, name):
        ''' Creates a 3D variable for an application.
        All 3D variables are "double" by definition

        Input
        -----
        - name: string
            name of the variable
        '''

        self.defineString = 'KRATOS_DEFINE_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
        self.createString = 'KRATOS_CREATE_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
        self.registerString = ctab + 'KRATOS_REGISTER_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
        self.registerPythonString = ctab + 'KRATOS_REGISTER_IN_PYTHON_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
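# A minimal usage sketch (the variable name below is illustrative, not from
# Kratos; assumes utils.constants.ctab is importable as above):
# creator = VariableCreator('MY_GRADIENT', 'double', is3D=True)
# print(creator.defineString)    # KRATOS_DEFINE_3D_VARIABLE_WITH_COMPONENTS( MY_GRADIENT )
# print(creator.registerString)  # tab-indented KRATOS_REGISTER_3D_VARIABLE_WITH_COMPONENTS( MY_GRADIENT )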
a5c0765f4077d63debef0b2e1bd84bb5f445cc20
851e327e5e75392aa755f3d699b474846b886623
/qa/rpc-tests/smartfees.py
f453a2a7c1f68b8c3ebceb91cfdbdb5c58314971
[ "MIT" ]
permissive
advantage-development/v4
140535cfb56e789459078de030c7455ef3228e8f
59245b326a2b1e488be77816dad9c32166465c73
refs/heads/master
2021-10-09T15:32:34.396521
2018-12-30T16:25:43
2018-12-30T16:25:43
163,597,095
0
0
null
null
null
null
UTF-8
Python
false
false
4,309
py
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcredit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#
# Test fee estimation code
#

from test_framework import BitcreditTestFramework
from bitcreditrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *

class EstimateFeeTest(BitcreditTestFramework):

    def setup_network(self):
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir,
                                     ["-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]))
        # Node1 mines small-but-not-tiny blocks, and allows free transactions.
        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
        # so blockmaxsize of 2,000 is really just 1,000 bytes (room enough for
        # 6 or 7 transactions)
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-blockprioritysize=1500", "-blockmaxsize=2000",
                                      "-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]))
        connect_nodes(self.nodes[1], 0)

        # Node2 is a stingy miner, that
        # produces very small blocks (room for only 3 or so transactions)
        node2args = ["-blockprioritysize=0", "-blockmaxsize=1500",
                     "-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]
        self.nodes.append(start_node(2, self.options.tmpdir, node2args))
        connect_nodes(self.nodes[2], 0)

        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        # Prime the memory pool with pairs of transactions
        # (high-priority, random fee and zero-priority, random fee)
        min_fee = Decimal("0.001")
        fees_per_kb = []
        for i in range(12):
            (txid, txhex, fee) = random_zeropri_transaction(self.nodes, Decimal("1.1"),
                                                            min_fee, min_fee, 20)
            tx_kbytes = (len(txhex)/2)/1000.0
            fees_per_kb.append(float(fee)/tx_kbytes)

        # Mine blocks with node2 until the memory pool clears:
        count_start = self.nodes[2].getblockcount()
        while len(self.nodes[2].getrawmempool()) > 0:
            self.nodes[2].setgenerate(True, 1)
            self.sync_all()

        all_estimates = [self.nodes[0].estimatefee(i) for i in range(1, 20)]
        print("Fee estimates, super-stingy miner: "+str([str(e) for e in all_estimates]))

        # Estimates should be within the bounds of what transactions fees actually were:
        delta = 1.0e-6  # account for rounding error
        for e in filter(lambda x: x >= 0, all_estimates):
            if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
                raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                     % (float(e), min(fees_per_kb), max(fees_per_kb)))

        # Generate transactions while mining 30 more blocks, this time with node1:
        for i in range(30):
            for j in range(random.randrange(6-4, 6+4)):
                (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"),
                                                        Decimal("0.0"), min_fee, 20)
                tx_kbytes = (len(txhex)/2)/1000.0
                fees_per_kb.append(float(fee)/tx_kbytes)
            self.nodes[1].setgenerate(True, 1)
            self.sync_all()

        all_estimates = [self.nodes[0].estimatefee(i) for i in range(1, 20)]
        print("Fee estimates, more generous miner: "+str([str(e) for e in all_estimates]))
        for e in filter(lambda x: x >= 0, all_estimates):
            if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
                raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                     % (float(e), min(fees_per_kb), max(fees_per_kb)))

        # Finish by mining a normal-sized block:
        while len(self.nodes[0].getrawmempool()) > 0:
            self.nodes[0].setgenerate(True, 1)
            self.sync_all()

        final_estimates = [self.nodes[0].estimatefee(i) for i in range(1, 20)]
        print("Final fee estimates: "+str([str(e) for e in final_estimates]))

if __name__ == '__main__':
    EstimateFeeTest().main()
618e2b66a661669d15aebbce7f698055d592a0ef
c049d678830eb37879589a866b39f8e72186a742
/upcfcardsearch/c301.py
0a57ec0a5d0c8b1861615d90c6339ee5cee31ad4
[ "MIT" ]
permissive
ProfessorSean/Kasutamaiza
682bec415397ba90e30ab1c31caa6b2e76f1df68
7a69a69258f67bbb88bebbac6da4e6e1434947e6
refs/heads/main
2023-07-28T06:54:44.797222
2021-09-08T22:22:44
2021-09-08T22:22:44
357,771,466
0
0
null
null
null
null
UTF-8
Python
false
false
1,017
py
import discord
from discord.ext import commands
from discord.utils import get


class c301(commands.Cog, name="c301"):
    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.command(name='Mage\'s_Magic', aliases=['c301'])
    async def example_embed(self, ctx):
        embed = discord.Embed(title='Mage\'s Magic', color=0x1D9E74)
        embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2361242.jpg')
        embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)
        embed.add_field(name='Type', value='Spell/Quick-Play', inline=False)
        embed.add_field(name='Card Effect', value='Target 1 Set Spell/Trap you control; banish that target, then banish 2 Spell/Traps on the field. You cannot activate the targeted card this Chain.', inline=False)
        embed.set_footer(text='Set Code: ANCF')
        await ctx.send(embed=embed)


def setup(bot: commands.Bot):
    bot.add_cog(c301(bot))
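# A minimal loading sketch (assumes discord.py 1.x, where load_extension is
# synchronous and invokes this module's setup(); the token is a placeholder):
# bot = commands.Bot(command_prefix='!')
# bot.load_extension('upcfcardsearch.c301')  # module path per this repo layout
# bot.run('YOUR_TOKEN')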
387008f8bd1d7b7e2da36f90cdf6c06072c8b63d
76cef2e2909ffaa6f6b594a8fd1aaaa9b754a69b
/netmiko/scp_handler.py
5e0fb7d0cc0c75aea57b5907f566f8ba35c55ec4
[ "MIT" ]
permissive
jinesh-patel/netmiko
978b7747ea2dea3a8b05208313ffc74846d9c2fc
f19b51f9de783a06102d74ef9780ca8547eb2f89
refs/heads/master
2021-01-18T05:16:16.285330
2015-09-04T01:16:34
2015-09-04T01:16:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,416
py
''' Create a SCP side-channel to transfer a file to remote network device. SCP requires a separate SSH connection. Currently only supports Cisco IOS. ''' from __future__ import print_function from __future__ import unicode_literals import re import os import hashlib import paramiko import scp class SCPConn(object): ''' Establish an SCP channel to the remote network ''' def __init__(self, ssh_conn): self.ssh_ctl_chan = ssh_conn self.establish_scp_conn() def establish_scp_conn(self): ''' Establish the SCP connection ''' self.scp_conn = paramiko.SSHClient() self.scp_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy()) self.scp_conn.connect(hostname=self.ssh_ctl_chan.ip, port=self.ssh_ctl_chan.port, username=self.ssh_ctl_chan.username, password=self.ssh_ctl_chan.password, look_for_keys=False, allow_agent=False, timeout=8) self.scp_client = scp.SCPClient(self.scp_conn.get_transport()) def scp_transfer_file(self, source_file, dest_file): ''' Transfer file using SCP Must close the SCP connection to get the file to write to the remote filesystem ''' self.scp_client.put(source_file, dest_file) def close(self): ''' Close the SCP connection ''' self.scp_conn.close() class FileTransfer(object): ''' Class to manage SCP file transfer and associated SSH control channel ''' def __init__(self, ssh_conn, source_file, dest_file, file_system="flash:"): ''' Establish a SCP connection to the remote network device ''' self.ssh_ctl_chan = ssh_conn self.source_file = source_file self.source_md5 = self.file_md5(source_file) self.dest_file = dest_file self.file_system = file_system src_file_stats = os.stat(source_file) self.file_size = src_file_stats.st_size def __enter__(self): '''Context manager setup''' self.establish_scp_conn() return self def __exit__(self, exc_type, exc_value, traceback): '''Context manager cleanup''' self.close_scp_chan() if exc_type is not None: raise exc_type(exc_value) def establish_scp_conn(self): '''Establish SCP connection''' self.scp_conn = SCPConn(self.ssh_ctl_chan) def close_scp_chan(self): '''Close the SCP connection to the remote network device''' self.scp_conn.close() self.scp_conn = None def verify_space_available(self, search_pattern=r"(.*) bytes available "): ''' Verify sufficient space is available on remote network device Return a boolean ''' remote_cmd = "show {0}".format(self.file_system) remote_output = self.ssh_ctl_chan.send_command(remote_cmd) match = re.search(search_pattern, remote_output) space_avail = int(match.group(1)) if space_avail > self.file_size: return True return False def check_file_exists(self, remote_cmd=""): ''' Check if the dest_file exists on the remote file system Return a boolean ''' if not remote_cmd: remote_cmd = "dir flash:/{0}".format(self.dest_file) remote_out = self.ssh_ctl_chan.send_command(remote_cmd) search_string = r"Directory of .*{0}".format(self.dest_file) if 'Error opening' in remote_out: return False elif re.search(search_string, remote_out): return True else: raise ValueError("Unexpected output from check_file_exists") @staticmethod def file_md5(file_name): ''' Compute MD5 hash of file ''' with open(file_name, "rb") as f: file_contents = f.read() file_hash = hashlib.md5(file_contents).hexdigest() return file_hash @staticmethod def process_md5(md5_output, pattern=r"= (.*)"): ''' Process the string to retrieve the MD5 hash Output from Cisco IOS: .MD5 of flash:file_name Done! 
verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2 ''' match = re.search(pattern, md5_output) if match: return match.group(1) else: raise ValueError("Invalid output from MD5 command: {0}".format(md5_output)) def compare_md5(self, base_cmd='verify /md5', delay_factor=8): ''' Calculate remote MD5 and compare to source MD5 Default command is Cisco specific This command can be CPU intensive on the remote device Return boolean ''' remote_md5_cmd = "{0} {1}{2}".format(base_cmd, self.file_system, self.dest_file) dest_md5 = self.ssh_ctl_chan.send_command(remote_md5_cmd, delay_factor=delay_factor) dest_md5 = self.process_md5(dest_md5) if self.source_md5 != dest_md5: return False else: return True def transfer_file(self): ''' SCP transfer source_file to Cisco IOS device Verifies MD5 of file on remote device or generates an exception ''' self.scp_conn.scp_transfer_file(self.source_file, self.dest_file) # Must close the SCP connection to get the file written to the remote filesystem (flush) self.scp_conn.close() def verify_file(self): ''' Verify the file has been transferred correctly ''' return self.compare_md5() def enable_scp(self, cmd=None): ''' Enable SCP on remote device. Defaults to Cisco IOS command ''' if cmd is None: cmd = ['ip scp server enable'] elif not hasattr(cmd, '__iter__'): cmd = [cmd] self.ssh_ctl_chan.send_config_set(cmd) def disable_scp(self, cmd=None): ''' Disable SCP on remote device. Defaults to Cisco IOS command ''' if cmd is None: cmd = ['no ip scp server enable'] elif not hasattr(cmd, '__iter__'): cmd = [cmd] self.ssh_ctl_chan.send_config_set(cmd)
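# A minimal usage sketch for the FileTransfer class above (editor assumption:
# a reachable Cisco IOS device and netmiko's ConnectHandler; all connection
# values are placeholders):
# from netmiko import ConnectHandler
# ssh_conn = ConnectHandler(device_type='cisco_ios', ip='10.0.0.1',
#                           username='admin', password='secret')
# with FileTransfer(ssh_conn, 'local.bin', 'local.bin') as ft:
#     if ft.verify_space_available() and not ft.check_file_exists():
#         ft.transfer_file()            # SCP copy; closes the SCP channel to flush
#         assert ft.verify_file()       # compares source and destination MD5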
f10d440a207cdc966fcc27e8e8a60b4fe07de07f
ee6ec35a80351480d566b5c65ae331a0f6f577ee
/models/tempdb_tdc.py
ec464c80db58a3e4cf9f2425672a584466aae249
[]
no_license
aroodooteam/connecteur_aro_tempdb
d262103a816156e73a4c30fd86ad6175f449a8b3
f29629a6b380487e6a38cb98344b446ecb49adf5
refs/heads/master
2020-03-12T14:05:21.855008
2018-04-23T08:28:01
2018-04-23T08:28:01
130,658,701
0
0
null
null
null
null
UTF-8
Python
false
false
447
py
# -*- coding: utf-8 -*-
from openerp import models, fields


class TempdbTdc(models.Model):
    _name = 'tempdb.tdc'
    _description = 'Load tempdb_tdc in recup nom apporteur'

    name = fields.Char(string='Name')
    statut = fields.Char(string='Statut')
    agence = fields.Char(string='Agence', size=4)
    old = fields.Char(string='Old', size=8)
    new = fields.Char(string='New', size=16)
    titre = fields.Char(string='Titre', size=16)
7a7d02691a84fce6559b63583b7262b88c04daa9
ad59fb12042bfd3f5c43eca057d0f747f9e148cf
/Se2iP/usr/lib/enigma2/python/Plugins/Extensions/IPTVPlayer/tsiplayer/addons/resources/hosters/dustreaming.py
07a22d09c704cc118d14249ac2bf9d521d16dc1e
[]
no_license
lexlong2007/eePlugins
d62b787100a7069ad5713a47c5688008063b45ec
167b262fe36901a2d3a2fae6d0f85e2307b3eff7
refs/heads/master
2022-03-09T05:37:37.567937
2022-02-27T01:44:25
2022-02-27T01:44:25
253,012,126
0
0
null
2020-04-04T14:03:29
2020-04-04T14:03:29
null
UTF-8
Python
false
false
2,140
py
#-*- coding: utf-8 -*-
# Vstream https://github.com/Kodi-vStream/venom-xbmc-addons
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.hosters.hoster import iHoster
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import dialog
import json


class cHoster(iHoster):

    def __init__(self):
        self.__sDisplayName = 'Dustreaming'
        self.__sFileName = self.__sDisplayName
        self.__sHD = ''

    def getDisplayName(self):
        return self.__sDisplayName

    def setDisplayName(self, sDisplayName):
        self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR]'

    def setFileName(self, sFileName):
        self.__sFileName = sFileName

    def getFileName(self):
        return self.__sFileName

    def getPluginIdentifier(self):
        return 'dustreaming'

    def setHD(self, sHD):
        self.__sHD = ''

    def getHD(self):
        return self.__sHD

    def isDownloadable(self):
        return True

    def setUrl(self, sUrl):
        self.__sUrl = str(sUrl)

    def getMediaLink(self):
        return self.__getMediaLinkForGuest()

    def __getMediaLinkForGuest(self):
        api_call = ''
        sUrl = self.__sUrl.replace('/v/', '/api/source/')

        oRequest = cRequestHandler(sUrl)
        oRequest.setRequestType(cRequestHandler.REQUEST_TYPE_POST)
        oRequest.addHeaderEntry('Referer', self.__sUrl)
        oRequest.addParameters('r', '')
        oRequest.addParameters('d', 'dustreaming.fr')
        sHtmlContent = oRequest.request()

        page = json.loads(sHtmlContent)
        if page:
            url = []
            qua = []
            for x in page['data']:
                url.append(x['file'])
                qua.append(x['label'])

            if (url):
                api_call = dialog().VSselectqual(qua, url)

        if (api_call):
            return True, api_call

        return False, False
25daeb07963fe92517947f4aa8197cc45303103d
68e65df90da9169733025dfede0a8b30a5e3d7e3
/Inheritance_and_More_on_OOPS/11_practice_test2.py
a5a4b0e8cd0c57a3f7980babe409762b8328ee98
[]
no_license
shubam-garg/Python-Beginner
290346cbb309a28d28d6ac04034cb084b71ccbc6
30742006c380a0a18aff574567a95c8b8c694754
refs/heads/main
2023-05-06T07:11:29.943475
2021-05-29T20:35:59
2021-05-29T20:35:59
354,527,532
0
0
null
null
null
null
UTF-8
Python
false
false
299
py
'''
create a class pets from class animals and further create class dog
from pets, add a method bark to class dog
'''


class animals:
    animaltype = "Mammal"


class pets(animals):
    petcolor = "Black"


class dog(pets):
    @staticmethod
    def bark():
        print("dog")


d = dog()
d.bark()
ca6db4381b477d224e0c52a8e60201f6444ddf5d
1d0a4750e216f301ec49a247bf7bf07cd61fa29f
/app/views/reports/integration/advantage_payroll/advantage_payroll_client_setup_csv.py
fcfcf75d4a02f73167a6dcc346bf96726af2ace1
[]
no_license
smoothbenefits/BenefitMY_Python
52745a11db2cc9ab394c8de7954974e6d5a05e13
b7e8474a728bc22778fd24fe88d1918945a8cfc8
refs/heads/master
2021-03-27T15:57:34.798289
2018-04-29T19:04:04
2018-04-29T19:04:04
24,351,568
0
1
null
null
null
null
UTF-8
Python
false
false
751
py
from rest_framework.response import Response
from django.http import HttpResponse
from django.http import Http404

from app.views.reports.report_export_view_base import ReportExportViewBase
from app.service.Report.integration.advantage_payroll.advantage_payroll_company_setup_csv_service \
    import AdvantagePayrollCompanySetupCsvService


class AdvantagePayrollClientSetupCsvView(ReportExportViewBase):
    def get(self, request, company_id, format=None):
        csv_service = AdvantagePayrollCompanySetupCsvService()
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=client_setup.csv'
        csv_service.get_report(company_id, response)
        return response
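Django's HttpResponse is file-like, which is why the view can hand it straight to the CSV service. The service itself is not shown in this file; a hypothetical sketch of the get_report contract it appears to implement, with invented column names:

# Hypothetical sketch of the get_report(company_id, output_stream) contract
# used above; the real AdvantagePayrollCompanySetupCsvService is defined
# elsewhere and its columns are not visible here.
import csv

class CompanySetupCsvServiceSketch(object):
    def get_report(self, company_id, output_stream):
        writer = csv.writer(output_stream)               # HttpResponse is file-like
        writer.writerow(['company_id', 'client_code'])   # invented header
        writer.writerow([company_id, 'ACME001'])         # invented row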
79b5630a7028e9684b024691493af2f536e75980
159d4ae61f4ca91d94e29e769697ff46d11ae4a4
/venv/lib/python3.9/site-packages/pandas/tests/libs/test_hashtable.py
08bfc74e0ef8dcfccfd2496ccc91cde128205b85
[ "MIT" ]
permissive
davidycliao/bisCrawler
729db002afe10ae405306b9eed45b782e68eace8
f42281f35b866b52e5860b6a062790ae8147a4a4
refs/heads/main
2023-05-24T00:41:50.224279
2023-01-22T23:17:51
2023-01-22T23:17:51
411,470,732
8
0
MIT
2023-02-09T16:28:24
2021-09-28T23:48:13
Python
UTF-8
Python
false
false
18,483
py
from contextlib import contextmanager
import tracemalloc

import numpy as np
import pytest

from pandas._libs import hashtable as ht

import pandas as pd
import pandas._testing as tm
from pandas.core.algorithms import isin


@contextmanager
def activated_tracemalloc():
    tracemalloc.start()
    try:
        yield
    finally:
        tracemalloc.stop()


def get_allocated_khash_memory():
    snapshot = tracemalloc.take_snapshot()
    snapshot = snapshot.filter_traces(
        (tracemalloc.DomainFilter(True, ht.get_hashtable_trace_domain()),)
    )
    return sum(map(lambda x: x.size, snapshot.traces))


@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.PyObjectHashTable, np.object_),
        (ht.Complex128HashTable, np.complex128),
        (ht.Int64HashTable, np.int64),
        (ht.UInt64HashTable, np.uint64),
        (ht.Float64HashTable, np.float64),
        (ht.Complex64HashTable, np.complex64),
        (ht.Int32HashTable, np.int32),
        (ht.UInt32HashTable, np.uint32),
        (ht.Float32HashTable, np.float32),
        (ht.Int16HashTable, np.int16),
        (ht.UInt16HashTable, np.uint16),
        (ht.Int8HashTable, np.int8),
        (ht.UInt8HashTable, np.uint8),
    ],
)
class TestHashTable:
    def test_get_set_contains_len(self, table_type, dtype):
        index = 5
        table = table_type(55)
        assert len(table) == 0
        assert index not in table

        table.set_item(index, 42)
        assert len(table) == 1
        assert index in table
        assert table.get_item(index) == 42

        table.set_item(index + 1, 41)
        assert index in table
        assert index + 1 in table
        assert len(table) == 2
        assert table.get_item(index) == 42
        assert table.get_item(index + 1) == 41

        table.set_item(index, 21)
        assert index in table
        assert index + 1 in table
        assert len(table) == 2
        assert table.get_item(index) == 21
        assert table.get_item(index + 1) == 41
        assert index + 2 not in table

        with pytest.raises(KeyError, match=str(index + 2)):
            table.get_item(index + 2)

    def test_map(self, table_type, dtype, writable):
        # PyObjectHashTable has no map-method
        if table_type != ht.PyObjectHashTable:
            N = 77
            table = table_type()
            keys = np.arange(N).astype(dtype)
            vals = np.arange(N).astype(np.int64) + N
            keys.flags.writeable = writable
            vals.flags.writeable = writable
            table.map(keys, vals)
            for i in range(N):
                assert table.get_item(keys[i]) == i + N

    def test_map_locations(self, table_type, dtype, writable):
        N = 8
        table = table_type()
        keys = (np.arange(N) + N).astype(dtype)
        keys.flags.writeable = writable
        table.map_locations(keys)
        for i in range(N):
            assert table.get_item(keys[i]) == i

    def test_lookup(self, table_type, dtype, writable):
        N = 3
        table = table_type()
        keys = (np.arange(N) + N).astype(dtype)
        keys.flags.writeable = writable
        table.map_locations(keys)
        result = table.lookup(keys)
        expected = np.arange(N)
        tm.assert_numpy_array_equal(result.astype(np.int64), expected.astype(np.int64))

    def test_lookup_wrong(self, table_type, dtype):
        if dtype in (np.int8, np.uint8):
            N = 100
        else:
            N = 512
        table = table_type()
        keys = (np.arange(N) + N).astype(dtype)
        table.map_locations(keys)
        wrong_keys = np.arange(N).astype(dtype)
        result = table.lookup(wrong_keys)
        assert np.all(result == -1)

    def test_unique(self, table_type, dtype, writable):
        if dtype in (np.int8, np.uint8):
            N = 88
        else:
            N = 1000
        table = table_type()
        expected = (np.arange(N) + N).astype(dtype)
        keys = np.repeat(expected, 5)
        keys.flags.writeable = writable
        unique = table.unique(keys)
        tm.assert_numpy_array_equal(unique, expected)

    def test_tracemalloc_works(self, table_type, dtype):
        if dtype in (np.int8, np.uint8):
            N = 256
        else:
            N = 30000
        keys = np.arange(N).astype(dtype)
        with activated_tracemalloc():
            table = table_type()
            table.map_locations(keys)
            used = get_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert get_allocated_khash_memory() == 0

    def test_tracemalloc_for_empty(self, table_type, dtype):
        with activated_tracemalloc():
            table = table_type()
            used = get_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert get_allocated_khash_memory() == 0

    def test_get_state(self, table_type, dtype):
        table = table_type(1000)
        state = table.get_state()
        assert state["size"] == 0
        assert state["n_occupied"] == 0
        assert "n_buckets" in state
        assert "upper_bound" in state

    def test_no_reallocation(self, table_type, dtype):
        for N in range(1, 110):
            keys = np.arange(N).astype(dtype)
            preallocated_table = table_type(N)
            n_buckets_start = preallocated_table.get_state()["n_buckets"]
            preallocated_table.map_locations(keys)
            n_buckets_end = preallocated_table.get_state()["n_buckets"]
            # original number of buckets was enough:
            assert n_buckets_start == n_buckets_end
            # check with clean table (not too much preallocated)
            clean_table = table_type()
            clean_table.map_locations(keys)
            assert n_buckets_start == clean_table.get_state()["n_buckets"]


class TestPyObjectHashTableWithNans:
    def test_nan_float(self):
        nan1 = float("nan")
        nan2 = float("nan")
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.get_item(nan2) == 42

    def test_nan_complex_both(self):
        nan1 = complex(float("nan"), float("nan"))
        nan2 = complex(float("nan"), float("nan"))
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.get_item(nan2) == 42

    def test_nan_complex_real(self):
        nan1 = complex(float("nan"), 1)
        nan2 = complex(float("nan"), 1)
        other = complex(float("nan"), 2)
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.get_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.get_item(other)
        assert str(error.value) == str(other)

    def test_nan_complex_imag(self):
        nan1 = complex(1, float("nan"))
        nan2 = complex(1, float("nan"))
        other = complex(2, float("nan"))
        assert nan1 is not nan2
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.get_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.get_item(other)
        assert str(error.value) == str(other)

    def test_nan_in_tuple(self):
        nan1 = (float("nan"),)
        nan2 = (float("nan"),)
        assert nan1[0] is not nan2[0]
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.get_item(nan2) == 42

    def test_nan_in_nested_tuple(self):
        nan1 = (1, (2, (float("nan"),)))
        nan2 = (1, (2, (float("nan"),)))
        other = (1, 2)
        table = ht.PyObjectHashTable()
        table.set_item(nan1, 42)
        assert table.get_item(nan2) == 42
        with pytest.raises(KeyError, match=None) as error:
            table.get_item(other)
        assert str(error.value) == str(other)


def test_hash_equal_tuple_with_nans():
    a = (float("nan"), (float("nan"), float("nan")))
    b = (float("nan"), (float("nan"), float("nan")))
    assert ht.object_hash(a) == ht.object_hash(b)
    assert ht.objects_are_equal(a, b)


def test_get_labels_groupby_for_Int64(writable):
    table = ht.Int64HashTable()
    vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
    vals.flags.writeable = writable
    arr, unique = table.get_labels_groupby(vals)
    expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.int64)
    expected_unique = np.array([1, 2], dtype=np.int64)
    tm.assert_numpy_array_equal(arr.astype(np.int64), expected_arr)
    tm.assert_numpy_array_equal(unique, expected_unique)


def test_tracemalloc_works_for_StringHashTable():
    N = 1000
    keys = np.arange(N).astype(np.compat.unicode).astype(np.object_)
    with activated_tracemalloc():
        table = ht.StringHashTable()
        table.map_locations(keys)
        used = get_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert get_allocated_khash_memory() == 0


def test_tracemalloc_for_empty_StringHashTable():
    with activated_tracemalloc():
        table = ht.StringHashTable()
        used = get_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert get_allocated_khash_memory() == 0


def test_no_reallocation_StringHashTable():
    for N in range(1, 110):
        keys = np.arange(N).astype(np.compat.unicode).astype(np.object_)
        preallocated_table = ht.StringHashTable(N)
        n_buckets_start = preallocated_table.get_state()["n_buckets"]
        preallocated_table.map_locations(keys)
        n_buckets_end = preallocated_table.get_state()["n_buckets"]
        # original number of buckets was enough:
        assert n_buckets_start == n_buckets_end
        # check with clean table (not too much preallocated)
        clean_table = ht.StringHashTable()
        clean_table.map_locations(keys)
        assert n_buckets_start == clean_table.get_state()["n_buckets"]


@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.Float64HashTable, np.float64),
        (ht.Float32HashTable, np.float32),
        (ht.Complex128HashTable, np.complex128),
        (ht.Complex64HashTable, np.complex64),
    ],
)
class TestHashTableWithNans:
    def test_get_set_contains_len(self, table_type, dtype):
        index = float("nan")
        table = table_type()
        assert index not in table

        table.set_item(index, 42)
        assert len(table) == 1
        assert index in table
        assert table.get_item(index) == 42

        table.set_item(index, 41)
        assert len(table) == 1
        assert index in table
        assert table.get_item(index) == 41

    def test_map(self, table_type, dtype):
        N = 332
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        vals = (np.arange(N) + N).astype(np.int64)
        table.map(keys, vals)
        assert len(table) == 1
        assert table.get_item(np.nan) == 2 * N - 1

    def test_map_locations(self, table_type, dtype):
        N = 10
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        table.map_locations(keys)
        assert len(table) == 1
        assert table.get_item(np.nan) == N - 1

    def test_unique(self, table_type, dtype):
        N = 1020
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        unique = table.unique(keys)
        assert np.all(np.isnan(unique)) and len(unique) == 1


def test_unique_for_nan_objects_floats():
    table = ht.PyObjectHashTable()
    keys = np.array([float("nan") for i in range(50)], dtype=np.object_)
    unique = table.unique(keys)
    assert len(unique) == 1


def test_unique_for_nan_objects_complex():
    table = ht.PyObjectHashTable()
    keys = np.array([complex(float("nan"), 1.0) for i in range(50)], dtype=np.object_)
    unique = table.unique(keys)
    assert len(unique) == 1


def test_unique_for_nan_objects_tuple():
    table = ht.PyObjectHashTable()
    keys = np.array(
        [1] + [(1.0, (float("nan"), 1.0)) for i in range(50)], dtype=np.object_
    )
    unique = table.unique(keys)
    assert len(unique) == 2


def get_ht_function(fun_name, type_suffix):
    return getattr(ht, fun_name)


@pytest.mark.parametrize(
    "dtype, type_suffix",
    [
        (np.object_, "object"),
        (np.complex128, "complex128"),
        (np.int64, "int64"),
        (np.uint64, "uint64"),
        (np.float64, "float64"),
        (np.complex64, "complex64"),
        (np.int32, "int32"),
        (np.uint32, "uint32"),
        (np.float32, "float32"),
        (np.int16, "int16"),
        (np.uint16, "uint16"),
        (np.int8, "int8"),
        (np.uint8, "uint8"),
    ],
)
class TestHelpFunctions:
    def test_value_count(self, dtype, type_suffix, writable):
        N = 43
        value_count = get_ht_function("value_count", type_suffix)
        expected = (np.arange(N) + N).astype(dtype)
        values = np.repeat(expected, 5)
        values.flags.writeable = writable
        keys, counts = value_count(values, False)
        tm.assert_numpy_array_equal(np.sort(keys), expected)
        assert np.all(counts == 5)

    def test_value_count_stable(self, dtype, type_suffix, writable):
        # GH12679
        value_count = get_ht_function("value_count", type_suffix)
        values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
        values.flags.writeable = writable
        keys, counts = value_count(values, False)
        tm.assert_numpy_array_equal(keys, values)
        assert np.all(counts == 1)

    def test_duplicated_first(self, dtype, type_suffix, writable):
        N = 100
        duplicated = get_ht_function("duplicated", type_suffix)
        values = np.repeat(np.arange(N).astype(dtype), 5)
        values.flags.writeable = writable
        result = duplicated(values)
        expected = np.ones_like(values, dtype=np.bool_)
        expected[::5] = False
        tm.assert_numpy_array_equal(result, expected)

    def test_ismember_yes(self, dtype, type_suffix, writable):
        N = 127
        ismember = get_ht_function("ismember", type_suffix)
        arr = np.arange(N).astype(dtype)
        values = np.arange(N).astype(dtype)
        arr.flags.writeable = writable
        values.flags.writeable = writable
        result = ismember(arr, values)
        expected = np.ones_like(values, dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)

    def test_ismember_no(self, dtype, type_suffix):
        N = 17
        ismember = get_ht_function("ismember", type_suffix)
        arr = np.arange(N).astype(dtype)
        values = (np.arange(N) + N).astype(dtype)
        result = ismember(arr, values)
        expected = np.zeros_like(values, dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)

    def test_mode(self, dtype, type_suffix, writable):
        if dtype in (np.int8, np.uint8):
            N = 53
        else:
            N = 11111
        mode = get_ht_function("mode", type_suffix)
        values = np.repeat(np.arange(N).astype(dtype), 5)
        values[0] = 42
        values.flags.writeable = writable
        result = mode(values, False)
        assert result == 42

    def test_mode_stable(self, dtype, type_suffix, writable):
        mode = get_ht_function("mode", type_suffix)
        values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
        values.flags.writeable = writable
        keys = mode(values, False)
        tm.assert_numpy_array_equal(keys, values)


def test_modes_with_nans():
    # GH39007
    values = np.array([True, pd.NA, np.nan], dtype=np.object_)
    # pd.Na and np.nan will have the same representative: np.nan
    # thus we have 2 nans and 1 True
    modes = ht.mode(values, False)
    assert modes.size == 1
    assert np.isnan(modes[0])


@pytest.mark.parametrize(
    "dtype, type_suffix",
    [
        (np.float64, "float64"),
        (np.float32, "float32"),
        (np.complex128, "complex128"),
        (np.complex64, "complex64"),
    ],
)
class TestHelpFunctionsWithNans:
    def test_value_count(self, dtype, type_suffix):
        value_count = get_ht_function("value_count", type_suffix)
        values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        keys, counts = value_count(values, True)
        assert len(keys) == 0
        keys, counts = value_count(values, False)
        assert len(keys) == 1 and np.all(np.isnan(keys))
        assert counts[0] == 3

    def test_duplicated_first(self, dtype, type_suffix):
        duplicated = get_ht_function("duplicated", type_suffix)
        values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        result = duplicated(values)
        expected = np.array([False, True, True])
        tm.assert_numpy_array_equal(result, expected)

    def test_ismember_yes(self, dtype, type_suffix):
        ismember = get_ht_function("ismember", type_suffix)
        arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        values = np.array([np.nan, np.nan], dtype=dtype)
        result = ismember(arr, values)
        expected = np.array([True, True, True], dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)

    def test_ismember_no(self, dtype, type_suffix):
        ismember = get_ht_function("ismember", type_suffix)
        arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        values = np.array([1], dtype=dtype)
        result = ismember(arr, values)
        expected = np.array([False, False, False], dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)

    def test_mode(self, dtype, type_suffix):
        mode = get_ht_function("mode", type_suffix)
        values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype)
        assert mode(values, True) == 42
        assert np.isnan(mode(values, False))


def test_ismember_tuple_with_nans():
    # GH-41836
    values = [("a", float("nan")), ("b", 1)]
    comps = [("a", float("nan"))]
    result = isin(values, comps)
    expected = np.array([True, False], dtype=np.bool_)
    tm.assert_numpy_array_equal(result, expected)


def test_float_complex_int_are_equal_as_objects():
    values = ["a", 5, 5.0, 5.0 + 0j]
    comps = list(range(129))
    result = isin(values, comps)
    expected = np.array([False, True, True, True], dtype=np.bool_)
    tm.assert_numpy_array_equal(result, expected)
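The tests above exercise pandas' internal khash-backed hash tables. A minimal interactive sketch of the same calls; pandas._libs.hashtable is a private module, so this mirrors the API only as the tests use it and may change between pandas releases:

# Sketch of the internal hashtable API as exercised by the tests above;
# this is private pandas machinery, not a stable public interface.
import numpy as np
from pandas._libs import hashtable as ht

table = ht.Int64HashTable()
keys = np.array([10, 20, 20, 30], dtype=np.int64)
table.map_locations(keys)     # maps each key to its (last) position
print(table.get_item(20))     # 2: the later occurrence wins
print(table.unique(keys))     # [10 20 30], in order of first appearance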
ab58404bbcdbd307415fd151a7e798392d35f0f6
25872e1ba4f86cbbf77d0130f341b21e5dd9e692
/GameOfLife.py
00600969a68b00f6a35c13b6dc1b798ddd428888
[]
no_license
zongxinwu92/leetcode
dc3d209e14532b9b01cfce6d4cf6a4c2d7ced7de
e1aa45a1ee4edaf72447b771ada835ad73e7f508
refs/heads/master
2021-06-10T21:46:23.937268
2017-01-09T09:58:49
2017-01-09T09:58:49
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,468
py
'''
Created on 1.12.2017

@author: Jesse
'''
'''
According to the Wikipedia's article: "The Game of Life, also known simply as
Life, is a cellular automaton devised by the British mathematician John Horton
Conway in 1970."

Given a board with m by n cells, each cell has an initial state live (1) or
dead (0). Each cell interacts with its eight neighbors (horizontal, vertical,
diagonal) using the following four rules (taken from the above Wikipedia
article):

Any live cell with fewer than two live neighbors dies, as if caused by
under-population.
Any live cell with two or three live neighbors lives on to the next generation.
Any live cell with more than three live neighbors dies, as if by
over-population.
Any dead cell with exactly three live neighbors becomes a live cell, as if by
reproduction.

Write a function to compute the next state (after one update) of the board
given its current state.

Follow up:
Could you solve it in-place? Remember that the board needs to be updated at
the same time: You cannot update some cells first and then use their updated
values to update other cells.
In this question, we represent the board using a 2D array. In principle, the
board is infinite, which would cause problems when the active area encroaches
the border of the array. How would you address these problems?

Credits: Special thanks to @jianchao.li.fighter for adding this problem and
creating all test cases.
'''
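The file ends with the problem statement and contains no implementation. A minimal in-place sketch of one standard solution (not recovered from this file): bit 0 holds the current state, bit 1 the next state, and a final pass shifts every cell:

# One common in-place solution sketch for the problem stated above; this is
# the standard two-bit encoding approach, not the author's own code.
def game_of_life(board):
    rows, cols = len(board), len(board[0])
    for r in range(rows):
        for c in range(cols):
            # Count live neighbors using only bit 0 (the current state)
            live = sum(
                board[r + dr][c + dc] & 1
                for dr in (-1, 0, 1) for dc in (-1, 0, 1)
                if (dr or dc) and 0 <= r + dr < rows and 0 <= c + dc < cols
            )
            # Only rule 2 (live cell with 2-3 neighbors) and rule 4 (dead
            # cell with exactly 3 neighbors) yield a live cell next round
            if (board[r][c] & 1 and live in (2, 3)) or (not board[r][c] & 1 and live == 3):
                board[r][c] |= 2          # stash next state in bit 1
    for r in range(rows):
        for c in range(cols):
            board[r][c] >>= 1             # bit 1 becomes the new state

# A vertical "blinker" rotates into a horizontal one after a single update
board = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
game_of_life(board)
print(board)  # [[0, 0, 0], [1, 1, 1], [0, 0, 0]]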