Dataset schema (one row per source file; fields are pipe-separated in the column order below; "nullable" marks columns that may be ⌀, i.e. null):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 3 to 616)
content_id: string (length 40)
detected_licenses: sequence (length 0 to 112)
license_type: string (2 classes)
repo_name: string (length 5 to 115)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (777 classes)
visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id: int64 (4.92k to 681M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (22 classes)
gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
gha_language: string (149 classes)
src_encoding: string (26 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (3 to 10.2M)
extension: string (188 classes)
content: string (length 3 to 10.2M)
authors: sequence (length 1)
author_id: string (length 1 to 132)
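A minimal sketch of querying such a dump with pandas; the file name "stack_rows.parquet" is hypothetical, since the dump itself does not name one:

import pandas as pd

df = pd.read_parquet("stack_rows.parquet")
# Keep permissively licensed Python files under 1 MB (columns per the schema above).
small = df[(df["license_type"] == "permissive") & (df["length_bytes"] < 1_000_000)]
print(small[["repo_name", "path", "length_bytes"]].head())

The rows follow, one source file per row: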
d7796aa004c50f361ca9e8a8a0b3f4ef051ceb67 | d9b672fd2096b9e91dda9c82a4ae47d628f023e4 | /settings.py | a0655b84c59756eff3b37f60615c035fa474b8bd | [
"Apache-2.0"
] | permissive | homepods/silver-payu | 3f314be0b1cd2409b80a79ad1519b0b881506d69 | 6d2ad761d5972f4b92928c12dde97959939d2786 | refs/heads/master | 2020-06-22T06:15:20.522288 | 2018-10-01T09:24:13 | 2018-10-01T09:24:13 | 197,654,966 | 1 | 0 | null | 2019-07-18T20:50:09 | 2019-07-18T20:50:09 | null | UTF-8 | Python | false | false | 3,602 | py | # Copyright (c) 2015 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from silver import HOOK_EVENTS as _HOOK_EVENTS
from django.utils.log import DEFAULT_LOGGING as LOGGING
"""
These settings are used by the ``manage.py`` command.
"""
import os
DEBUG = False
SITE_ID = 1
USE_TZ = True
TIME_ZONE = 'UTC'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite',
}
}
EXTERNAL_APPS = [
# Django core apps
# 'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
]
INSTALLED_APPS = EXTERNAL_APPS
ROOT_URLCONF = 'silver.urls'
PROJECT_ROOT = os.path.dirname(__file__)
FIXTURE_DIRS = (
PROJECT_ROOT,
PROJECT_ROOT + '/silver/'
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
PROJECT_ROOT + '/payment_processors/templates/',
PROJECT_ROOT + '/templates/',
PROJECT_ROOT + '/silver/templates/',
],
'OPTIONS': {
'context_processors': (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages"
)
}
}
]
MEDIA_ROOT = PROJECT_ROOT + '/app_media/'
MEDIA_URL = '/app_media/'
STATIC_ROOT = PROJECT_ROOT + '/app_static/'
STATIC_URL = '/app_static/'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
SECRET_KEY = 'secret'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'silver.api.pagination.LinkHeaderPagination'
}
LOGGING['loggers']['xhtml2pdf'] = {
'level': 'DEBUG',
'handlers': ['console']
}
LOGGING['loggers']['pisa'] = {
'level': 'DEBUG',
'handlers': ['console']
}
LOGGING['loggers']['django'] = {
'level': 'DEBUG',
'handlers': ['console']
}
LOGGING['loggers']['django.security'] = {
'level': 'DEBUG',
'handlers': ['console']
}
LOGGING['formatters'] = {}
LOGGING['formatters']['verbose'] = {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
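# Usage sketch (a deployment assumption, not part of the original file): point
# Django at this module before running management or ORM code, e.g.
#
#   import os, django
#   os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
#   django.setup()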
| [
"[email protected]"
] | |
c74db58ad4e08ae2ba7fae4dcad8d34e14650f04 | f719dc32c437a15c0eb7a229adc2848e4646a172 | /billy/api/customer/forms.py | fd102c147046aae78bf8ff40db5b2ac935fa24db | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | grang5/billy | db3a88b650962f25b8bdea80a81c5efa5d80dec0 | a723c3aca18f817829ae088f469fabc5bea9d538 | refs/heads/master | 2021-04-18T19:36:05.586549 | 2014-06-16T21:47:37 | 2014-06-16T21:47:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from __future__ import unicode_literals
from wtforms import Form
from wtforms import TextField
from wtforms import validators
class CustomerCreateForm(Form):
processor_uri = TextField('URI of customer in processor', [
validators.Optional(),
])
| [
"[email protected]"
] | |
b25403fc53f5f5969f36e94ee28d051866d5f2e6 | 5983ea8a59cd0b9763e0eb0dfc7f26dfd2ba5e60 | /2019102962刘铎/刘铎-2019102962/DQN-First/maze_env.py | 33157255db41910b66f0e97841906122112ce32f | [] | no_license | wanghan79/2020_Master_Python | 0d8bdcff719a4b3917caa76ae318e3f8134fa83a | b3f6f3825b66b93ec6c54ed6187f6c0edcad6010 | refs/heads/master | 2021-01-26T13:29:09.439023 | 2020-06-23T02:17:52 | 2020-06-23T02:17:52 | 243,442,589 | 11 | 6 | null | 2020-03-29T05:59:29 | 2020-02-27T05:55:52 | Python | UTF-8 | Python | false | false | 4,088 | py | """
Reinforcement learning maze example.
Red rectangle: explorer.
Black rectangles: hells [reward = -1].
Yellow bin circle: paradise [reward = +1].
All other states: ground [reward = 0].
This script is the environment part of this example.
The RL is in RL_brain.py.
"""
import numpy as np
import time
import sys
if sys.version_info.major == 2:
import Tkinter as tk
else:
import tkinter as tk
UNIT = 40 # pixels
MAZE_H = 4 # grid height
MAZE_W = 4 # grid width
class Maze(tk.Tk, object):
def __init__(self):
super(Maze, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.n_features = 2
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(self):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
# create grids
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
# create origin
origin = np.array([20, 20])
# hell
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(
hell1_center[0] - 15, hell1_center[1] - 15,
hell1_center[0] + 15, hell1_center[1] + 15,
fill='black')
# hell
# hell2_center = origin + np.array([UNIT, UNIT * 2])
# self.hell2 = self.canvas.create_rectangle(
# hell2_center[0] - 15, hell2_center[1] - 15,
# hell2_center[0] + 15, hell2_center[1] + 15,
# fill='black')
# create oval
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(
oval_center[0] - 15, oval_center[1] - 15,
oval_center[0] + 15, oval_center[1] + 15,
fill='yellow')
# create red rect
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.1)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# return observation
return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2]))/(MAZE_H*UNIT)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent
next_coords = self.canvas.coords(self.rect) # next state
# reward function
if next_coords == self.canvas.coords(self.oval):
reward = 1
done = True
elif next_coords in [self.canvas.coords(self.hell1)]:
reward = -1
done = True
else:
reward = 0
done = False
s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2]))/(MAZE_H*UNIT)
return s_, reward, done
def render(self):
# time.sleep(0.01)
        self.update()
| [
"[email protected]"
] | |
878bd7c77267774dbcacd5ed1f887c9c3b26ddb7 | c5294a8e9a6aa7da37850443d3a5d366ee4b5c35 | /build/spencer_people_tracking/launch/spencer_people_tracking_launch/catkin_generated/pkg.installspace.context.pc.py | 0e8b747bde57ae825f82cfdacbb221758a0acfbb | [] | no_license | scutDavid/ros_gradution_project | 6eab9a5776ae090ae8999d31e840a12a99020c79 | fbbd83ada5aa223809615d55a48e632699afd4b5 | refs/heads/master | 2020-03-07T18:39:24.084619 | 2018-04-25T13:41:04 | 2018-04-25T13:41:04 | 127,647,113 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "spencer_people_tracking_launch"
PROJECT_SPACE_DIR = "/home/wwh/qqq/install"
PROJECT_VERSION = "1.0.8"
| [
"[email protected]"
] | |
cede0e482d3ea3a08d161be9ba8ae74f9c9f82fa | 7dc05dc9ba548cc97ebe96ed1f0dab8dfe8d8b81 | /trunk/pida/utils/pgd/debugsession.py | 6e0c5d6df92cc9f56a44188a64b2d6b59d10bc26 | [] | no_license | BackupTheBerlios/pida-svn | b68da6689fa482a42f5dee93e2bcffb167a83b83 | 739147ed21a23cab23c2bba98f1c54108f8c2516 | refs/heads/master | 2020-05-31T17:28:47.927074 | 2006-05-18T21:42:32 | 2006-05-18T21:42:32 | 40,817,392 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,589 | py | # -*- coding: utf-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
# Copyright (c) 2006 Ali Afshar
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import sys
from winpdb import rpdb2
from console import Terminal
def get_debugee_script_path():
import pkg_resources
req = pkg_resources.Requirement.parse('pgd')
try:
sfile = pkg_resources.resource_filename(req, 'pgd/winpdb/rpdb2.py')
except pkg_resources.DistributionNotFound:
sfile = os.path.join(
os.path.dirname(__file__),
'winpdb',
'rpdb2.py')
if sfile.endswith('c'):
sfile = sfile[:-1]
return sfile
class SessionManagerInternal(rpdb2.CSessionManagerInternal):
def _spawn_server(self, fchdir, ExpandedFilename, args, rid):
"""
Start an OS console to act as server.
What it does is to start rpdb again in a new console in server only mode.
"""
debugger = get_debugee_script_path()
baseargs = ['python', debugger, '--debugee', '--rid=%s' % rid]
if fchdir:
baseargs.append('--chdir')
if self.m_fAllowUnencrypted:
baseargs.append('--plaintext')
if self.m_fRemote:
baseargs.append('--remote')
if os.name == 'nt':
baseargs.append('--pwd=%s' % self.m_pwd)
if 'PGD_DEBUG' in os.environ:
baseargs.append('--debug')
baseargs.append(ExpandedFilename)
cmdargs = baseargs + args.split()
python_exec = sys.executable
self.terminal.fork_command(python_exec, cmdargs)
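        # For illustration: with every option enabled, the command line built
        # above looks roughly like
        #   python .../pgd/winpdb/rpdb2.py --debugee --rid=<rid> --chdir \
        #          --plaintext --remote --pwd=<pwd> --debug <script> <args>
        # (--pwd only on Windows, --debug only when PGD_DEBUG is set).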
class SessionManager(rpdb2.CSessionManager):
def __init__(self, app):
self.app = app
self.options = app.options
self.main_window = app.main_window
self.delegate = self._create_view()
self._CSessionManager__smi = self._create_smi()
def _create_smi(self):
smi = SessionManagerInternal(
self.options.pwd,
self.options.allow_unencrypted,
self.options.remote,
self.options.host)
smi.terminal = self
return smi
def _create_view(self):
view = Terminal(self.app)
self.main_window.attach_slave('outterm_holder', view)
return view
def fork_command(self, *args, **kw):
self.delegate.terminal.fork_command(*args, **kw)
def launch_filename(self, filename):
self.app.launch(filename)
class RunningOptions(object):
def set_options(self, command_line,
fAttach,
fchdir,
pwd,
fAllowUnencrypted,
fRemote,
host):
self.command_line = command_line
self.attach = fAttach
self.pwd = pwd
self.allow_unencrypted = fAllowUnencrypted
self.remote = fRemote
self.host = host
def connect_events(self):
event_type_dict = {rpdb2.CEventState: {}}
self.session_manager.register_callback(self.update_state, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventStackFrameChange: {}}
self.session_manager.register_callback(self.update_frame, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventThreads: {}}
self.session_manager.register_callback(self.update_threads, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventNoThreads: {}}
self.session_manager.register_callback(self.update_no_threads, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventNamespace: {}}
self.session_manager.register_callback(self.update_namespace, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventThreadBroken: {}}
self.session_manager.register_callback(self.update_thread_broken, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventStack: {}}
self.session_manager.register_callback(self.update_stack, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventBreakpoint: {}}
self.session_manager.register_callback(self.update_bp, event_type_dict, fSingleUse = False)
def start(command_line, fAttach, fchdir, pwd, fAllowUnencrypted, fRemote, host):
options= RunningOptions()
options.set_options(command_line, fAttach, fchdir, pwd, fAllowUnencrypted, fRemote, host)
return options
def main(start):
rpdb2.main(start)
def start_as_cl():
rpdb2.main()
if __name__ == '__main__':
start_as_cl()
| [
"aafshar@ef0b12da-61f9-0310-ba38-b2629ec279a7"
] | aafshar@ef0b12da-61f9-0310-ba38-b2629ec279a7 |
47abd26109c9522bd177dbbf0cd330a30d2b38c2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03998/s861110715.py | 06ced3f35c4b4debbacf590c69b46eaf81a9942e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | A = list(input())
B = list(input())
C = list(input())
n = A[0]
del A[0]
while True:
if n == "a":
if A == []:
print("A")
break
n = A[0]
del A[0]
elif n == "b":
if B == []:
print("B")
break
n = B[0]
del B[0]
else:
if C == []:
print("C")
break
n = C[0]
del C[0]
| [
"[email protected]"
] | |
c041ce36c31bab982df6a4db67cbb9e648902b4e | d88e4152c0540c4c3c0a93c997ed8666d6ede863 | /daeUSB.py | 299f7ae439ebaf4837641b8975188e5fef04d3c2 | [] | no_license | mahongquan/word_auto | 2ad36383a435a0b020c3939e1efafec13a64a59f | 552ecb5dc43fdd71785f6e188de7cd0df66345ac | refs/heads/master | 2020-03-21T17:28:03.633892 | 2018-07-22T02:38:31 | 2018-07-22T02:38:31 | 138,833,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,815 | py | # -*- coding: utf-8 -*-
# Install the USB-to-COM (USB2COM) driver automatically
import pywinauto
import pywintypes
from pywinauto import findwindows,handleprops
import os
import _thread
import traceback
from threading import Thread,Event
import time
def runUSB():
#cmd=r"start c:\9111\PCIS-DASK\PCIS-DASK.exe"
cmd="start c:\\CS3000备份\\CDM21216_Setup.exe"
os.system(cmd)
class Program(object):
"""example for AutoResetEvent"""
def main(self):
        _thread.start_new_thread(runUSB, ())  # start the installer program
        wind = EastWind(self.mre)
        wind.start()
        self.mre.wait()  # block until the install finishes
def __init__(self):
super(Program, self).__init__()
self.mre=Event()
class EastWind(Thread):
"""dmretring for EastWind"""
def __init__(self, mreV):
super(EastWind, self).__init__()
self.mre=mreV
def run(self):
        lookupWindow()  # drive the installer dialogs
        self.mre.set()  # signal that the install has finished
def treatadlink(h):
try:
d=pywinauto.controls.hwndwrapper.DialogWrapper(h)
#print(dir(d))
cs=d.children()
d.set_focus()
for c in cs:
bt=c.window_text()
if c.class_name()=="Button" and "Extract" in bt:
c.set_focus()
c.click_input()
if c.class_name()=="Button" and "下一步" in bt:
c.set_focus()
c.click_input()
if c.class_name()=="Button" and "Finish" in bt:
c.set_focus()
c.click_input()
if c.class_name()=="Button" and "No" in bt:
c.set_focus()
c.click_input()
except pywintypes.error as e:
print(e)
pass
except TypeError as e:
print(e)
return True
def treatQudong(h):
try:
d=pywinauto.controls.hwndwrapper.DialogWrapper(h)
#print(dir(d))
cs=d.children()
d.set_focus()
for c in cs:
bt=c.window_text()
if(bt!=None):
if c.class_name()=="Button" and "我接受" in bt:
c.set_focus()
c.click_input()
if c.class_name()=="Button" and "下一步" in bt:
c.set_focus()
c.click_input()
# if c.class_name()=="Button" and "完成" in bt:
# c.set_focus()
# c.click_input()
# if c.class_name()=="Button" and "No" in bt:
# c.set_focus()
# c.click_input()
except pywintypes.error as e:
print(e)
pass
return True
def lookupWindow():
idle=0
while(True):
print("=================")
try:
wins=findwindows.find_windows()
for win in wins:
try:
title=handleprops.text(win)
print(title)
if title!=None:
if "FTDI" in title:
print("--------------------find:"+title)
if treatadlink(win):
idle=0
elif "设备驱动程序" in title:
print("、、、、、、、、、、、find:"+title)
if treatQudong(win):
idle=0
except UnicodeEncodeError as e:
print(e)
pass
            time.sleep(1)  # poll once per second
            idle += 1
            if idle > 5:  # quit after five consecutive seconds without finding a window
break
except:
traceback.print_exc()
a=input("except")
def main():
program=Program()
program.main()
if __name__=="__main__":
    lookupWindow()
| [
"[email protected]"
] | |
132e5c34952c72f35e089c726777abaa59399bcd | daca4a0604a21e4dcdab501369704db24741938e | /webdriver/tests/sessions/new_session/invalid_capabilities.py | 8452bfb11e908be7a06fd65f6138f6f0c3958cd8 | [
"W3C",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-w3c-03-bsd-license",
"BSD-3-Clause"
] | permissive | Spec-Ops/web-platform-tests | 4ac91a784d5d363d261bfe92e251e6efd74b6b01 | c07a244ceef0d0833f61f2efa227bc1c65371c9c | refs/heads/master | 2020-12-11T06:08:52.141843 | 2017-10-20T11:55:15 | 2017-10-20T12:56:55 | 54,023,228 | 3 | 4 | null | 2016-09-06T21:01:15 | 2016-03-16T10:39:23 | HTML | UTF-8 | Python | false | false | 3,428 | py | #META: timeout=long
import pytest
from webdriver import error
from conftest import product, flatten
@pytest.mark.parametrize("value", [None, 1, "{}", []])
def test_invalid_capabilites(new_session, value):
with pytest.raises(error.InvalidArgumentException):
new_session({"capabilities": value})
@pytest.mark.parametrize("value", [None, 1, "{}", []])
def test_invalid_always_match(new_session, value):
with pytest.raises(error.InvalidArgumentException):
new_session({"capabilities": {"alwaysMatch": value}})
@pytest.mark.parametrize("value", [None, 1, "[]", {}])
def test_invalid_first_match(new_session, value):
with pytest.raises(error.InvalidArgumentException):
new_session({"capabilities": {"firstMatch": value}})
invalid_data = [
("acceptInsecureCerts", [1, [], {}, "false"]),
("browserName", [1, [], {}, False]),
("browserVersion", [1, [], {}, False]),
("platformName", [1, [], {}, False]),
("pageLoadStrategy", [1, [], {}, False, "invalid", "NONE", "Eager", "eagerblah", "interactive",
" eager", "eager "]),
("proxy", [1, [], "{}", {"proxyType": "SYSTEM"}, {"proxyType": "systemSomething"},
{"proxy type": "pac"}, {"proxy-Type": "system"}, {"proxy_type": "system"},
{"proxytype": "system"}, {"PROXYTYPE": "system"}, {"proxyType": None},
{"proxyType": 1}, {"proxyType": []}, {"proxyType": {"value": "system"}},
{" proxyType": "system"}, {"proxyType ": "system"}, {"proxyType ": " system"},
{"proxyType": "system "}]),
("timeouts", [1, [], "{}", {}, False, {"pageLOAD": 10}, {"page load": 10},
{"page load": 10}, {"pageLoad": "10"}, {"pageLoad": {"value": 10}},
{"invalid": 10}, {"pageLoad": -1}, {"pageLoad": 2**64},
{"pageLoad": None}, {"pageLoad": 1.1}, {"pageLoad": 10, "invalid": 10},
{" pageLoad": 10}, {"pageLoad ": 10}]),
("unhandledPromptBehavior", [1, [], {}, False, "DISMISS", "dismissABC", "Accept",
" dismiss", "dismiss "])
]
@pytest.mark.parametrize("body", [lambda key, value: {"alwaysMatch": {key: value}},
lambda key, value: {"firstMatch": [{key: value}]}])
@pytest.mark.parametrize("key,value", flatten(product(*item) for item in invalid_data))
def test_invalid_values(new_session, body, key, value):
with pytest.raises(error.InvalidArgumentException):
resp = new_session({"capabilities": body(key, value)})
invalid_extensions = [
"firefox",
"firefox_binary",
"firefoxOptions",
"chromeOptions",
"automaticInspection",
"automaticProfiling",
"platform",
"version",
"browser",
"platformVersion",
"javascriptEnabled",
"nativeEvents",
"seleniumProtocol",
"profile",
"trustAllSSLCertificates",
"initialBrowserUrl",
"requireWindowFocus",
"logFile",
"logLevel",
"safari.options",
"ensureCleanSession",
]
@pytest.mark.parametrize("body", [lambda key, value: {"alwaysMatch": {key: value}},
lambda key, value: {"firstMatch": [{key: value}]}])
@pytest.mark.parametrize("key", invalid_extensions)
def test_invalid_extensions(new_session, body, key):
with pytest.raises(error.InvalidArgumentException):
resp = new_session({"capabilities": body(key, {})})
| [
"[email protected]"
] | |
ecc937959d6a50bec48eee0e0c39cad9bc0593a0 | 3ff9821b1984417a83a75c7d186da9228e13ead9 | /2020_August_Leetcode_30_days_challenge/Week_2_Excel Sheet Column Number/by_recursion_and_base_conversion.py | 8614066d3ca3b81874534ab1c3be1896d784e563 | [
"MIT"
] | permissive | brianchiang-tw/leetcode | fd4df1917daef403c48cb5a3f5834579526ad0c2 | 6978acfb8cb767002cb953d02be68999845425f3 | refs/heads/master | 2023-06-11T00:44:01.423772 | 2023-06-01T03:52:00 | 2023-06-01T03:52:00 | 222,939,709 | 41 | 12 | null | null | null | null | UTF-8 | Python | false | false | 1,450 | py | '''
Description:
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
...
Example 1:
Input: "A"
Output: 1
Example 2:
Input: "AB"
Output: 28
Example 3:
Input: "ZY"
Output: 701
Constraints:
1 <= s.length <= 7
s consists only of uppercase English letters.
s is between "A" and "FXSHRXW".
'''
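# Worked example of the base-26 arithmetic used below: "ZY" ->
# 26 * value("Z") + value("Y") = 26 * 26 + 25 = 701, matching Example 3 above.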
class Solution:
def titleToNumber(self, s: str) -> int:
if len(s) == 1:
# base case
return ord(s) - ord('A') + 1
else:
# general case
return 26*self.titleToNumber( s[:-1] ) + self.titleToNumber( s[-1] )
# n : the length of string, s
## Time Complexity: O( n )
#
# The overhead in time is the cost of recursion depth, which is of O( n ).
## Space Complexity: O( n )
#
# The overhead in space is the storage for recursion call stack, which is of O( n ).
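# An equivalent iterative fold (an alternative sketch, not the author's
# recursive solution) computes the same base-26 value without recursion depth:
def title_to_number_iterative(s: str) -> int:
    result = 0
    for ch in s:
        result = result * 26 + (ord(ch) - ord('A') + 1)
    return result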
import unittest
class Testing( unittest.TestCase ):
def test_case_1( self ):
result = Solution().titleToNumber( s='A' )
self.assertEqual(result, 1)
def test_case_2( self ):
result = Solution().titleToNumber( s='AB' )
self.assertEqual(result, 28)
def test_case_3( self ):
result = Solution().titleToNumber( s='ZY' )
self.assertEqual(result, 701)
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
181242534741172deff30d1570b6c1e64b1d1641 | 6daaf3cecb19f95265188adc9afc97e640ede23c | /python_design/pythonprogram_design/Ch5/5-3-E60.py | cae23228ee53270139ad9d77e1371e78ac0da7f0 | [] | no_license | guoweifeng216/python | 723f1b29610d9f536a061243a64cf68e28a249be | 658de396ba13f80d7cb3ebd3785d32dabe4b611d | refs/heads/master | 2021-01-20T13:11:47.393514 | 2019-12-04T02:23:36 | 2019-12-04T02:23:36 | 90,457,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | import pickle
def main():
## Display the large cities in a specified state.
largeCities = createDictionaryFromBinaryFile("LargeCitiesDict.dat")
state = input("Enter the name of a state: ")
getCities(state, largeCities)
def createDictionaryFromBinaryFile(fileName):
# Assume pickle module has been imported.
infile = open(fileName, 'rb')
dictionaryName = pickle.load(infile)
infile.close()
return dictionaryName
def getCities(state, dictionaryName):
if dictionaryName[state] != []:
print("Large cities:", " ".join(dictionaryName[state]))
else:
print("There are no large cities in", state + '.')
main()
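# The binary file read above can be produced once with pickle, e.g. (the
# state/city values here are only illustrative, not the book's real data):
#
#   outfile = open("LargeCitiesDict.dat", 'wb')
#   pickle.dump({"Texas": ["Houston", "Dallas"], "Wyoming": []}, outfile)
#   outfile.close()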
| [
"[email protected]"
] | |
9f9de4a0ce359c148f01dc85de77fe73c15d295b | 06a38963f22f8a74cc129033f8f1cac7f114cf5d | /problem_set_1/.history/src/p03d_poisson_20210517220103.py | 6c67a7f7ecde98d6ae3463c6f27704f0d07843ae | [] | no_license | lulugai/CS229 | 47e59bcefe9dccbb83c6d903682eb6ddca3a24b5 | 4dd77b2895559689e875afe749360c3751a12cf1 | refs/heads/main | 2023-05-09T17:39:45.336695 | 2021-06-01T09:15:25 | 2021-06-01T09:15:25 | 372,768,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,462 | py | import numpy as np
import util
from linear_model import LinearModel
def main(lr, train_path, eval_path, pred_path):
"""Problem 3(d): Poisson regression with gradient ascent.
Args:
lr: Learning rate for gradient ascent.
train_path: Path to CSV file containing dataset for training.
eval_path: Path to CSV file containing dataset for evaluation.
pred_path: Path to save predictions.
"""
# Load training set
x_train, y_train = util.load_dataset(train_path, add_intercept=True)
# *** START CODE HERE ***
# Fit a Poisson Regression model
# Run on the validation set, and use np.savetxt to save outputs to pred_path
x_val, y_val = util.load_dataset(eval_path, add_intercept=True)
logreg = PoissonRegression()
logreg.fit(x_train, y_train)
print("Theta is: ", logreg.theta)
print("The accuracy on training set is: ", np.mean(logreg.predict(x_train) == y_train))
print("The accuracy on valid set is: ", np.mean(logreg.predict(x_val) == y_val))
np.savetxt(pred_path, logreg.predict(x_val))
# *** END CODE HERE ***
class PoissonRegression(LinearModel):
"""Poisson Regression.
Example usage:
> clf = PoissonRegression(step_size=lr)
> clf.fit(x_train, y_train)
> clf.predict(x_eval)
"""
def h(self, theta, x):
return np.exp(x @ theta)
def fit(self, x, y):
"""Run gradient ascent to maximize likelihood for Poisson regression.
Args:
x: Training example inputs. Shape (m, n).
y: Training example labels. Shape (m,).
"""
# *** START CODE HERE ***
m, n = x.shape
if self.theta is None:
self.theta = np.zeros(n)
theta = self.theta
next_theta = theta + self.step_size / m * x.T @ (y - self.h(theta, x))
print(next_theta)
while np.linalg.norm(self.step_size / m * x.T @ (y - self.h(theta, x)), 1) >= self.eps:
theta = next_theta
next_theta = theta + self.step_size / m * x.T @ (y - self.h(theta, x))
self.theta = theta
# *** END CODE HERE ***
def predict(self, x):
"""Make a prediction given inputs x.
Args:
x: Inputs of shape (m, n).
Returns:
Floating-point prediction for each input, shape (m,).
"""
# *** START CODE HERE ***
return self.h(self.theta, x)
# *** END CODE HERE ***
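# Note on the update used in fit() above: gradient ascent on the Poisson
# log-likelihood gives theta <- theta + (alpha / m) * X^T (y - exp(X @ theta)),
# which is exactly the next_theta expression, iterated until the L1 norm of
# the update falls below self.eps.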
| [
"[email protected]"
] | |
ec7aa8ec3045bb0672f893db968f532ab7ae139d | aef6e68ccb00c423ebfad39101c1913c68253c83 | /saramin/parser.py | ca99d8976a02a2ae8ac2f0f0c92607724ebb452b | [] | no_license | plzprayme/work-scholarship-tools | 76dd39f037d774111927a316cdd8c88ed854aec9 | df8004c90a8a3b33e5c858c7d544a0e35ef7ce19 | refs/heads/main | 2023-02-13T05:24:35.673243 | 2021-01-15T07:47:18 | 2021-01-15T07:47:18 | 306,560,615 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,233 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup as bs4
from datetime import datetime, date
from time import sleep
import pandas as pd
def filter_by_condition(rows):
result = []
for row in rows:
location_condition = row.find_element_by_css_selector("p.work_place").text
career_condition = row.find_element_by_css_selector(".career").text
deadline = row.find_element_by_css_selector('.deadlines').text
url = row.find_element_by_css_selector(
'.str_tit').get_attribute('href')
try:
if is_deadline_over_ten_days(deadline) \
and "대전" in location_condition \
and ("신입" in career_condition or "경력무관" in career_condition):
print("=====FILTERED ROW=====")
print("LOCATION CONDITION", location_condition)
print("CAREER CONDITION", career_condition)
print("DEADLINE CONDITIOn", deadline)
print("=====END FILTERED=====")
result.append(url)
else:
print("=====UNFILTERED ROW=====")
print("LOCATION CONDITION", location_condition)
print("CAREER CONDITION", career_condition)
print("DEADLINE CONDITIOn", deadline)
print("=====END UNFILTERED=====")
except:
print("UNFILTERED DEADLINE ROW", deadline)
return result
def is_deadline_over_ten_days(deadline):
today = date.today()
iso_format = get_deadline_for_iso(deadline)
deadline_date = date.fromisoformat(iso_format)
deadline = (deadline_date - today).days
return deadline >= 10
def get_deadline_for_iso(deadline):
iso_format = [str(date.today().year)]
a = deadline.split(" ")
b = a[1].split("(")
c = b[0].split("/")
d = iso_format + c
return "-".join(d)
def get_column_value(soup, selector, parser_function):
try:
value = parser_function(soup, selector)
return value
except:
return ""
def default_parser(soup, selector):
return soup.select(selector)[0].text.strip()
def replace_wrap_parser(soup, selector):
target = soup.select(selector)[0]
result = target.text
if target.select(".toolTipWrap"):
result = result.replace(target.select(".toolTipWrap")[0].text, "")
return result.strip()
def deadline_parser(soup, selector):
deadline = soup.select(selector)[0].text # YYYY.MM.DD tt:mm
return deadline.split(" ")[0] # YYYY.MM.DD
def href_parser(soup, selector):
return soup.select(selector)[0].attrs["href"]
def benefit_parser(soup, selector):
    # check whether there is an expand button
    # if there is, click it
    # option: scroll down to where the benefits section is
    # then grab the rows and walk them column by column
result = []
parent = soup.select(selector)[0]
button = parent.find('button')
if button is not None:
driver.find_element_by_css_selector('.jv_benefit > div.cont > button').click()
columns = parent.select('.col')
for column in columns[:-2]:
title = column.dt.text
value = column.dd.text.strip()
result.append(title)
result.append(value)
result.append("\n")
return "\n".join(result)
def get_next_page_url(base_url, page):
url = base_url.split("?")
url[1] = f"page={page}&" + url[1]
return "?".join(url)
with webdriver.Chrome("./chromedriver") as driver:
wait = WebDriverWait(driver, 10)
company_name_list = []
content_list = []
aa_list = []
recruitment_number_list = []
task_list = []
work_location_list = []
url_list = []
deadline_list = []
imployee_list = []
sales_list = []
gender_list = []
find_who_list = []
income_list = []
work_time_list = []
benefit_list = []
resume_format_list = []
# print("*" * 35)
# print("*" + " " * 33 + "*")
# print("* 한남대학교 취업전략개발팀 *")
# print("* 근로장학생 업무 자동화 프로젝트 *")
# print("* 사람인 채용공고 크롤러 *")
# print("*" + " " * 33 + "*")
# print("*********CREATED BY PRAYME*********")
# print()
# print()
# print()
# print(">> 데이터를 수집할 URL을 입력해주세요 ")
target_url = "http://www.saramin.co.kr/zf_user/jobs/list/domestic?loc_mcd=105000&cat_cd=404%2C407%2C402%2C417%2C411%2C315%2C309%2C302%2C308%2C314&panel_type=&search_optional_item=n&search_done=y&panel_count=y"
driver.get(target_url)
print(">> 수집을 시작합니다....")
rows = driver.find_elements_by_css_selector("#default_list_wrap > section > div.list_body > .list_item")
print(">> 대전 지역, 10일 이상, 신입 조건 채용공고 필터링 시작....")
rows = filter_by_condition(rows)
page = 1
while len(rows) <= 40:
page += 1
next_page = get_next_page_url(target_url, page)
driver.get(next_page)
elements = driver.find_elements_by_css_selector("#default_list_wrap > section > div.list_body > .list_item")
rows += filter_by_condition(elements)
rows = set(rows)
rows = list(rows)
print(">> 필터링 완료. 총 {}개의 채용공고 수집을 시작합니다....".format(len(rows)))
for i, row in enumerate(rows):
print(">> {}/{}번째 채용공고 수집 시작".format(i, len(rows)))
# print("ROW", row)
        # fields that can be taken from the row itself
        # navigate to the item's detail page
driver.get(row)
        # wait for the DOM to load
# wait.until(presence_of_element_located(
# (By.CSS_SELECTOR, ".info_period > dd:nth-child(4)")
# ))
sleep(5)
        # grab the HTML
html = driver.page_source
soup = bs4(html, 'html.parser')
        # get each item's company name
company_name = get_column_value(soup, ".company_name", default_parser)
print("COMPANY NAME", company_name)
        ############# done up to here #################
        # get each item's work address
work_location = get_column_value(
soup, "#map_0 > div > address > span", default_parser)
print("WORK LOCATION", work_location)
        # get each item's resume submission format
# resume_submission_format = get_column_value(
# soup, '.template', default_parser)
resume_submission_format = get_column_value(
soup, '.template', replace_wrap_parser)
print("RESUME SUBMISSION FORMAT", resume_submission_format)
        # get each item's application deadline
deadline = get_column_value(
soup, '.info_period > dd:nth-child(4)', deadline_parser)
print("DEADLINE", deadline)
        # get each item's benefits
benefit = get_column_value(soup, '.jv_benefit', benefit_parser)
print("BENEFIT", benefit)
income = ""
work_time = ""
find_who = ""
try:
cont = soup.select(".cont")[0]
for column in cont:
for dl in column.select("dl"):
title = dl.dt.text
content = dl.dd.text
tooltip = dl.select(".toolTipWrap")
if "급여" == title:
print("급여", content)
if tooltip:
content = content.replace(tooltip[0].text, "").strip()
income = content
print("급여", income)
elif "근무일시" == title:
print("근무일시", content)
if tooltip:
content = content.replace(tooltip[0].text, "").strip()
work_time = content
print("근무일시", work_time)
elif "우대사항" == title:
result = []
for item in dl.select(".toolTipTxt > li"):
result.append(item.text.strip())
print("우대사항", "\n".join(result))
find_who = "\n".join(result)
except:
print("ERROR")
recruitment_number = get_column_value(soup, ".recruit_division_0 #template_divisions_work_dept_nm_0",
default_parser)
print("RECRUIT_NUMBER", recruitment_number)
task = get_column_value(soup,
".recruit_division_0 #template_divisions_assign_task_nm_0 > tbody > tr:nth-child(2) > td",
default_parser)
print("TASK", task)
        # get the business description
company_detail = get_column_value(
soup, '.jv_header > a.company', href_parser)
print("COMPANY_DETAIL", company_detail)
sleep(5)
driver.get("http://www.saramin.co.kr" + company_detail)
sleep(5)
# wait.until(presence_of_element_located(
# (By.CSS_SELECTOR, "#company_info_introduce")
# ))
company_detail_html = driver.page_source
company_detail_soup = bs4(company_detail_html, 'html.parser')
# document.querySelector("#company_info_introduce").textContent.trim()[116:]
content = ""
imployee = ""
sales = ""
gender = "무관"
try:
detail = company_detail_soup.select(".summary > li")
for li in detail:
title = li.span.text
body = li.strong.text
if "사원수" == title:
imployee = body
elif "매출액" == title:
sales = body
print(f"사원수: {imployee}, 매출액: {sales}")
data_title = company_detail_soup.select(".info > dt")
data_content = company_detail_soup.select(".info > dd")
position = 0
for i, title in enumerate(data_title):
if "업종" == title.text or "사업내용" == title.text:
content = data_content[i].text
print(f"사업내용: {content}")
except:
print("사업내용 쪽 에러 남")
company_name_list.append(company_name)
content_list.append(content)
aa_list.append("")
recruitment_number_list.append(recruitment_number)
task_list.append(task)
work_location_list.append(work_location)
url_list.append(row)
deadline_list.append(deadline)
imployee_list.append(imployee)
sales_list.append(sales)
gender_list.append(gender)
find_who_list.append(find_who)
income_list.append(income)
work_time_list.append(work_time)
benefit_list.append(benefit)
resume_format_list.append(resume_submission_format)
print(">> Excel 파일로 저장을 시작합니다.")
df = pd.DataFrame({
"사업장명(회사이름)": company_name_list,
"사업내용": content_list,
"모집직종": aa_list,
"모집인원": recruitment_number,
"직무내용": task_list,
"근무지주소": work_location_list,
"소재지주소": work_location_list,
"공지사이트": url_list,
"서류마감": deadline_list,
"근로자 수": imployee_list,
"매출액": sales_list,
"성별": gender_list,
"우대조건": find_who_list,
"임금액": income_list,
"근무시간": work_time_list,
"복리후생": benefit_list,
"제출서류": resume_format_list
})
save_time = datetime.now().strftime("%Y_%m_%d_%H시%M분")
name = '{}_채용공고.csv'
df.to_csv('{}utf_채용공고.csv'.format(save_time), encoding='utf-8-sig')
# df.to_csv(name.format(save_time), encoding='euc-kr')
print(">> Excel 파일로 저장을 완료했습니다.")
| [
"[email protected]"
] | |
acf5d4b77fd234775af80508e3a4688f0811f179 | 37e53bb1d47004e956119e7e7047b8194b3142f5 | /grok/recursion_ver_3.py | 1d12fa0694653459fa11a17525f97743ce815aa0 | [] | no_license | fortredux/py_miscellaneous | eea82f900434ef23d98769f9057c87c97b278e45 | 7aee7c8bc0ddda1e137e4cb7e88eed78dbdb98d8 | refs/heads/master | 2020-12-21T21:03:28.897746 | 2020-04-05T20:44:55 | 2020-04-05T20:44:55 | 236,560,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py |
def check(i):
i = int(i)
if i == 0:
print('0')
return False
else:
return True
def countdown(num):
x = check(num)
while x is True:
num = int(num)
print(num)
return countdown(num-1)
countdown('15')
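# Expected output: 15 down to 1, one number per line; the final call
# countdown(0) makes check(0) print '0' and return False, ending the recursion.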
| [
"[email protected]"
] | |
9092164cade33e786253089e178044d0156653f7 | 71d4fafdf7261a7da96404f294feed13f6c771a0 | /mainwebsiteenv/lib/python2.7/site-packages/phonenumbers/shortdata/region_SK.py | fdc7ed9e7dd3d18546d0c1cdbe188f1eb3101d50 | [] | no_license | avravikiran/mainwebsite | 53f80108caf6fb536ba598967d417395aa2d9604 | 65bb5e85618aed89bfc1ee2719bd86d0ba0c8acd | refs/heads/master | 2021-09-17T02:26:09.689217 | 2018-06-26T16:09:57 | 2018-06-26T16:09:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | """Auto-generated file, do not edit by hand. SK metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SK = PhoneMetadata(id='SK', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,5}', possible_length=(3, 4, 5, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='116\\d{3}', example_number='116000', possible_length=(6,)),
emergency=PhoneNumberDesc(national_number_pattern='1(?:12|5[058])', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:2|6(?:000|111)|8[0-8])|[24]\\d{3}|5[0589]|8\\d{3})', example_number='112', possible_length=(3, 4, 5, 6)),
short_data=True)
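# Usage sketch (assumes the python port re-exports is_valid_short_number at
# package level; if not, import it from phonenumbers.shortnumberinfo):
#
#   import phonenumbers
#   num = phonenumbers.parse("112", "SK")
#   phonenumbers.is_valid_short_number(num)  # True for the emergency pattern above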
| [
"[email protected]"
] | |
4301d8ddb5bac52d283851cc404947a820b5da05 | 362224f8a23387e8b369b02a6ff8690c200a2bce | /django/django_intro/dojosurveyRev/manage.py | 925c756178ebae644969e9b503f6ded8d97f2cb8 | [] | no_license | Helenyixuanwang/python_stack | ac94c7c532655bf47592a8453738daac10f220ad | 97fbc77e3971b5df1fe3e79652b294facf8d6cee | refs/heads/main | 2023-06-11T02:17:27.277551 | 2021-06-21T17:01:09 | 2021-06-21T17:01:09 | 364,336,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dojosurveyRev.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e3ccc17dad2cce4bc4775644166167b2526d35eb | 6c5ce1e621e0bd140d127527bf13be2093f4a016 | /ex044/exercicio044.py | 290134b8ab1da627fa43d612670b1700cc3f8065 | [
"MIT"
] | permissive | ArthurAlesi/Python-Exercicios-CursoEmVideo | 124e2ee82c3476a5a49baafed657788591a232c1 | ed0f0086ddbc0092df9d16ec2d8fdbabcb480cdd | refs/heads/master | 2022-12-31T13:21:30.001538 | 2020-09-24T02:09:23 | 2020-09-24T02:09:23 | 268,917,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | # Write a program that calculates the amount to be paid for a product,
# considering its normal price and the payment method:
# - cash/check up front: 10% discount
# - card, single payment: 5% discount
# - card, 2 installments: normal price
# - card, 3 or more installments: 20% surcharge
compra = float(input("Enter the purchase price: "))
print("Choose the payment method")
print("1 - cash/check up front")
print("2 - card, single payment")
print("3 - card, 2 installments")
print("4 - card, 3+ installments")
entrada = int(input())
if entrada == 1:
    total = compra * 0.9
    print("You will pay ", total)
elif entrada == 2:
    total = compra * 0.95
    print("You will pay ", total)
elif entrada == 3:
    parcela = compra / 2
    print("2 installments of ", parcela)
elif entrada == 4:
    total = compra * 1.2
    parcela = total / 3
    print("3 installments of ", parcela)
| [
"[email protected]"
] | |
a2669f6f3cf2be4424ef7db7edfd60b48effbd05 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/logs/apis/TestMetricTaskRequest.py | 8e2bba6c9cb8119d77246c48816fc0632e6a8191 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 3,730 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class TestMetricTaskRequest(JDCloudRequest):
"""
日志测试,根据用户输入的日志筛选条件以及监控指标设置进行模拟监控统计
"""
def __init__(self, parameters, header=None, version="v1"):
super(TestMetricTaskRequest, self).__init__(
'/regions/{regionId}/logsets/{logsetUID}/logtopics/{logtopicUID}/metrictaskTest', 'POST', header, version)
self.parameters = parameters
class TestMetricTaskParameters(object):
def __init__(self, regionId,logsetUID,logtopicUID,content, ):
"""
        :param regionId: region ID
        :param logsetUID: logset UID
        :param logtopicUID: log topic UID
        :param content: test content (sample log text)
"""
self.regionId = regionId
self.logsetUID = logsetUID
self.logtopicUID = logtopicUID
self.aggregate = None
self.content = content
self.dataField = None
self.filterContent = None
self.filterOpen = None
self.filterType = None
self.metric = None
self.settingType = None
self.sqlSpec = None
def setAggregate(self, aggregate):
"""
        :param aggregate: (Optional) aggregate function; supports count, sum, max, min, avg. Required when the configuration mode (settingType) is empty or 'visual'.
"""
self.aggregate = aggregate
def setDataField(self, dataField):
"""
        :param dataField: (Optional) query field; letters, digits, underscores, hyphens and dots are supported (raw Chinese log text and each product line's keys). Required when the configuration mode (settingType) is empty or 'visual'.
"""
self.dataField = dataField
def setFilterContent(self, filterContent):
"""
        :param filterContent: (Optional) filter expression; may be empty
"""
self.filterContent = filterContent
def setFilterOpen(self, filterOpen):
"""
        :param filterOpen: (Optional) whether filtering is enabled. Required when the configuration mode (settingType) is empty or 'visual'.
"""
self.filterOpen = filterOpen
def setFilterType(self, filterType):
"""
        :param filterType: (Optional) filter type; must be 'fulltext' or 'advance'. Required when the configuration mode (settingType) is empty or 'visual'.
"""
self.filterType = filterType
def setMetric(self, metric):
"""
        :param metric: (Optional) metric name; upper/lower-case letters, underscores, digits and dots, at most 255 bytes (hyphens not allowed). Required when the configuration mode (settingType) is empty or 'visual'.
"""
self.metric = metric
def setSettingType(self, settingType):
"""
        :param settingType: (Optional) configuration mode; enum values 'visual' and 'sql' for visual and SQL configuration respectively; empty means visual
"""
self.settingType = settingType
def setSqlSpec(self, sqlSpec):
"""
:param sqlSpec: (Optional)
"""
self.sqlSpec = sqlSpec
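# Usage sketch (values are illustrative; actually sending the request also
# needs the jdcloud_sdk client/credential setup, which is outside this file):
#
#   parameters = TestMetricTaskParameters("cn-north-1", "<logsetUID>",
#                                         "<logtopicUID>", "sample log line")
#   parameters.setSettingType("visual")
#   parameters.setAggregate("count")
#   request = TestMetricTaskRequest(parameters)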
| [
"[email protected]"
] | |
5aa6d0190c71dff4c9372596cae52cfd9393b80b | 08a93de4813e0efb5cf40ac490eb0502266ca8ba | /settings/urls.py | c64f4a4821b4b9656b8e0cfa43d8ede15897236c | [
"BSD-2-Clause"
] | permissive | hunglethanh9/modelchimp | eaf3a8c6c73a3ed4c942fa361d8b661bf5d92a87 | cdb8162cdb554aa5ef00999f390138bd8325f472 | refs/heads/master | 2020-06-06T00:09:33.447383 | 2019-06-18T13:02:03 | 2019-06-18T13:02:03 | 192,582,892 | 1 | 0 | null | 2019-06-18T17:14:47 | 2019-06-18T17:14:47 | null | UTF-8 | Python | false | false | 1,188 | py | from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth.views import (
PasswordResetView,
PasswordResetConfirmView,
PasswordResetDoneView,
PasswordResetCompleteView,
)
urlpatterns = [
url(r'^api/', include('settings.api_urls')),
url(r'^api/v2/', include('settings.api_urls_v2')),
url(r'^hq/', admin.site.urls),
# Password reset views
url(r'^password_reset/$', PasswordResetView.as_view(), name='password_reset'),
url(r'^password_reset/done/$', PasswordResetDoneView.as_view(), name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
url(r'^reset/done/$', PasswordResetCompleteView.as_view(),name='password_reset_complete'),
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
urlpatterns = urlpatterns + static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
| [
"[email protected]"
] | |
d7c759cbcfcbf99821bb0d3e66eb3e9796632e2b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_hires.py | 55de384efb494e1deadbf5e334722a21e472f366 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
from xai.brain.wordbase.nouns._hire import _HIRE
#calss header
class _HIRES(_HIRE, ):
def __init__(self,):
_HIRE.__init__(self)
self.name = "HIRES"
self.specie = 'nouns'
self.basic = "hire"
self.jsondata = {}
| [
"[email protected]"
] | |
0df41ae40a0375dbac1794c5a5d7bb4a19941e27 | 0e6311674ad4637031ff4c7e0f522a7fac65973d | /akshare/futures/futures_daily_bar.py | adc6d5c6f6e66a3584b8960a5602b1284331c185 | [
"MIT"
] | permissive | waruanfou/akshare | 26b064e305c05b4b4d1bf297fe73ab61e465bb06 | b240109eae08d6bb5eee2df3edeac680c9176430 | refs/heads/master | 2023-06-23T08:32:42.035038 | 2021-07-18T11:26:29 | 2021-07-18T11:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,312 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/5/4 15:58
Desc: fetch daily futures bars from the exchange websites
"""
import datetime
import json
import re
import warnings
import zipfile
from io import BytesIO, StringIO
import numpy as np
import pandas as pd
import requests
from akshare.futures import cons
from akshare.futures.requests_fun import requests_link
calendar = cons.get_calendar()
def get_cffex_daily(date: str = "20100401") -> pd.DataFrame:
"""
    China Financial Futures Exchange (CFFEX) daily trading data
    http://www.cffex.com.cn/rtj/
    :param date: date, format YYYY-MM-DD or YYYYMMDD or a datetime.date object; defaults to today when empty
    :return: pandas.DataFrame
    CFFEX daily columns:
    symbol           contract code
    date             date
    open             open price
    high             high price
    low              low price
    close            close price
    volume           volume
    open_interest    open interest
    turnover         turnover
    settle           settlement price
    pre_settle       previous settlement price
    variety          product (contract class)
    or None (no trading data for the given date)
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
warnings.warn("%s非交易日" % day.strftime("%Y%m%d"))
return None
url = f"http://www.cffex.com.cn/sj/historysj/{date[:-2]}/zip/{date[:-2]}.zip"
r = requests.get(url)
try:
with zipfile.ZipFile(BytesIO(r.content)) as file:
with file.open(f"{date}_1.csv") as my_file:
data = my_file.read().decode("gb2312")
data_df = pd.read_csv(StringIO(data))
except:
return None
data_df = data_df[data_df["合约代码"] != "小计"]
data_df = data_df[data_df["合约代码"] != "合计"]
data_df = data_df[~data_df["合约代码"].str.contains("IO")]
data_df.reset_index(inplace=True, drop=True)
data_df["合约代码"] = data_df["合约代码"].str.strip()
symbol_list = data_df["合约代码"].to_list()
variety_list = [re.compile(r"[a-zA-Z_]+").findall(item)[0] for item in symbol_list]
if data_df.shape[1] == 15:
data_df.columns = ["symbol", "open", "high", "low", "volume", "turnover",
"open_interest", "_", "close", "settle", "pre_settle", "_", "_", "_", "_"]
else:
data_df.columns = ["symbol", "open", "high", "low", "volume", "turnover",
"open_interest", "_", "close", "settle", "pre_settle", "_", "_", "_"]
data_df["date"] = date
data_df["variety"] = variety_list
data_df = data_df[
["symbol", "date", "open", "high", "low", "close", "volume", "open_interest", "turnover", "settle",
"pre_settle", "variety"]]
return data_df
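# Usage sketch: fetch one CFFEX trading day; pass the date in YYYYMMDD form,
# since the code above slices date[:-2] to build the monthly zip URL:
#
#   df = get_cffex_daily(date="20200416")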
def get_ine_daily(date: str = "20200106") -> pd.DataFrame:
"""
    Shanghai International Energy Exchange (INE) - daily - price/volume data
    INE products: crude oil futures (listed 20180326); TSR 20 rubber futures (listed 20190812)
    trade_price: http://www.ine.cn/statements/daily/?paramid=kx
    trade_note: http://www.ine.cn/data/datanote.dat
    :param date: date, format YYYY-MM-DD or YYYYMMDD or a datetime.date object; defaults to the current trading day
    :type date: str or datetime.date
    :return: INE daily price/volume data
:rtype: pandas.DataFrame or None
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
warnings.warn(f"{day.strftime('%Y%m%d')}非交易日")
return None
url = f"http://www.ine.cn/data/dailydata/kx/kx{day.strftime('%Y%m%d')}.dat"
r = requests.get(url)
result_df = pd.DataFrame()
try:
data_json = r.json()
except:
return None
temp_df = pd.DataFrame(data_json["o_curinstrument"]).iloc[:-1, :]
temp_df = temp_df[temp_df["DELIVERYMONTH"] != "小计"]
temp_df = temp_df[~temp_df["PRODUCTNAME"].str.contains("总计")]
try:
result_df["symbol"] = temp_df["PRODUCTGROUPID"].str.upper().str.strip() + temp_df["DELIVERYMONTH"]
except:
result_df["symbol"] = temp_df["PRODUCTID"].str.upper().str.strip().str.split("_", expand=True).iloc[:, 0] + temp_df["DELIVERYMONTH"]
result_df["date"] = day.strftime("%Y%m%d")
result_df["open"] = temp_df["OPENPRICE"]
result_df["high"] = temp_df["HIGHESTPRICE"]
result_df["low"] = temp_df["LOWESTPRICE"]
result_df["close"] = temp_df["CLOSEPRICE"]
result_df["volume"] = temp_df["VOLUME"]
result_df["open_interest"] = temp_df["OPENINTEREST"]
result_df["turnover"] = 0
result_df["settle"] = temp_df["SETTLEMENTPRICE"]
result_df["pre_settle"] = temp_df["PRESETTLEMENTPRICE"]
try:
result_df["variety"] = temp_df["PRODUCTGROUPID"].str.upper().str.strip()
except:
result_df["variety"] = temp_df["PRODUCTID"].str.upper().str.strip().str.split("_", expand=True).iloc[:, 0]
result_df = result_df[result_df["symbol"] != "总计"]
result_df = result_df[~result_df["symbol"].str.contains("efp")]
return result_df
def get_czce_daily(date: str = "20050525") -> pd.DataFrame:
"""
    Zhengzhou Commodity Exchange (CZCE) - daily - price/volume data
    :param date: date, format YYYY-MM-DD or YYYYMMDD or a datetime.date object; defaults to the current trading day; the date needs to be later than 20100824
    :type date: str or datetime.date
    :return: CZCE daily price/volume data
:rtype: pandas.DataFrame or None
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
warnings.warn(f"{day.strftime('%Y%m%d')}非交易日")
return None
if day > datetime.date(2010, 8, 24):
if day > datetime.date(2015, 9, 19):
u = cons.CZCE_DAILY_URL_3
url = u % (day.strftime("%Y"), day.strftime("%Y%m%d"))
elif day < datetime.date(2015, 9, 19):
u = cons.CZCE_DAILY_URL_2
url = u % (day.strftime("%Y"), day.strftime("%Y%m%d"))
listed_columns = cons.CZCE_COLUMNS
output_columns = cons.OUTPUT_COLUMNS
try:
r = requests.get(url)
html = r.text
except requests.exceptions.HTTPError as reason:
if reason.response.status_code != 404:
print(
cons.CZCE_DAILY_URL_3
% (day.strftime("%Y"), day.strftime("%Y%m%d")),
reason,
)
return
if html.find("您的访问出错了") >= 0 or html.find("无期权每日行情交易记录") >= 0:
return
html = [
i.replace(" ", "").split("|")
for i in html.split("\n")[:-4]
if i[0][0] != "小"
]
if day > datetime.date(2015, 9, 19):
if html[1][0] not in ["品种月份", "品种代码", "合约代码"]:
return
dict_data = list()
day_const = int(day.strftime("%Y%m%d"))
for row in html[2:]:
m = cons.FUTURES_SYMBOL_PATTERN.match(row[0])
if not m:
continue
row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)}
for i, field in enumerate(listed_columns):
if row[i + 1] == "\r" or row[i + 1] == '':
row_dict[field] = 0.0
elif field in [
"volume",
"open_interest",
"oi_chg",
"exercise_volume",
]:
row[i + 1] = row[i + 1].replace(",", "")
row_dict[field] = int(row[i + 1])
else:
row[i + 1] = row[i + 1].replace(",", "")
row_dict[field] = float(row[i + 1])
dict_data.append(row_dict)
return pd.DataFrame(dict_data)[output_columns]
elif day < datetime.date(2015, 9, 19):
dict_data = list()
day_const = int(day.strftime("%Y%m%d"))
for row in html[1:]:
row = row[0].split(",")
m = cons.FUTURES_SYMBOL_PATTERN.match(row[0])
if not m:
continue
row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)}
for i, field in enumerate(listed_columns):
if row[i + 1] == "\r":
row_dict[field] = 0.0
elif field in [
"volume",
"open_interest",
"oi_chg",
"exercise_volume",
]:
row_dict[field] = int(float(row[i + 1]))
else:
row_dict[field] = float(row[i + 1])
dict_data.append(row_dict)
return pd.DataFrame(dict_data)[output_columns]
if day <= datetime.date(2010, 8, 24):
u = cons.CZCE_DAILY_URL_1
url = u % day.strftime("%Y%m%d")
listed_columns = cons.CZCE_COLUMNS_2
output_columns = cons.OUTPUT_COLUMNS
df = pd.read_html(url)[1].dropna(how="any")
dict_data = list()
day_const = int(day.strftime("%Y%m%d"))
for row in df.to_dict(orient="records"):
row = list(row.values())
m = cons.FUTURES_SYMBOL_PATTERN.match(row[0])
if not m:
continue
row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)}
for i, field in enumerate(listed_columns):
if row[i + 1] == "\r":
row_dict[field] = 0.0
elif field in ["volume", "open_interest", "oi_chg", "exercise_volume"]:
row_dict[field] = int(row[i + 1])
else:
row_dict[field] = float(row[i + 1])
dict_data.append(row_dict)
return pd.DataFrame(dict_data)[output_columns]
def get_shfe_v_wap(date: str = "20131017") -> pd.DataFrame:
"""
    Fetch SHFE daily volume-weighted average price (v_wap) data
    Parameters
    ------
    date: date, format YYYY-MM-DD or YYYYMMDD or a datetime.date object; defaults to today when empty
    Return
    -------
    DataFrame
    SHFE daily v_wap data (DataFrame):
    symbol        contract code
    date          date
    time_range    v_wap window, either 09:00-10:15 or 09:00-15:00
    v_wap         volume-weighted average trade price
    or None (no data for the given date)
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
warnings.warn("%s非交易日" % day.strftime("%Y%m%d"))
return None
try:
json_data = json.loads(
requests_link(
cons.SHFE_V_WAP_URL % (day.strftime("%Y%m%d")),
headers=cons.headers,
encoding="utf-8",
).text
)
except:
return None
if len(json_data["o_currefprice"]) == 0:
return None
try:
df = pd.DataFrame(json_data["o_currefprice"])
df["INSTRUMENTID"] = df["INSTRUMENTID"].str.strip()
df[":B1"].astype("int16")
return df.rename(columns=cons.SHFE_V_WAP_COLUMNS)[
list(cons.SHFE_V_WAP_COLUMNS.values())
]
except:
return None
def get_shfe_daily(date: str = "20160104") -> pd.DataFrame:
"""
上海期货交易所-日频率-量价数据
http://www.shfe.com.cn/statements/dataview.html?paramid=kx
:param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象, 默认为当前交易日
:type date: str or datetime.date
:return: 上海期货交易所-日频率-量价数据
:rtype: pandas.DataFrame or None
上期所日交易数据(DataFrame):
symbol 合约代码
date 日期
open 开盘价
high 最高价
low 最低价
close 收盘价
volume 成交量
open_interest 持仓量
turnover 成交额
settle 结算价
pre_settle 前结算价
variety 合约类别
或 None(给定交易日没有交易数据)
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
        warnings.warn("%s is not a trading day" % day.strftime("%Y%m%d"))
return None
try:
json_data = json.loads(
requests_link(
cons.SHFE_DAILY_URL % (day.strftime("%Y%m%d")),
headers=cons.shfe_headers,
).text
)
except requests.HTTPError as reason:
        if reason.response.status_code != 404:
print(cons.SHFE_DAILY_URL % (day.strftime("%Y%m%d")), reason)
return
if len(json_data["o_curinstrument"]) == 0:
return
df = pd.DataFrame(
[
row
for row in json_data["o_curinstrument"]
if row["DELIVERYMONTH"] not in ["小计", "合计"] and row["DELIVERYMONTH"] != ""
]
)
try:
df["variety"] = df["PRODUCTGROUPID"].str.upper().str.strip()
    except KeyError:
df["variety"] = df["PRODUCTID"].str.upper().str.split('_', expand=True).iloc[:, 0].str.strip()
df["symbol"] = df["variety"] + df["DELIVERYMONTH"]
df["date"] = day.strftime("%Y%m%d")
v_wap_df = get_shfe_v_wap(day)
if v_wap_df is not None:
df = pd.merge(
df,
v_wap_df[v_wap_df.time_range == "9:00-15:00"],
on=["date", "symbol"],
how="left",
)
df["turnover"] = df.v_wap * df.VOLUME
else:
df["VOLUME"] = df["VOLUME"].apply(lambda x: 0 if x == "" else x)
df["turnover"] = df["VOLUME"] * df["SETTLEMENTPRICE"]
df.rename(columns=cons.SHFE_COLUMNS, inplace=True)
df = df[~df["symbol"].str.contains("efp")]
return df[cons.OUTPUT_COLUMNS]
def get_dce_daily(date: str = "20030115") -> pd.DataFrame:
"""
大连商品交易所日交易数据
http://www.dce.com.cn/dalianshangpin/xqsj/tjsj26/rtj/rxq/index.html
:param date: 交易日, e.g., 20200416
:type date: str
:return: 具体交易日的个品种行情数据
:rtype: pandas.DataFrame
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
        warnings.warn("%s is not a trading day" % day.strftime("%Y%m%d"))
return None
url = "http://www.dce.com.cn/publicweb/quotesdata/exportDayQuotesChData.html"
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Content-Length": "86",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "www.dce.com.cn",
"Origin": "http://www.dce.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
}
params = {
"dayQuotes.variety": "all",
"dayQuotes.trade_type": "0",
"year": date[:4],
"month": str(int(date[4:6]) - 1),
"day": date[6:],
"exportFlag": "excel",
}
r = requests.post(url, data=params, headers=headers)
data_df = pd.read_excel(BytesIO(r.content))
data_df = data_df[~data_df["商品名称"].str.contains("小计")]
data_df = data_df[~data_df["商品名称"].str.contains("总计")]
data_df["variety"] = data_df["商品名称"].map(lambda x: cons.DCE_MAP[x])
data_df["symbol"] = data_df["variety"] + data_df["交割月份"].astype(int).astype(str)
del data_df["商品名称"]
del data_df["交割月份"]
data_df.columns = ["open", "high", "low", "close",
"pre_settle", "settle", "_", "_",
"volume", "open_interest", "_", "turnover", "variety", "symbol"]
data_df["date"] = date
data_df = data_df[
["symbol", "date", "open", "high", "low", "close", "volume", "open_interest", "turnover", "settle",
"pre_settle", "variety"]]
    data_df = data_df.applymap(lambda x: str(x).replace(",", ""))  # cast first: numeric cells have no .replace
data_df = data_df.astype({"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
"open_interest": "float",
"turnover": "float",
"settle": "float",
"pre_settle": "float",
})
return data_df
def get_futures_daily(start_date: str = "20210421", end_date: str = "20210426", market: str = "INE", index_bar: bool = False) -> pd.DataFrame:
"""
交易所日交易数据
:param start_date: 开始日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天
:type start_date: str
:param end_date: 结束数据 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天
:type end_date: str
:param market: 'CFFEX' 中金所, 'CZCE' 郑商所, 'SHFE' 上期所, 'DCE' 大商所 之一, 'INE' 上海国际能源交易中心。默认为中金所
:type market: str
:param index_bar: 是否合成指数K线, 默认为 False 否则影响 roll_yield 的计算
:type index_bar: bool
:return: 交易所日交易数据
:rtype: pandas.DataFrame
"""
if market.upper() == "CFFEX":
f = get_cffex_daily
elif market.upper() == "CZCE":
f = get_czce_daily
elif market.upper() == "SHFE":
f = get_shfe_daily
elif market.upper() == "DCE":
f = get_dce_daily
elif market.upper() == "INE":
f = get_ine_daily
else:
print("Invalid Market Symbol")
return None
start_date = (
cons.convert_date(start_date) if start_date is not None else datetime.date.today()
)
end_date = (
cons.convert_date(end_date)
if end_date is not None
else cons.convert_date(cons.get_latest_data_date(datetime.datetime.now()))
)
df_list = list()
while start_date <= end_date:
df = f(date=str(start_date).replace("-", ""))
if df is not None:
df_list.append(df)
if index_bar:
df_list.append(get_futures_index(df))
start_date += datetime.timedelta(days=1)
if len(df_list) > 0:
temp_df = pd.concat(df_list).reset_index(drop=True)
temp_df = temp_df[~temp_df['symbol'].str.contains("efp")]
return temp_df
def get_futures_index(df: pd.DataFrame) -> pd.DataFrame:
"""
指数日交易数据, 指数合成
:param df: 爬到的原始合约日线行情
:type df: pandas.DataFrame
:return: 持仓量加权指数日线行情
:rtype: pandas.DataFrame
"""
index_dfs = []
for var in set(df["variety"]):
df_cut = df[df["variety"] == var]
df_cut = df_cut[df_cut["open_interest"] != 0]
df_cut = df_cut[df_cut["close"] != np.nan]
df_cut = df_cut[df_cut["volume"] != int(0)]
if len(df_cut.index) > 0:
index_df = pd.Series(index=df_cut.columns, dtype="object")
index_df[["volume", "open_interest", "turnover"]] = df_cut[
["volume", "open_interest", "turnover"]
].sum()
if "efp" in df_cut.iloc[-1, 0]:
df_cut = df_cut.iloc[:-1, :]
df_cut.replace("", 0, inplace=True) # 20201026 部分数据开盘价空缺
index_df[["open", "high", "low", "close", "settle", "pre_settle"]] = np.dot(
np.array(
df_cut[["open", "high", "low", "close", "settle", "pre_settle"]]
).T,
np.array((df_cut["open_interest"].astype(float))),
) / np.sum(df_cut["open_interest"].astype(float))
index_df[["date", "variety"]] = df_cut[["date", "variety"]].iloc[0, :]
index_df["symbol"] = index_df["variety"] + "99"
index_dfs.append(index_df)
return pd.concat(index_dfs, axis=1).T
if __name__ == "__main__":
get_futures_daily_df = get_futures_daily(start_date='20200105', end_date='20200201', market="INE", index_bar=False)
print(get_futures_daily_df)
get_dce_daily_df = get_dce_daily(date="20210427")
print(get_dce_daily_df)
get_cffex_daily_df = get_cffex_daily(date="20101101")
print(get_cffex_daily_df)
get_ine_daily_df = get_ine_daily(date="20210426")
print(get_ine_daily_df)
get_czce_daily_df = get_czce_daily(date="20210416")
print(get_czce_daily_df)
get_shfe_daily_df = get_shfe_daily(date="20160104")
print(get_shfe_daily_df)
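
    # Illustrative extra step (not in the original file): synthesize the
    # open-interest-weighted "99" index bars from the SHFE frame fetched above.
    # The variable name get_shfe_index_df is ours, not the library's.
    if get_shfe_daily_df is not None:
        get_shfe_index_df = get_futures_index(get_shfe_daily_df)
        print(get_shfe_index_df)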
| [
"[email protected]"
] | |
55366e829303d9641699634cc44569e2d2923a81 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/bubble_20200722092444.py | 5beeb7c609804901133743187b10d997aa2b27bc | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | def bubble(data):
# TODO: start with the array leneght and decrement each time
for i in range(len()) | [
"[email protected]"
] | |
e3609abff0757e33f926d3770d5d1e1febb9b6ab | b007d88e6726452ffa8fe80300614f311ae5b318 | /LeetCode/facebook/arrays_and_strings/merge_sorted_array.py | 3713e9ba76233fd84aca0a38b42724a3074bffa4 | [] | no_license | jinurajan/Datastructures | ec332b12b8395f42cb769e771da3642f25ba7e7f | 647fea5d2c8122468a1c018c6829b1c08717d86a | refs/heads/master | 2023-07-06T14:42:55.168795 | 2023-07-04T13:23:22 | 2023-07-04T13:23:22 | 76,943,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | """
Merge Sorted Array
Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.
The number of elements initialized in nums1 and nums2 are m and n respectively. You may assume that nums1 has a size equal to m + n such that it has enough space to hold additional elements from nums2.
Input: nums1 = [1,2,3,0,0,0], m = 3, nums2 = [2,5,6], n = 3
Output: [1,2,2,3,5,6]
Input: nums1 = [1], m = 1, nums2 = [], n = 0
Output: [1]
Constraints:
nums1.length == m + n
nums2.length == n
0 <= m, n <= 200
1 <= m + n <= 200
-109 <= nums1[i], nums2[i] <= 109
"""
from typing import List


class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
        # Fill nums1 from the back: place the larger of the two current
        # tail elements at index m + n - 1 and move that pointer left.
        while m - 1 >= 0 and n - 1 >= 0:
            if nums1[m - 1] > nums2[n - 1]:
                nums1[m + n - 1] = nums1[m - 1]
                m -= 1
            else:
                nums1[m + n - 1] = nums2[n - 1]
                n -= 1
        if m - 1 < 0:
            # nums1 is exhausted; the remaining head of nums2 is already sorted
            nums1[:n] = nums2[:n]
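

if __name__ == "__main__":
    # Quick self-check against the examples from the problem statement
    # (illustrative driver, not part of the original snippet).
    nums1 = [1, 2, 3, 0, 0, 0]
    Solution().merge(nums1, 3, [2, 5, 6], 3)
    print(nums1)  # expected [1, 2, 2, 3, 5, 6]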
| [
"[email protected]"
] | |
9b97c3e7ea9e41ffbc45b8c498367f691295fa0e | 58cd392c642ac9408349f03dc72927db6abcce55 | /team2/src/Without_Doubt_Project/venv/lib/python3.6/site-packages/earlgrey/patterns/rpc/client_async.py | a94df1e4d861d470e4928d8f5d3ed96175e519b9 | [] | no_license | icon-hackathons/201902-dapp-competition-bu | 161226eb792425078351c790b8795a0fe5550735 | f3898d31a20f0a85637f150d6187285514528d53 | refs/heads/master | 2020-04-24T07:48:18.891646 | 2019-04-18T01:47:21 | 2019-04-18T01:47:21 | 171,809,810 | 3 | 11 | null | 2019-04-18T01:47:23 | 2019-02-21T06:01:04 | Python | UTF-8 | Python | false | false | 4,824 | py | # Copyright 2017 theloop Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The original code lives in aio_pika.patterns.rpc
import asyncio
import logging
import time
from concurrent import futures
from aio_pika.exchange import ExchangeType
from aio_pika.channel import Channel
from aio_pika.exceptions import UnroutableError
from aio_pika.message import Message, IncomingMessage, DeliveryMode, ReturnedMessage
from aio_pika.tools import create_future
from aio_pika.patterns.base import Base
class ClientAsync(Base):
DLX_NAME = 'rpc.dlx'
def __init__(self, channel: Channel, queue_name):
self.channel = channel
self.queue_name = queue_name
self.queue = None
self.result_queue = None
self.async_futures = {}
self.concurrent_futures = {}
self.func_names = {}
self.routes = {}
self.dlx_exchange = None
@asyncio.coroutine
def initialize_exchange(self):
self.dlx_exchange = yield from self.channel.declare_exchange(
self.DLX_NAME,
type=ExchangeType.HEADERS,
auto_delete=True,
)
@asyncio.coroutine
def initialize_queue(self, **kwargs):
        arguments = kwargs.pop('arguments', {})
        # dict.update() returns None, so mutate in place and assign afterwards
        arguments.update({
            'x-dead-letter-exchange': self.DLX_NAME,
        })
        kwargs['arguments'] = arguments
self.queue = yield from self.channel.declare_queue(name=self.queue_name, **kwargs)
self.result_queue = yield from self.channel.declare_queue(None, exclusive=True, auto_delete=True)
yield from self.result_queue.bind(
self.dlx_exchange, "",
arguments={
"From": self.result_queue.name,
'x-match': 'any',
}
)
yield from self.result_queue.consume(
self._on_result_message, no_ack=True
)
self.channel.add_on_return_callback(self._on_message_returned)
def _on_message_returned(self, message: ReturnedMessage):
correlation_id = int(message.correlation_id) if message.correlation_id else None
future = self.async_futures.pop(correlation_id, None) or self.concurrent_futures.pop(correlation_id, None)
        if future is None or future.done():
            # no pending future (or it already completed): nothing to fail
            logging.warning("Unknown message was returned: %r", message)
        else:
            future.set_exception(UnroutableError([message]))
@asyncio.coroutine
def _on_result_message(self, message: IncomingMessage):
correlation_id = int(message.correlation_id) if message.correlation_id else None
try:
future = self.async_futures[correlation_id] # type: asyncio.Future
except KeyError:
pass
else:
payload = self.deserialize(message.body)
if message.type == 'result':
future.set_result(payload)
elif message.type == 'error':
future.set_exception(payload)
elif message.type == 'call':
future.set_exception(asyncio.TimeoutError("Message timed-out", message))
else:
future.set_exception(RuntimeError("Unknown message type %r" % message.type))
@asyncio.coroutine
def call(self, func_name, kwargs: dict=None, *, expiration: int=None,
priority: int=128, delivery_mode: DeliveryMode=DeliveryMode.NOT_PERSISTENT):
future = self._create_future()
message = Message(
body=self.serialize(kwargs or {}),
type='call',
timestamp=time.time(),
expiration=expiration,
priority=priority,
correlation_id=id(future),
delivery_mode=delivery_mode,
reply_to=self.result_queue.name,
headers={
'From': self.result_queue.name,
'FuncName': func_name
},
)
yield from self.channel.default_exchange.publish(
message, routing_key=self.queue_name, mandatory=True
)
return (yield from future)
def _create_future(self) -> asyncio.Future:
future = create_future(loop=self.channel.loop)
future_id = id(future)
self.async_futures[future_id] = future
future.add_done_callback(lambda f: self.async_futures.pop(future_id, None))
return future
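

# Hedged usage sketch (illustration only, not part of the original module); it
# assumes an already-opened aio_pika channel and a server consuming from a
# hypothetical "rpc_queue", and must run inside the event loop:
#
#     client = ClientAsync(channel, "rpc_queue")
#     yield from client.initialize_exchange()
#     yield from client.initialize_queue(auto_delete=True)
#     result = yield from client.call("multiply", {"x": 6, "y": 7})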
| [
"[email protected]"
] | |
62cbc2bc1f0553ebc160c9187f62311a93b9fbf8 | 11ddf56093b5a821a080249f6fc2e50e34f8970d | /opennsa/backends/dud.py | ad776c4f171d702c54f71fa839ea79587c1479fa | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | igable/opennsa | 067d0043d324e32cebb6967023fbcc0df9ec836c | 5db943d50310345d18113dbfbe2251bb2a1a63f0 | refs/heads/master | 2021-01-15T08:36:33.998695 | 2014-02-11T11:49:53 | 2014-02-13T13:28:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | """
NRM backends which just logs actions performed.
Author: Henrik Thostrup Jensen <[email protected]>
Copyright: NORDUnet (2011)
"""
import string
import random
from twisted.python import log
from twisted.internet import defer
from opennsa.backends.common import genericbackend
def DUDNSIBackend(network_name, network_topology, parent_requester, port_map, configuration):
name = 'DUD NRM %s' % network_name
cm = DUDConnectionManager(name, port_map)
return genericbackend.GenericBackend(network_name, network_topology, cm, parent_requester, name)
class DUDConnectionManager:
def __init__(self, log_system, port_map):
self.log_system = log_system
self.port_map = port_map
def getResource(self, port, label_type, label_value):
return self.port_map[port]
def getTarget(self, port, label_type, label_value):
return self.port_map[port] + '#' + label_value
def createConnectionId(self, source_target, dest_target):
return 'DUD-' + ''.join( [ random.choice(string.hexdigits[:16]) for _ in range(8) ] )
def canSwapLabel(self, label_type):
#return True
return False
def setupLink(self, connection_id, source_target, dest_target, bandwidth):
log.msg('Link %s -> %s up' % (source_target, dest_target), system=self.log_system)
return defer.succeed(None)
#from opennsa import error
#return defer.fail(error.InternalNRMError('Link setup failed'))
def teardownLink(self, connection_id, source_target, dest_target, bandwidth):
log.msg('Link %s -> %s down' % (source_target, dest_target), system=self.log_system)
return defer.succeed(None)
#from opennsa import error
#return defer.fail(error.InternalNRMError('Link teardown failed'))
| [
"[email protected]"
] | |
b4490bd15f0a948e14d1137f93d740488ef938cb | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-2264.py | cd5238d534bb618b3c7d6f7cfcdb66b9da278905 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,351 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
            if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"[email protected]"
] | |
1f2eeaf30554fc2b63cc3c041524f636236bf967 | 2e927b6e4fbb4347f1753f80e9d43c7d01b9cba5 | /Section 20 - Lamba Functions/lambdas.py | 2d5e9687c5496af858166dab6f7b8ab7af2a3de4 | [] | no_license | tielushko/The-Modern-Python-3-Bootcamp | ec3d60d1f2e887d693efec1385a6dbcec4aa8b9a | 17b3156f256275fdba204d514d914731f7038ea5 | refs/heads/master | 2023-01-22T01:04:31.918693 | 2020-12-04T02:41:29 | 2020-12-04T02:41:29 | 262,424,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | def square(num): return num*num
# syntax - lambda parameter: a single expression with no return statement - you usually don't store it in a variable
square2 = lambda num: num * num
add = lambda num: num + num  # despite the name, with one argument this doubles num
print(square2(3))
print(square2.__name__)
#use case - passing in a function as a parameter into another function. short and sweet, single expression
#map - accepts at least two arguments, a function and an "iterable"
""" It runs the lambda for each value in the iterable and returns a map object that can be converted into a different data structure"""
nums = [2,4,6,8,10]
doubles = map(lambda x: x*2, nums) # returns map object
people = ["Darcy", 'Anabel', 'Dana', 'Christina']
peeps = map(lambda name: name.upper(), people)
print(list(peeps))
#Exercise 1: Decrement each element in list by one
def decrement_list(l_for_list):
return list(map(lambda x: x - 1, l_for_list))
#filter - there is a lambda for each value in the iterable
"""
returns filter object, contains only objects that return true to lambda
"""
l = [1,2,3,4]
evens = list(filter(lambda x: x % 2 == 0, l))
print(evens) | [
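
# Combining the two (illustrative extra, not in the original file):
# double only the even numbers of l
print(list(map(lambda x: x * 2, filter(lambda x: x % 2 == 0, l))))  # [4, 8]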
"[email protected]"
] | |
575abf06c7f23652b3a2ac3b595778e90ab5e287 | 663c108dca9c4a30b7dfdc825a8f147ba873da52 | /venv/functions/38GlobalKeywordDeclareAssignError.py | 1dd186e5658999379d9e336cc39512ba1c6f8a73 | [] | no_license | ksrntheja/08-Python-Core | 54c5a1e6e42548c10914f747ef64e61335e5f428 | b5fe25eead8a0fcbab0757b118d15eba09b891ba | refs/heads/master | 2022-10-02T04:11:07.845269 | 2020-06-02T15:23:18 | 2020-06-02T15:23:18 | 261,644,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | def f01():
global a = 10
print(a)
def f02():
print(a)
f01()
f02()
# File "/Code/venv/functions/38GlobalKeywordDeclareAssignError", line <>
# global a = 10
# ^
# SyntaxError: invalid syntax
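# Corrected sketch (illustration, not in the original file): declare the name
# as global on its own line, then assign separately. Kept as comments because
# this module intentionally fails to parse.
#
# def f01_fixed():
#     global a
#     a = 10
#     print(a)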
| [
"[email protected]"
] | |
4ddb5fd6cb5cb9f52754f3743df130f969867b20 | c2f92d75d235ff5ed7b213c02c4a0657545ba02f | /newchama_web/2/newchama/member_message/migrations/0006_auto__chg_field_message_add_time__add_field_favorites_receiver__add_fi.py | 6ac54ad252be403a32024e11e3ce0f1817ed4167 | [] | no_license | cash2one/tstpthon | fab6112691eb15a8a26bd168af3f179913e0c4e0 | fc5c42c024065c7b42bea2b9de1e3874a794a30d | refs/heads/master | 2021-01-20T01:52:06.519021 | 2017-04-14T09:50:55 | 2017-04-14T09:50:55 | 89,338,193 | 0 | 1 | null | 2017-04-25T08:46:06 | 2017-04-25T08:46:06 | null | UTF-8 | Python | false | false | 39,290 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Message.add_time'
db.alter_column('member_message', 'add_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
        # Adding field 'Favorites.receiver'
        db.add_column('member_favorites', 'receiver',
                      self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='member_receiver_favorite', null=True, to=orm['member.Member']),
                      keep_default=False)
def backwards(self, orm):
# Changing field 'Message.add_time'
db.alter_column('member_message', 'add_time', self.gf('django.db.models.fields.DateTimeField')())
# Deleting field 'Favorites.company'
db.delete_column('member_favorites', 'company_id')
# Deleting field 'Favorites.news'
db.delete_column('member_favorites', 'news_id')
# Deleting field 'Favorites.data'
db.delete_column('member_favorites', 'data_id')
# Changing field 'Favorites.add_time'
db.alter_column('member_favorites', 'add_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
models = {
u'adminuser.adminuser': {
'Meta': {'object_name': 'AdminUser'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isactive': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'realname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'role': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['adminuser.Role']", 'symmetrical': 'False'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'adminuser.role': {
'Meta': {'object_name': 'Role'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'area.city': {
'Meta': {'object_name': 'City'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_cn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'province': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['area.Province']"})
},
u'area.continent': {
'Meta': {'object_name': 'Continent'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_cn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'area.country': {
'Meta': {'object_name': 'Country'},
'continent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['area.Continent']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intel_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'name_cn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'area.province': {
'Meta': {'object_name': 'Province'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['area.Country']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_cn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'deal.deal': {
'Meta': {'object_name': 'Deal', 'db_table': "'cvsource_deals'"},
'add_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 29, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '6'}),
'amount_usd': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'city_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'cv1': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cv2': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cv3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'deal_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'equity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'happen_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'market_value': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'province_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'target_company': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'demand.demand': {
'Meta': {'object_name': 'Demand'},
'add_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'audit_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'business_cn': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'business_en': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'company_cities': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['area.City']", 'null': 'True', 'blank': 'True'}),
'company_countries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['area.Country']", 'null': 'True', 'blank': 'True'}),
'company_industries': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'demand_target_company_industries'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['industry.Industry']"}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_provinces': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['area.Province']", 'null': 'True', 'blank': 'True'}),
'company_stock_symbol': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'company_symbol': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'currency_type_financial': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'deal_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'deal_size_enter': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'ebitda': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'employees_count_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'expected_enterprice_value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'expected_enterprice_value_enter': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'expire_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'financial_audit_company_is_must_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'financial_audit_company_name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
'financial_is_must_audit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'financial_year': ('django.db.models.fields.IntegerField', [], {'default': '2015'}),
'growth_three_year': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2'}),
'has_attach': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'income': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'income_enter': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'income_last_phase': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'income_last_phase_enter': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'integrity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'integrity_en': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'intro_cn': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'intro_en': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_list_company': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_recommend': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_suitor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_demand_publisher'", 'to': u"orm['member.Member']"}),
'name_cn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_project_cn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_project_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'net_assets': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pay_currency': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pay_way': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'process': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'profit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'profit_enter': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'profit_last_phase': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'profit_last_phase_enter': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'project_relation': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'project_stage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pv': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'registered_capital': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'remark_cn': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'remark_en': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'service_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stock_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'stock_structure_percentage_type_institutional': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'stock_structure_percentage_type_management': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'stock_structure_percentage_type_private': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'target_companies': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'project_target_companies'", 'symmetrical': 'False', 'to': u"orm['member.Company']"}),
'target_industries': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'demand_push_target_industries'", 'symmetrical': 'False', 'to': u"orm['industry.Industry']"}),
'target_members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'demand_push_target_members'", 'symmetrical': 'False', 'to': u"orm['member.Member']"}),
'total_assets': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'total_assets_last_phase': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'total_profit': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 29, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'valid_day': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'industry.industry': {
'Meta': {'object_name': 'Industry'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'father': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['industry.Industry']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_display': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name_cn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'member.company': {
'Meta': {'object_name': 'Company'},
'add_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address_cn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_en': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'capital_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['area.City']", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['area.Country']", 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'found_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['industry.Industry']", 'null': 'True', 'blank': 'True'}),
'intro_cn': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'intro_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'intro_file': ('django.db.models.fields.files.FileField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'investment_experience_cn': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'investment_experience_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'memo': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name_cn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'parent_company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['member.Company']", 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'province': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['area.Province']", 'null': 'True', 'blank': 'True'}),
'short_name_cn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'short_name_en': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tel': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 29, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'member.member': {
'Meta': {'object_name': 'Member'},
'activecode': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'add_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'default': "'default.jpg'", 'max_length': '100', 'blank': 'True'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['member.Company']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'creator'", 'null': 'True', 'to': u"orm['adminuser.AdminUser']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'expire_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'focus_aspect': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['industry.Industry']", 'symmetrical': 'False'}),
'gender': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro_cn': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'intro_en': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'invite_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'invite_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['member.Member']", 'null': 'True', 'blank': 'True'}),
'last_login_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'linkedin': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'linkedin'", 'null': 'True', 'to': u"orm['member.OtherLogin']"}),
'login_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'mobile_intel': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owner'", 'null': 'True', 'to': u"orm['adminuser.AdminUser']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'position_cn': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'position_en': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tel': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'tel_intel': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'weibo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'weibo'", 'null': 'True', 'to': u"orm['member.OtherLogin']"}),
'weixin': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'weixin'", 'null': 'True', 'to': u"orm['member.OtherLogin']"})
},
u'member.otherlogin': {
'Meta': {'object_name': 'OtherLogin', 'db_table': "'other_login'"},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'add_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expires_in': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'uid': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
u'member_message.favorites': {
'Meta': {'object_name': 'Favorites', 'db_table': "'member_favorites'"},
'add_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['member.Company']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['deal.Deal']", 'null': 'True', 'blank': 'True'}),
'demand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['demand.Demand']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['member.Member']"}),
'news': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['news.News']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['project.Project']", 'null': 'True', 'blank': 'True'}),
'receiver': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member_receiver_favorite'", 'null': 'True', 'to': u"orm['member.Member']"}),
'type_relation': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'member_message.message': {
'Meta': {'object_name': 'Message', 'db_table': "'member_message'"},
'add_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'demand': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['demand.Demand']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_delete': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_read': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['project.Project']", 'null': 'True', 'blank': 'True'}),
'receiver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_message_receiver'", 'to': u"orm['member.Member']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_message_sender'", 'to': u"orm['member.Member']"}),
'type_relation': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'news.news': {
'Meta': {'object_name': 'News', 'db_table': "'cvsource_news'"},
'add_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'project.project': {
'Meta': {'object_name': 'Project'},
'add_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'audit_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'audit_status_2': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'audit_status_3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'company_cities': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['area.City']", 'null': 'True', 'blank': 'True'}),
'company_country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['area.Country']", 'null': 'True', 'blank': 'True'}),
'company_industry': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_company_industry'", 'null': 'True', 'to': u"orm['industry.Industry']"}),
'company_industry_intro_cn': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'company_industry_intro_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'company_intro_cn': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'company_intro_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'company_name_cn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_name_en': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_province': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['area.Province']", 'null': 'True', 'blank': 'True'}),
'company_stock_exchange': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['repository.StockExchange']", 'null': 'True', 'blank': 'True'}),
'company_stock_symbol': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'company_symbol_cn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'company_symbol_en': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'currency_type_financial': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'currency_type_service': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cv1': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cv2': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cv3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'deal_size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ebitda': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'ebitda_2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'ebitda_3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'employees_count_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'expected_enterprice_value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'expire_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'features_cn': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'features_en': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'financial_audit_company_is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'financial_audit_company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'financial_is_audit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'financial_year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'growth_three_year': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2'}),
'has_attach': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'income': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'income_last_phase': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'income_last_phase_2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'income_last_phase_3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'income_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'integrity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'integrity_en': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'intro_cn': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'intro_en': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'is_agent_project': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_follow': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_list_company': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_recommend': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_suitor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lock_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_publisher'", 'to': u"orm['member.Member']"}),
'name_cn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pay_currency': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'price_max': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'price_min': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'process': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'profit': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'profit_last_phase': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'profit_last_phase_2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'profit_last_phase_3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'profit_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'project_relation': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'project_stage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pv': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'registered_capital': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'service_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stock_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'target_companies': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['member.Company']", 'symmetrical': 'False'}),
'target_industries': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['industry.Industry']", 'symmetrical': 'False'}),
'target_members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['member.Member']", 'symmetrical': 'False'}),
'total_assets_last_phase': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'total_assets_last_phase_2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'total_assets_last_phase_3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'total_profit_last_phase': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'total_profit_last_phase_2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'total_profit_last_phase_3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 29, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'upload_file': ('django.db.models.fields.files.FileField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'valid_day': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'repository.stockexchange': {
'Meta': {'object_name': 'StockExchange'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_cn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'short_name_cn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'short_name_en': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['member_message'] | [
"[email protected]"
] | |
2415a47b98f58149d4981c714498f9eb67383b11 | 2baeb9965f64214e5a1478ab5139c53bb363ff50 | /torch/fx/experimental/fx_acc/acc_ops.py | d4927d60e60152bd46115b814696ec9035e576b0 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | Dithn/pytorch | b327d9a7434a0dbadb7bbf148b22cef7d2e4bc1c | 86399d8e0cb72dfd1501fb9be870ac29af38e241 | refs/heads/master | 2023-04-26T12:01:26.043128 | 2021-11-16T19:54:14 | 2021-11-16T19:55:40 | 207,961,157 | 1 | 0 | NOASSERTION | 2021-11-17T02:14:51 | 2019-09-12T03:58:02 | C++ | UTF-8 | Python | false | false | 52,212 | py | # encoding: utf-8
import operator
import torch # isort:skip
from typing import Sequence, Optional, List, cast
import torch.fx.experimental.fx_acc.acc_utils as acc_utils
import torch.nn as nn
from torch.fx.experimental.fx_acc.acc_normalizer import (
register_acc_op,
register_acc_op_mapping,
register_custom_acc_mapper_fn,
)
from torch.fx.experimental.fx_acc.acc_op_properties import (
AccOpProperty,
register_acc_op_properties,
)
from torch.fx.passes.shape_prop import _extract_tensor_metadata
this_arg_is_optional = True
move_to_qparams = True
dont_move_to_qparams = False
@register_acc_op_mapping(op_and_target=("call_function", nn.functional.linear))
@register_acc_op
def linear(*, input, weight, bias):
return nn.functional.linear(**locals())
@register_acc_op_properties(AccOpProperty.quantized)
@register_acc_op
def quantized_linear(*, input, weight, bias, acc_out_ty):
qparams = acc_utils.get_field_from_acc_out_ty(acc_out_ty, "qparams")
return nn.quantized.functional.linear(
input,
weight,
bias,
qparams["scale"],
qparams["zero_point"],
)
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(
op_and_target=("call_method", "flatten"),
arg_replacement_tuples=[
("input", "input"),
("start_dim", "start_dim", this_arg_is_optional),
("end_dim", "end_dim", this_arg_is_optional),
],
)
@register_acc_op_mapping(op_and_target=("call_function", torch.flatten))
@register_acc_op
def flatten(*, input, start_dim=0, end_dim=-1):
return torch.flatten(**locals())
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(
op_and_target=("call_method", "squeeze"),
arg_replacement_tuples=[
("input", "input"),
("dim", "dim", this_arg_is_optional),
],
)
@register_acc_op_mapping(
op_and_target=("call_function", torch.squeeze),
arg_replacement_tuples=[
("input", "input"),
("dim", "dim", this_arg_is_optional),
],
)
@register_acc_op
def squeeze(*, input, dim=None):
if dim is None:
return input.squeeze()
return input.squeeze(dim=dim)
@register_acc_op_mapping(op_and_target=("call_function", nn.functional.max_pool2d))
@register_acc_op
def max_pool2d(
*, input, kernel_size, stride, padding, dilation, ceil_mode, return_indices
):
return nn.functional.max_pool2d(**locals())
@register_acc_op_mapping(
op_and_target=("call_function", nn.functional.adaptive_avg_pool2d)
)
@register_acc_op
def adaptive_avg_pool2d(*, input, output_size):
return nn.functional.adaptive_avg_pool2d(**locals())
@register_acc_op_mapping(op_and_target=("call_function", nn.functional.avg_pool2d))
@register_acc_op
def avg_pool2d(
*,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
):
return nn.functional.avg_pool2d(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.sign))
@register_acc_op
def sign(*, input):
return torch.sign(input)
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op
def size(*, input):
return input.size()
@register_custom_acc_mapper_fn(
op_and_target=("call_function", getattr),
arg_replacement_tuples=[],
)
def custom_getattr_mapper(node: torch.fx.Node, _: nn.Module) -> torch.fx.Node:
"""
    Custom function for mapping a call_function getattr to other ops. Currently it
    only supports a getattr called on a torch.Tensor with attr name "shape", which
    is mapped to acc_ops.size().
"""
# Have to use args here since getattr forces positional args.
input_obj = node.args[0]
attr_name = node.args[1]
assert isinstance(input_obj, torch.fx.Node)
assert (
input_obj.meta["type"] == torch.Tensor
), f"Expected torch.Tensor type for {input_obj.meta['type']}"
assert (
attr_name == "shape"
), f"Only supporting shape getattr for now, not {attr_name}"
with node.graph.inserting_before(node):
size_node = node.graph.call_function(size, kwargs={"input": input_obj})
size_node.meta = node.meta.copy()
return size_node
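# Example (illustrative names, not part of the mapper above): a node produced by
# `x.shape` on a torch.Tensor is rewritten to roughly:
#   shape = acc_ops.size(input=x)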
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "size"),
arg_replacement_tuples=[
("input", "input"),
("dim", "dim", this_arg_is_optional),
],
)
def tensor_size_mapper(node: torch.fx.Node, _: nn.Module) -> torch.fx.Node:
"""
Mapping from Tensor.size() to acc_ops.size. We map size() to acc_ops.size directly
and map size(dim) to acc_ops.size + acc_ops.getitem.
"""
with node.graph.inserting_before(node):
size_node = node.graph.call_function(
size, kwargs={"input": node.kwargs["input"]}
)
if "dim" not in node.kwargs:
size_node.meta = node.meta.copy()
return size_node
size_node.meta["type"] = torch.Size
getitem_node = node.graph.call_function(
getitem, kwargs={"input": size_node, "idx": node.kwargs["dim"]}
)
getitem_node.meta = node.meta.copy()
return getitem_node
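# Example (illustrative names): `x.size()` maps to `acc_ops.size(input=x)`, while
# `x.size(1)` maps to the pair
#   s = acc_ops.size(input=x)
#   out = acc_ops.getitem(input=s, idx=1)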
@register_acc_op_properties(AccOpProperty.pointwise)
@register_acc_op_mapping(op_and_target=("call_function", operator.add))
@register_acc_op_mapping(op_and_target=("call_method", "add"))
@register_acc_op
def add(*, input, other):
return input + other
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_method", "unsqueeze"))
@register_acc_op_mapping(op_and_target=("call_function", torch.unsqueeze))
@register_acc_op
def unsqueeze(*, input, dim):
return torch.unsqueeze(**locals())
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_method", "tile"))
@register_acc_op_mapping(op_and_target=("call_function", torch.tile))
@register_acc_op
def tile(*, input, dims):
return torch.tile(**locals())
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.stack),
arg_replacement_tuples=[
("tensors", "tensors"),
("dim", "dim"),
],
)
def stack_mapper(node: torch.fx.Node, _: nn.Module) -> torch.fx.Node:
"""
Map torch.stack to unsqueeze + cat.
"""
with node.graph.inserting_before(node):
inputs = node.kwargs["tensors"]
unsqueeze_nodes = []
assert isinstance(inputs, Sequence)
for i, t in enumerate(inputs):
new_node = node.graph.create_node(
"call_function",
unsqueeze,
kwargs={"input": t, "dim": node.kwargs["dim"]},
name=f"{node.name}_unsqueeze_{i}",
)
new_node.meta["type"] = torch.Tensor
unsqueeze_nodes.append(new_node)
cat_node = node.graph.create_node(
"call_function",
cat,
kwargs={"tensors": unsqueeze_nodes, "dim": node.kwargs["dim"]},
)
cat_node.meta = node.meta.copy()
return cat_node
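# Example (illustrative names): `torch.stack([a, b], dim=0)` becomes
#   a_u = acc_ops.unsqueeze(input=a, dim=0)
#   b_u = acc_ops.unsqueeze(input=b, dim=0)
#   out = acc_ops.cat(tensors=[a_u, b_u], dim=0)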
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.clamp))
@register_acc_op_mapping(op_and_target=("call_method", "clamp"))
@register_acc_op
def clamp(*, input, min=None, max=None):
return torch.clamp(**locals())
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.cat))
@register_acc_op
def cat(*, tensors, dim):
return torch.cat(**locals())
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.transpose),
arg_replacement_tuples=[
("input", "input"),
("dim0", "dim0"),
("dim1", "dim1"),
],
)
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "transpose"),
arg_replacement_tuples=[
("input", "input"),
("dim0", "dim0"),
("dim1", "dim1"),
],
)
def transpose_mapper(node: torch.fx.Node, _: nn.Module) -> torch.fx.Node:
# Get the dim-permutation/shuffle
shape_as_list = node.meta["tensor_meta"].shape
ranks = len(shape_as_list)
shuffle = list(i for i in range(ranks))
dim0 = cast(int, node.kwargs["dim0"])
dim1 = cast(int, node.kwargs["dim1"])
shuffle[dim0] = dim1
shuffle[dim1] = dim0
# Create the new acc_ops.permute node. Update all uses of the transpose
# node and then delete the transpose node.
with node.graph.inserting_after(node):
permute_node = node.graph.call_function(
the_function=permute,
kwargs={
"input": node.kwargs.get("input"),
"permutation": shuffle,
},
)
permute_node.meta = node.meta.copy()
node.replace_all_uses_with(permute_node)
permute_node.graph.erase_node(node)
return permute_node
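# Example (illustrative): for a rank-3 input, `torch.transpose(x, 0, 2)` becomes
# `acc_ops.permute(input=x, permutation=[2, 1, 0])`, i.e. the identity
# permutation with dim0 and dim1 swapped.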
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_method", "contiguous"))
@register_acc_op
def contiguous(*, input):
return input.contiguous()
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.nn.functional.softmax))
@register_acc_op
def softmax(*, input, dim, dtype):
"""
    The _stacklevel argument is ignored here.
"""
return torch.nn.functional.softmax(**locals())
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.addmm),
arg_replacement_tuples=[
("input", "input"),
("mat1", "mat1"),
("mat2", "mat2"),
("beta", "beta"),
("alpha", "alpha"),
],
)
def addmm_mapper(node: torch.fx.Node, _: nn.Module) -> torch.fx.Node:
"""
    Mapping from torch.addmm to acc_ops.matmul -> acc_ops.add. If alpha or beta is
    not 1, we also insert acc_ops.mul in the right place.
"""
with node.graph.inserting_before(node):
mm_kwargs = {"input": node.kwargs["mat1"], "other": node.kwargs["mat2"]}
mm_node = node.graph.create_node(
"call_function", matmul, kwargs=mm_kwargs, name=f"{node.name}_mm"
)
mm_node.meta = node.meta.copy()
if node.kwargs["alpha"] != 1:
mul_kwargs = {"input": mm_node, "other": node.kwargs["alpha"]}
mm_node = node.graph.create_node(
"call_function", mul, kwargs=mul_kwargs, name=f"{mm_node.name}_mul"
)
mm_node.meta = node.meta.copy()
input_node = node.kwargs["input"]
if node.kwargs["beta"] != 1:
mul_kwargs = {"input": input_node, "other": node.kwargs["beta"]}
new_input_node = node.graph.create_node(
"call_function", mul, kwargs=mul_kwargs, name=f"{node.name}_input_mul"
)
assert isinstance(input_node, torch.fx.Node)
new_input_node.meta = input_node.meta.copy()
input_node = new_input_node
add_kwargs = {"input": mm_node, "other": input_node}
add_node = node.graph.create_node(
"call_function", add, kwargs=add_kwargs, name=f"{node.name}_add"
)
add_node.meta = node.meta.copy()
return add_node
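# Example (illustrative): addmm computes beta * input + alpha * (mat1 @ mat2), so
# `torch.addmm(x, a, b, beta=2, alpha=3)` becomes
#   mm  = acc_ops.matmul(input=a, other=b)
#   mm3 = acc_ops.mul(input=mm, other=3)
#   x2  = acc_ops.mul(input=x, other=2)
#   out = acc_ops.add(input=mm3, other=x2)
# The mul nodes are skipped when alpha/beta are 1.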
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.t),
arg_replacement_tuples=[
("input", "input"),
],
)
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "t"),
arg_replacement_tuples=[
("input", "input"),
],
)
def t_mapper(node: torch.fx.Node, _: nn.Module):
ranks = len(node.meta["tensor_meta"].shape)
shuffle = [1, 0] if (ranks > 1) else [0]
with node.graph.inserting_before(node):
new_node = node.graph.create_node(
"call_function",
permute,
kwargs={"input": node.kwargs["input"], "permutation": shuffle},
)
new_node.meta = node.meta.copy()
return new_node
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(
op_and_target=("call_method", "permute"),
arg_replacement_tuples=[
("input", "input"),
("*", "permutation"),
],
)
@register_acc_op
def permute(*, input, permutation):
return input.permute(*permutation)
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.square),
arg_replacement_tuples=[
("input", "input"),
],
)
def square_mapper(node: torch.fx.Node, _: nn.Module) -> torch.fx.Node:
input_node = node.kwargs["input"]
with node.graph.inserting_before(node):
new_node = node.graph.call_function(
mul, kwargs={"input": input_node, "other": input_node}
)
new_node.meta = node.meta.copy()
return new_node
@register_acc_op_mapping(
op_and_target=("call_function", torch.bmm),
arg_replacement_tuples=[
("input", "input"),
("mat2", "other"),
],
)
@register_acc_op_mapping(op_and_target=("call_function", torch.matmul))
@register_acc_op
def matmul(*, input, other):
return torch.matmul(**locals())
@register_custom_acc_mapper_fn(
op_and_target=("call_function", nn.functional.dropout),
arg_replacement_tuples=[("input", "input")],
)
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "detach"), arg_replacement_tuples=[("input", "input")]
)
def dropout_mapper(node: torch.fx.Node, mod: nn.Module):
"""
Remove dropout node and directly map its input to output.
"""
return node.kwargs["input"]
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(
op_and_target=("call_function", nn.functional.hardtanh),
arg_replacement_tuples=[
("input", "input"),
("min_val", "left"),
("max_val", "right"),
],
)
@register_acc_op
def hardtanh(*, input, left=-1.0, right=1.0):
return nn.functional.hardtanh(input, min_val=left, max_val=right)
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", nn.functional.hardsigmoid))
@register_acc_op
def hardsigmoid(*, input):
return nn.functional.hardsigmoid(input)
@register_custom_acc_mapper_fn(
op_and_target=("call_function", nn.functional.hardswish),
arg_replacement_tuples=[
("input", "input"),
],
)
def hardswish_mapper(node: torch.fx.Node, _: nn.Module) -> torch.fx.Node:
input_node = node.kwargs["input"]
with node.graph.inserting_before(node):
new_sigmoid_node = node.graph.call_function(
hardsigmoid, kwargs={"input": input_node}
)
new_sigmoid_node.meta = node.meta.copy()
new_node = node.graph.call_function(
mul, kwargs={"input": new_sigmoid_node, "other": input_node}
)
new_node.meta = node.meta.copy()
return new_node
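# Example (illustrative): this uses the identity hardswish(x) == x * hardsigmoid(x),
# so `nn.functional.hardswish(x)` becomes
#   h = acc_ops.hardsigmoid(input=x)
#   out = acc_ops.mul(input=h, other=x)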
@register_acc_op_properties(AccOpProperty.quantized)
@register_acc_op_mapping(
op_and_target=("call_function", torch.ops.quantized.add),
arg_replacement_tuples=[
("qa", "input"),
("qb", "other"),
("scale", "scale"),
("zero_point", "zero_point"),
],
kwargs_to_move_to_acc_out_ty=[
("scale", "scale", move_to_qparams),
("zero_point", "zero_point", move_to_qparams),
],
)
@register_acc_op
def quantized_add(*, input, other, acc_out_ty):
qparams = acc_utils.get_field_from_acc_out_ty(acc_out_ty, "qparams")
return torch.ops.quantized.add(
input,
other,
qparams["scale"],
qparams["zero_point"],
)
@register_acc_op_properties(AccOpProperty.quantized)
@register_acc_op_mapping(
op_and_target=("call_function", torch.ops.quantized.mul),
arg_replacement_tuples=[
("qa", "input"),
("qb", "other"),
("scale", "scale"),
("zero_point", "zero_point"),
],
kwargs_to_move_to_acc_out_ty=[
("scale", "scale", move_to_qparams),
("zero_point", "zero_point", move_to_qparams),
],
)
@register_acc_op
def quantized_mul(*, input, other, acc_out_ty):
qparams = acc_utils.get_field_from_acc_out_ty(acc_out_ty, "qparams")
return torch.ops.quantized.mul(
input,
other,
qparams["scale"],
qparams["zero_point"],
)
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_properties(AccOpProperty.quantized)
@register_acc_op_mapping(
op_and_target=("call_function", torch.quantize_per_tensor),
arg_replacement_tuples=[
("input", "input"),
("scale", "scale"),
("zero_point", "zero_point"),
("dtype", "dtype"),
],
kwargs_to_move_to_acc_out_ty=[
("scale", "scale", move_to_qparams),
("zero_point", "zero_point", move_to_qparams),
("dtype", "dtype", dont_move_to_qparams),
],
)
@register_acc_op
def quantize_per_tensor(*, input, acc_out_ty):
qparams = acc_utils.get_field_from_acc_out_ty(acc_out_ty, "qparams")
dtype = acc_utils.get_field_from_acc_out_ty(acc_out_ty, "dtype")
return torch.quantize_per_tensor(
input, qparams["scale"], qparams["zero_point"], dtype
)
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(
op_and_target=("call_function", torch.quantize_per_channel),
arg_replacement_tuples=[
("input", "input"),
("scales", "scales"),
("zero_points", "zero_points"),
("axis", "axis"),
("dtype", "dtype"),
],
kwargs_to_move_to_acc_out_ty=[
("scales", "scale", move_to_qparams),
("zero_points", "zero_point", move_to_qparams),
("axis", "axis", move_to_qparams),
("dtype", "dtype", dont_move_to_qparams),
],
)
@register_acc_op
def quantize_per_channel(*, input, acc_out_ty):
qparams = acc_utils.get_field_from_acc_out_ty(acc_out_ty, "qparams")
dtype = acc_utils.get_field_from_acc_out_ty(acc_out_ty, "dtype")
return torch.quantize_per_channel(
input,
torch.tensor(qparams["scale"]),
torch.tensor(qparams["zero_point"]),
qparams["axis"],
dtype,
) # type: ignore[call-overload]
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_method", "dequantize"))
@register_acc_op_mapping(op_and_target=("call_function", torch.dequantize))
@register_acc_op
def dequantize(*, input):
return torch.dequantize(input)
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary, AccOpProperty.quantized)
@register_acc_op
def rescale_quantize_per_tensor(*, input, acc_out_ty):
d = dequantize(input=input)
return quantize_per_tensor(input=d, acc_out_ty=acc_out_ty)
@register_acc_op_properties(AccOpProperty.unary, AccOpProperty.quantized)
@register_acc_op
def rescale_quantize_per_channel(*, input, acc_out_ty):
d = dequantize(input=input)
return quantize_per_channel(input=d, acc_out_ty=acc_out_ty)
@register_acc_op_properties(AccOpProperty.pointwise)
@register_acc_op_mapping(op_and_target=("call_function", operator.sub))
@register_acc_op
def sub(*, input, other):
return input - other
@register_acc_op_properties(AccOpProperty.pointwise)
@register_acc_op_mapping(op_and_target=("call_function", torch.mul))
@register_acc_op_mapping(op_and_target=("call_function", operator.mul))
@register_acc_op_mapping(op_and_target=("call_method", "mul"))
@register_acc_op
def mul(*, input, other):
return input * other
@register_acc_op_properties(AccOpProperty.pointwise)
@register_acc_op_mapping(op_and_target=("call_function", operator.truediv))
@register_acc_op
def div(*, input, other):
return input / other
@register_acc_op_properties(AccOpProperty.pointwise)
@register_acc_op_mapping(op_and_target=("call_function", torch.pow))
@register_acc_op
def pow(*, input, exponent):
return torch.pow(input, exponent)
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", nn.functional.relu))
@register_acc_op_mapping(
op_and_target=("call_function", torch.relu),
arg_replacement_tuples=[("input", "input")],
)
@register_acc_op_mapping(
op_and_target=("call_method", "relu"),
arg_replacement_tuples=[("input", "input")],
)
@register_acc_op
def relu(*, input, inplace=False):
return nn.functional.relu(**locals())
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.log1p),
arg_replacement_tuples=[
("input", "input"),
],
)
def torch_log1p_mapper(node: torch.fx.Node, _: torch.nn.Module) -> torch.fx.Node:
with node.graph.inserting_before(node):
add_kwargs = {"input": node.kwargs["input"], "other": 1.0}
add_node = node.graph.call_function(add, kwargs=add_kwargs)
add_node.meta = node.meta.copy()
log_kwargs = {"input": add_node}
log_node = node.graph.call_function(log, kwargs=log_kwargs)
log_node.meta = node.meta.copy()
return log_node
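# Example (illustrative): using log1p(x) == log(1 + x), `torch.log1p(x)` becomes
#   y = acc_ops.add(input=x, other=1.0)
#   out = acc_ops.log(input=y)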
@register_acc_op_properties(AccOpProperty.unary)
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "sum"),
arg_replacement_tuples=[
("input", "input"),
("dim", "dim", this_arg_is_optional),
("keepdim", "keepdim", this_arg_is_optional),
("dtype", "dtype", this_arg_is_optional),
],
)
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.sum),
arg_replacement_tuples=[
("input", "input"),
("dim", "dim", this_arg_is_optional),
("keepdim", "keepdim", this_arg_is_optional),
("dtype", "dtype", this_arg_is_optional),
],
)
def add_sum_mapper(node: torch.fx.Node, mod: torch.fx.GraphModule) -> torch.fx.Node:
with node.graph.inserting_before(node):
sum_kwargs = dict(node.kwargs)
if "dim" in sum_kwargs and isinstance(sum_kwargs["dim"], int):
sum_kwargs["dim"] = (sum_kwargs["dim"],)
sum_node = node.graph.call_function(sum, kwargs=sum_kwargs)
sum_node.meta = node.meta.copy()
return sum_node
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op
def sum(*, input, dim=None, keepdim=False, dtype=None):
if dim is not None:
return torch.sum(**locals())
else:
return input.sum(dtype=dtype)
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "max"),
arg_replacement_tuples=[
("input", "input"),
(("dim", "other"), "dim_or_other", this_arg_is_optional),
("keepdim", "keepdim", this_arg_is_optional),
],
)
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.max),
arg_replacement_tuples=[
("input", "input"),
(("dim", "other"), "dim_or_other", this_arg_is_optional),
("keepdim", "keepdim", this_arg_is_optional),
],
)
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "min"),
arg_replacement_tuples=[
("input", "input"),
(("dim", "other"), "dim_or_other", this_arg_is_optional),
("keepdim", "keepdim", this_arg_is_optional),
],
)
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.min),
arg_replacement_tuples=[
("input", "input"),
(("dim", "other"), "dim_or_other", this_arg_is_optional),
("keepdim", "keepdim", this_arg_is_optional),
],
)
def add_maximum_minimum_mapper(
node: torch.fx.Node, mod: torch.fx.GraphModule
) -> torch.fx.Node:
# there are effectively three versions of torch.max / torch.min
# full reduce: torch.max(input) -> Tensor
# dimensional reduce: torch.max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
# elementwise: torch.max(input, other, *, out=None) -> Tensor
    # this mapper handles both the min and the max situations; the helper
    # function below makes the available choices clearer and provides an easy
    # way to look up the right target
def target_map(op, target):
if (op, target) in (("call_method", "max"), ("call_function", torch.max)):
return dict(
full_reduce=max_full_reduce,
dim_reduce=max_dim_reduce,
elementwise=maximum,
)
elif (op, target) in (("call_method", "min"), ("call_function", torch.min)):
return dict(
full_reduce=min_full_reduce,
dim_reduce=min_dim_reduce,
elementwise=minimum,
)
with node.graph.inserting_before(node):
new_targets = target_map(node.op, node.target)
max_kwargs = dict()
max_kwargs["input"] = node.kwargs["input"]
if ("dim_or_other" not in node.kwargs) or (node.kwargs["dim_or_other"] is None):
nt = new_targets["full_reduce"]
max_node = node.graph.call_function(nt, kwargs=max_kwargs)
elif isinstance(node.kwargs["dim_or_other"], int):
nt = new_targets["dim_reduce"]
dim = node.kwargs["dim_or_other"]
max_kwargs["dim"] = dim
max_kwargs["keepdim"] = node.kwargs.get("keepdim", False)
max_node = node.graph.call_function(nt, kwargs=max_kwargs)
else:
other = node.kwargs["dim_or_other"]
assert isinstance(other, torch.fx.Node)
# Lowering path for when provided "other", where we do elem-wise max
nt = new_targets["elementwise"]
max_kwargs["other"] = other
max_node = node.graph.call_function(nt, kwargs=max_kwargs)
max_node.meta = node.meta.copy()
return max_node
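# Example (illustrative) of the three dispatch cases for torch.max:
#   torch.max(x)        -> acc_ops.max_full_reduce(input=x)
#   torch.max(x, dim=1) -> acc_ops.max_dim_reduce(input=x, dim=1, keepdim=False)
#   torch.max(x, y)     -> acc_ops.maximum(input=x, other=y)
# torch.min dispatches analogously to the min_* / minimum variants.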
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op
def max_full_reduce(*, input):
return torch.max(**locals())
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op
def max_dim_reduce(*, input, dim=None, keepdim=False):
return torch.max(**locals())
@register_acc_op_properties(AccOpProperty.pointwise)
@register_acc_op_mapping(op_and_target=("call_function", torch.maximum))
@register_acc_op_mapping(op_and_target=("call_method", "maximum"))
@register_acc_op
def maximum(*, input, other):
return torch.maximum(**locals())
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op
def min_full_reduce(*, input):
return torch.min(input)
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op
def min_dim_reduce(*, input, dim=None, keepdim=False):
return torch.min(input, dim=dim, keepdim=keepdim)
@register_acc_op_properties(AccOpProperty.pointwise)
@register_acc_op_mapping(op_and_target=("call_function", torch.minimum))
@register_acc_op_mapping(op_and_target=("call_method", "minimum"))
@register_acc_op
def minimum(*, input, other):
return torch.minimum(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.sigmoid))
@register_acc_op_mapping(op_and_target=("call_method", "sigmoid"))
@register_acc_op
def sigmoid(*, input):
return torch.sigmoid(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.sinh))
@register_acc_op
def sinh(*, input):
return torch.sinh(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.cosh))
@register_acc_op
def cosh(*, input):
return torch.cosh(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.tanh))
@register_acc_op_mapping(op_and_target=("call_method", "tanh"))
@register_acc_op
def tanh(*, input):
return torch.tanh(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.asin))
@register_acc_op
def asin(*, input):
return torch.asin(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.acos))
@register_acc_op
def acos(*, input):
return torch.acos(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.atan))
@register_acc_op
def atan(*, input):
return torch.atan(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.exp))
@register_acc_op
def exp(*, input):
return torch.exp(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.log))
@register_acc_op
def log(*, input):
return torch.log(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.sqrt))
@register_acc_op
def sqrt(*, input):
return torch.sqrt(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.reciprocal))
@register_acc_op
def reciprocal(*, input):
return torch.reciprocal(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.abs))
@register_acc_op
def abs(*, input):
return torch.abs(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.neg))
@register_acc_op
def neg(*, input):
return torch.neg(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.floor))
@register_acc_op
def floor(*, input):
return torch.floor(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.ceil))
@register_acc_op
def ceil(*, input):
return torch.ceil(**locals())
@register_acc_op_mapping(op_and_target=("call_function", torch.nn.functional.pad))
@register_acc_op
def pad(*, input, pad, mode, value):
return torch.nn.functional.pad(**locals())
@register_acc_op_mapping(op_and_target=("call_function", torch.conv2d))
@register_acc_op
def conv2d(*, input, weight, bias, stride, padding, dilation, groups):
return nn.functional.conv2d(**locals())
@register_acc_op_properties(AccOpProperty.quantized)
@register_acc_op
def quantized_conv2d(
*,
input,
weight,
bias,
stride,
padding,
dilation,
groups,
padding_mode,
acc_out_ty,
):
qparams = acc_utils.get_field_from_acc_out_ty(acc_out_ty, "qparams")
return torch.nn.quantized.functional.conv2d(
input,
weight,
bias,
stride,
padding,
dilation,
groups,
padding_mode,
qparams["scale"],
qparams["zero_point"],
)
@register_acc_op_mapping(op_and_target=("call_function", nn.functional.batch_norm))
@register_acc_op
def batch_norm(
*, input, running_mean, running_var, weight, bias, training, momentum, eps
):
return nn.functional.batch_norm(**locals())
@register_acc_op_mapping(op_and_target=("call_function", nn.functional.layer_norm))
@register_acc_op
def layer_norm(*, input, normalized_shape, weight, bias, eps):
return nn.functional.layer_norm(**locals())
def argmin_max_mapper_impl(node: torch.fx.Node, largest: bool) -> torch.fx.Node:
"""
    Map torch.argmin or torch.argmax to acc_ops.flatten (depending on dim) +
    acc_ops.topk + acc_ops.getitem + acc_ops.squeeze (depending on keepdim).
"""
input_node = node.kwargs["input"]
dim = node.kwargs["dim"]
keepdim = node.kwargs["keepdim"]
if dim is None and keepdim:
raise RuntimeError(
"We currently don't support argmin/argmax with dim=None and keepdim=True"
)
with node.graph.inserting_before(node):
if dim is None:
flatten_kwargs = {
"input": node.kwargs["input"],
"start_dim": 0,
"end_dim": -1,
}
flatten_node = node.graph.call_function(flatten, kwargs=flatten_kwargs)
flatten_node.meta["type"] = torch.Tensor
input_node = flatten_node
dim = -1
topk_kwargs = {
"input": input_node,
"k": 1,
"dim": dim,
"largest": largest,
"sorted": False,
}
topk_node = node.graph.call_function(topk, kwargs=topk_kwargs)
# It's actually more like NamedTuple but tuple here should be fine.
topk_node.meta["type"] = tuple
getitem_kwargs = {"input": topk_node, "idx": 1}
getitem_node = node.graph.call_function(getitem, kwargs=getitem_kwargs)
getitem_node.meta["type"] = torch.Tensor
output_node = getitem_node
if not keepdim:
squeeze_kwargs = {"input": getitem_node, "dim": dim}
output_node = node.graph.call_function(squeeze, kwargs=squeeze_kwargs)
output_node.meta = node.meta.copy()
return output_node
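# Example (illustrative): `torch.argmax(x, dim=1, keepdim=False)` becomes
#   pair = acc_ops.topk(input=x, k=1, dim=1, largest=True, sorted=False)
#   idx = acc_ops.getitem(input=pair, idx=1)
#   out = acc_ops.squeeze(input=idx, dim=1)
# With dim=None the input is flattened first and the reduction runs over dim=-1.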
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.argmin),
arg_replacement_tuples=[
("input", "input"),
("dim", "dim"),
("keepdim", "keepdim"),
],
)
def torch_argmin_mapper(node: torch.fx.Node, _: torch.nn.Module) -> torch.fx.Node:
"""
    Map torch.argmin to acc_ops.flatten (depending on dim) + acc_ops.topk +
    acc_ops.getitem + acc_ops.squeeze (depending on keepdim).
"""
return argmin_max_mapper_impl(node, largest=False)
@register_acc_op_mapping(op_and_target=("call_function", torch.linalg.norm))
@register_acc_op
def linalg_norm(*, input, ord, dim, keepdim):
return torch.linalg.norm(**locals())
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "split"),
arg_replacement_tuples=[
("tensor", "input"),
("split_size_or_sections", "split_size_or_sections"),
("dim", "dim"),
],
)
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.split),
arg_replacement_tuples=[
("tensor", "input"),
("split_size_or_sections", "split_size_or_sections"),
("dim", "dim"),
],
)
def torch_split_mapper(node: torch.fx.Node, mod: nn.Module) -> torch.fx.Node:
"""
    If split_size_or_sections is a list of sections, map the node to slice_tensor
    + tuple_construct. Otherwise, if split_size_or_sections is a single split_size,
    map the node to acc_ops.split.
"""
split_size_or_sections = node.kwargs["split_size_or_sections"]
with node.graph.inserting_before(node):
if isinstance(split_size_or_sections, int):
new_kwargs = {
"input": node.kwargs["input"],
"split_size": split_size_or_sections,
"dim": node.kwargs["dim"],
}
new_node = node.graph.call_function(split, kwargs=new_kwargs)
new_node.meta = node.meta.copy()
return new_node
assert isinstance(split_size_or_sections, Sequence)
start = 0
slice_nodes = []
for i in split_size_or_sections:
assert isinstance(i, int)
new_kwargs = {
"input": node.kwargs["input"],
"dims": (node.kwargs["dim"],),
"starts": (start,),
"stops": (start + i,),
"steps": (1,),
}
new_node = node.graph.call_function(slice_tensor, kwargs=new_kwargs)
new_node.meta["type"] = torch.Tensor
slice_nodes.append(new_node)
start += i
new_node = node.graph.call_function(
tuple_construct, kwargs={"tensors": tuple(slice_nodes)}
)
new_node.meta = node.meta.copy()
return new_node
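# Example (illustrative): `torch.split(x, [2, 3], dim=0)` becomes two slice_tensor
# nodes gathered by tuple_construct:
#   s0 = acc_ops.slice_tensor(input=x, dims=(0,), starts=(0,), stops=(2,), steps=(1,))
#   s1 = acc_ops.slice_tensor(input=x, dims=(0,), starts=(2,), stops=(5,), steps=(1,))
#   out = acc_ops.tuple_construct(tensors=(s0, s1))
# An integer split size maps to a single acc_ops.split node instead.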
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op
def split(*, input, split_size, dim):
return torch.split(input, split_size, dim)
@register_acc_op
def tuple_construct(*, tensors):
return tuple(tensors)
@register_acc_op_properties(AccOpProperty.quantized)
@register_acc_op_mapping(
op_and_target=("call_function", torch.ops.quantized.batch_norm2d),
arg_replacement_tuples=[
("input", "input"),
("weight", "weight"),
("bias", "bias"),
("running_mean", "running_mean"),
("running_var", "running_var"),
("eps", "eps"),
("scale", "scale"),
("zero_point", "zero_point"),
],
kwargs_to_move_to_acc_out_ty=[
("scale", "scale", move_to_qparams),
("zero_point", "zero_point", move_to_qparams),
],
)
@register_acc_op
def quantized_batch_norm2d(
*, input, running_mean, running_var, weight, bias, eps, acc_out_ty
):
qparams = acc_utils.get_field_from_acc_out_ty(acc_out_ty, "qparams")
return torch.ops.quantized.batch_norm2d(
input,
weight,
bias,
running_mean,
running_var,
eps,
qparams["scale"],
qparams["zero_point"],
)
@register_acc_op_mapping(op_and_target=("call_function", nn.functional.embedding_bag))
@register_acc_op
def embedding_bag(
*,
input,
weight,
offsets,
max_norm,
norm_type,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset,
padding_idx,
):
return nn.functional.embedding_bag(**locals())
@register_acc_op_mapping(
op_and_target=(
"call_function",
torch.ops.quantized.embedding_bag_byte_rowwise_offsets,
)
)
@register_acc_op
def embedding_bag_byte_rowwise_offsets(
*,
weight,
indices,
offsets,
scale_grad_by_freq,
mode,
pruned_weights,
per_sample_weights,
compressed_indices_mapping,
include_last_offset,
):
return torch.ops.quantized.embedding_bag_byte_rowwise_offsets(**locals())
@register_acc_op_mapping(
op_and_target=(
"call_function",
torch.ops.quantized.embedding_bag_4bit_rowwise_offsets,
)
)
@register_acc_op
def embedding_bag_4bit_rowwise_offsets(
*,
weight,
indices,
offsets,
scale_grad_by_freq,
mode,
pruned_weights,
per_sample_weights,
compressed_indices_mapping,
include_last_offset,
):
return torch.ops.quantized.embedding_bag_4bit_rowwise_offsets(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.sin))
@register_acc_op
def sin(*, input):
return torch.sin(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.cos))
@register_acc_op
def cos(*, input):
return torch.cos(**locals())
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.tan))
@register_acc_op
def tan(*, input):
return torch.tan(**locals())
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.topk))
@register_acc_op
def topk(*, input, k, dim, largest, sorted):
return torch.topk(**locals())
@register_acc_op_mapping(op_and_target=("call_function", operator.getitem))
@register_acc_op
def getitem(*, input, idx):
return input[idx]
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op
def slice_tensor(*, input, dims, starts, stops, steps):
slices: List[Optional[slice]] = [None for _ in range(input.dim())]
# For all provided dims, extract out a slice for starts/stops/steps.
for idx, dim in enumerate(dims):
slices[dim] = slice(starts[idx], stops[idx], steps[idx])
# For all unspecified dims, default to the full slice.
for idx, s in enumerate(slices):
if s is None:
slices[idx] = slice(None, None, None)
return input[slices]
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.narrow),
arg_replacement_tuples=[
("input", "input"),
("dim", "dim"),
("start", "start"),
("length", "length"),
],
)
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "narrow"),
arg_replacement_tuples=[
("input", "input"),
("dim", "dim"),
("start", "start"),
("length", "length"),
],
)
def custom_narrow_mapper(node: torch.fx.Node, mod: nn.Module) -> torch.fx.Node:
assert isinstance(node.kwargs["start"], int) and isinstance(
node.kwargs["length"], int
)
kwargs = {
"input": node.kwargs["input"],
"dims": (node.kwargs["dim"],),
"starts": (node.kwargs["start"],),
"stops": (node.kwargs["start"] + node.kwargs["length"],),
"steps": (1,),
}
with node.graph.inserting_before(node):
new_node = node.graph.call_function(slice_tensor, kwargs=kwargs)
new_node.meta = node.meta.copy()
return new_node
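# Example (illustrative): `torch.narrow(x, dim=1, start=2, length=3)` becomes
#   acc_ops.slice_tensor(input=x, dims=(1,), starts=(2,), stops=(5,), steps=(1,))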
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(
op_and_target=("call_function", torch.reshape),
arg_replacement_tuples=[
("input", "input"),
("shape", "shape"),
],
kwargs_to_move_to_acc_out_ty=[("shape", "shape")],
)
@register_acc_op_mapping(
op_and_target=("call_method", "view"),
arg_replacement_tuples=[
("input", "input"),
("*", "shape"),
],
kwargs_to_move_to_acc_out_ty=[("shape", "shape")],
)
@register_acc_op
def reshape(*, input, acc_out_ty):
return torch.reshape(
input, tuple(acc_utils.get_field_from_acc_out_ty(acc_out_ty, "shape"))
)
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "reshape"),
arg_replacement_tuples=[
("input", "input"),
("*", "shape"),
],
)
def custom_tensor_reshape_mapper(node: torch.fx.Node, _: nn.Module) -> torch.fx.Node:
"""
For Tensor.reshape node, args could be (input, 1, 2, 3) or (input, (1, 2, 3)).
Here we do some special handling with the `shape` arg in order to map it to
acc_ops.reshape. It also handles the case when `shape` is a list instead of
tuple.
"""
input_node = node.kwargs["input"]
shape = node.kwargs["shape"]
assert isinstance(shape, Sequence)
if isinstance(shape[0], (tuple, list)): # type: ignore[index]
shape = shape[0] # type: ignore[index]
with node.graph.inserting_before(node):
new_node = node.graph.call_function(
reshape,
kwargs={
"input": input_node,
"acc_out_ty": acc_utils.build_raw_tensor_meta(shape=shape),
},
)
new_node.meta = node.meta.copy()
return new_node
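# Example (illustrative): both `x.reshape(1, 2, 3)` and `x.reshape((1, 2, 3))`
# normalize to the same acc_ops.reshape node whose acc_out_ty carries
# shape=(1, 2, 3).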
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op
def to_dtype(input, acc_out_ty):
assert acc_out_ty is not None, "valid acc_out_ty needed"
return input.to(dtype=acc_utils.get_field_from_acc_out_ty(acc_out_ty, "dtype"))
@register_custom_acc_mapper_fn(
op_and_target=("call_method", "to"),
arg_replacement_tuples=[
("input", "input"),
("dtype", "dtype"),
],
)
def custom_tensor_to_mapper(node: torch.fx.Node, _: nn.Module):
dest_dtype = node.kwargs["dtype"]
mem_format = node.kwargs.get("memory_format")
device = node.kwargs.get("device")
assert dest_dtype is not None
assert mem_format is None or mem_format == torch.preserve_format
assert device is None
new_kwargs = {
"input": node.kwargs["input"],
"acc_out_ty": acc_utils.build_raw_tensor_meta(dtype=dest_dtype),
}
with node.graph.inserting_before(node):
new_node = node.graph.create_node(
"call_function", to_dtype, kwargs=new_kwargs, name=node.name
)
new_node.meta = node.meta
return new_node
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.add),
# Note that we may have aliases for inputs here due to issues with deterministically
# knowing the correct target that will be resolved by pytorch.
arg_replacement_tuples=[
(("input", "a"), "input"),
(("other", "b"), "other"),
("alpha", "alpha", this_arg_is_optional),
],
)
def custom_torch_add_mapper(node: torch.fx.Node, mod: nn.Module) -> torch.fx.Node:
"""
Add custom mapping for torch.add because it has an `alpha` parameter which scales
the `other` input, and we want to make that mul a separate node.
"""
with node.graph.inserting_before(node):
# If alpha is in kwargs check if we need to add a mul, and use correct kwargs.
if "alpha" in node.kwargs:
# Add mul node only if it has a numerical impact, i.e. alpha != 1.0.
if node.kwargs["alpha"] != 1.0:
other_node = node.graph.create_node(
"call_function",
mul,
kwargs={
"input": node.kwargs["other"],
"other": node.kwargs["alpha"],
},
name=node.name + "_mul_alpha",
)
other_node.meta = node.meta
else:
other_node = node.kwargs["other"]
add_kwargs = {"input": node.kwargs["input"], "other": other_node}
else:
add_kwargs = node.kwargs
new_node = node.graph.create_node(
"call_function", add, kwargs=add_kwargs, name=node.name
)
new_node.meta = node.meta
return new_node
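# Example (illustrative): `torch.add(a, b, alpha=2)` becomes
#   b2 = acc_ops.mul(input=b, other=2)
#   out = acc_ops.add(input=a, other=b2)
# while plain `torch.add(a, b)` (or alpha == 1.0) maps directly to acc_ops.add.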
@register_custom_acc_mapper_fn(
op_and_target=("call_module", nn.quantized.Linear),
arg_replacement_tuples=[
("input", "input"),
],
)
def packed_quantized_linear_mapper(
node: torch.fx.Node, mod: nn.Module
) -> torch.fx.Node:
"""
    Mapping from nn.quantized.Linear module to acc_op.quantized_linear. We unpack
    weight and bias in this mapper and pass them directly to the linear node.
"""
assert isinstance(node.target, str)
linear_module = dict(mod.named_modules())[node.target]
prefix = node.target.replace(".", "_")
weight_name = f"{prefix}_weight"
bias_name = f"{prefix}_bias"
# Store weight and bias in the main module
mod.register_buffer(weight_name, linear_module.weight())
if linear_module.bias() is not None:
mod.register_buffer(bias_name, linear_module.bias())
with node.graph.inserting_before(node):
# Insert get_attr nodes for weight and bias
get_weight = node.graph.get_attr(weight_name)
get_weight.meta["tensor_meta"] = _extract_tensor_metadata(
linear_module.weight()
)
get_bias = None
if linear_module.bias() is not None:
get_bias = node.graph.get_attr(bias_name)
get_bias.meta["tensor_meta"] = _extract_tensor_metadata(
linear_module.bias()
)
qparams = {"scale": linear_module.scale, "zero_point": linear_module.zero_point}
# Create kwargs for acc_op.quantized_linear
kwargs = {
"input": node.kwargs["input"],
"weight": get_weight,
"bias": get_bias,
"acc_out_ty": acc_utils.build_raw_tensor_meta(qparams=qparams),
}
new_node = node.graph.call_function(quantized_linear, kwargs=kwargs)
new_node.meta = node.meta
return new_node
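# Example (illustrative module name): for a submodule `fc` of type
# nn.quantized.Linear, the packed parameters are unpacked into buffers
# "fc_weight" / "fc_bias", exposed through get_attr nodes, and the call becomes
#   acc_ops.quantized_linear(input=x, weight=fc_weight, bias=fc_bias,
#                            acc_out_ty=<meta carrying the scale/zero_point qparams>)
# The quantized Conv2d mapper below follows the same unpack-then-call pattern.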
@register_custom_acc_mapper_fn(
op_and_target=("call_module", nn.quantized.Conv2d),
arg_replacement_tuples=[
("input", "input"),
],
)
def packed_quantized_conv2d_mapper(
node: torch.fx.Node, mod: nn.Module
) -> torch.fx.Node:
"""
    Mapping from nn.quantized.Conv2d module to acc_op.quantized_conv2d. We unpack
    all the parameters in this mapper and pass them directly to the conv2d node.
"""
assert isinstance(node.target, str)
conv_module = dict(mod.named_modules())[node.target]
prefix = node.target.replace(".", "_")
weight_name = f"{prefix}_weight"
bias_name = f"{prefix}_bias"
# Store weight and bias in the main module
mod.register_buffer(weight_name, conv_module.weight())
if conv_module.bias() is not None:
mod.register_buffer(bias_name, conv_module.bias())
with node.graph.inserting_before(node):
# Insert get_attr nodes for weight and bias
get_weight = node.graph.get_attr(weight_name)
get_weight.meta["tensor_meta"] = _extract_tensor_metadata(conv_module.weight())
get_bias = None
if conv_module.bias() is not None:
get_bias = node.graph.get_attr(bias_name)
get_bias.meta["tensor_meta"] = _extract_tensor_metadata(conv_module.bias())
qparams = {"scale": conv_module.scale, "zero_point": conv_module.zero_point}
        # Create kwargs for acc_op.quantized_conv2d
kwargs = {
"input": node.kwargs["input"],
"weight": get_weight,
"bias": get_bias,
"stride": conv_module.stride,
"padding": conv_module.padding,
"dilation": conv_module.dilation,
"groups": conv_module.groups,
"padding_mode": conv_module.padding_mode,
"acc_out_ty": acc_utils.build_raw_tensor_meta(qparams=qparams),
}
new_node = node.graph.call_function(quantized_conv2d, kwargs=kwargs)
new_node.meta = node.meta
return new_node
@register_custom_acc_mapper_fn(
op_and_target=("call_function", torch.ops.quantized.add_relu),
arg_replacement_tuples=[
("input", "input"),
("other", "other"),
("scale", "scale"),
("zero_point", "zero_point"),
],
)
def add_relu_unfuse_mapper(
node: torch.fx.Node, mod: torch.fx.GraphModule
) -> torch.fx.Node:
with node.graph.inserting_before(node):
qparams = {
"scale": node.kwargs["scale"],
"zero_point": node.kwargs["zero_point"],
}
add_kwargs = {
"input": node.kwargs["input"],
"other": node.kwargs["other"],
"acc_out_ty": acc_utils.build_raw_tensor_meta(qparams=qparams),
}
add_node = node.graph.call_function(quantized_add, kwargs=add_kwargs)
add_node.meta = node.meta.copy()
relu_node = node.graph.call_function(
relu, kwargs={"input": add_node, "inplace": False}
)
relu_node.meta = node.meta
return relu_node
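# Example (illustrative): the fused `torch.ops.quantized.add_relu(a, b, s, zp)`
# is unfused into
#   q = acc_ops.quantized_add(input=a, other=b, acc_out_ty=<qparams s / zp>)
#   out = acc_ops.relu(input=q, inplace=False)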
@register_custom_acc_mapper_fn(
op_and_target=("call_module", nn.intrinsic.quantized.ConvReLU2d),
arg_replacement_tuples=[
("input", "input"),
],
)
def packed_quantized_convrelu2d_mapper(
node: torch.fx.Node, mod: nn.Module
) -> torch.fx.Node:
"""
    Mapping from quantized ConvReLU2d module to acc_op.quantized_conv2d followed
    by acc_op.relu. We use packed_quantized_conv2d_mapper to unpack all the
    parameters in this mapper and pass the returned conv2d node directly to the
    relu node.
"""
with node.graph.inserting_before(node):
# conv2d op
conv2d_node = packed_quantized_conv2d_mapper(node, mod)
# relu op
relu_node = node.graph.call_function(
relu, kwargs={"input": conv2d_node, "inplace": False}
)
relu_node.meta = node.meta
return relu_node
@register_acc_op_properties(AccOpProperty.pointwise, AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.nn.functional.gelu))
@register_acc_op_mapping(op_and_target=("call_method", "gelu"))
@register_acc_op
def gelu(*, input):
return torch.nn.functional.gelu(**locals())
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.cumsum))
@register_acc_op_mapping(op_and_target=("call_method", "cumsum"))
@register_acc_op
def cumsum(*, input, dim, dtype=None):
return torch.cumsum(**locals())
@register_acc_op_properties(AccOpProperty.unary)
@register_acc_op_mapping(op_and_target=("call_function", torch.chunk))
@register_acc_op_mapping(op_and_target=("call_method", "chunk"))
@register_acc_op
def chunk(*, input, chunks, dim=0):
return torch.chunk(input, chunks, dim)
| [
"[email protected]"
] | |
6a89e67c4330943ffd882ccd41577f252a47d876 | d4abaedd47e5a3ce3e8aa7893cb63faaa4064551 | /spoj/bearseg.py | 0e1ced7689b4dc52b995ffd978e630c5f46fadb7 | [] | no_license | shiv125/Competetive_Programming | fc1a39be10c0588e0222efab8809b966430fe20f | 9c949c6d6b5f83a35d6f5f6a169c493f677f4003 | refs/heads/master | 2020-03-15T19:47:12.944241 | 2018-05-06T08:18:11 | 2018-05-06T08:18:11 | 132,317,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | t=input()
MAX=10**5+1
lookup=[0]*MAX
# SPOJ BEARSEG (Python 2): for each test case, find the maximum contiguous
# segment sum modulo p and the number of segments attaining it, by comparing
# prefix sums over all O(n^2) segment pairs.
while t>0:
    t-=1
    n,p=map(int,raw_input().split())
    arr=map(int,raw_input().split())
    # prefix sums: lookup[i] = arr[0] + ... + arr[i]
    lookup[0]=arr[0]
    for i in range(1,n):
        lookup[i]=lookup[i-1]+arr[i]
    count={}
    ma=-1
    # enumerate every segment [i, j] and take its sum modulo p
    for i in range(n):
        for j in range(i,n):
            if i!=0:
                z=(lookup[j]-lookup[i-1]+p)%p
            else:
                z=lookup[j]%p
            ma=max(ma,z)
            if z not in count:
                count[z]=1
            else:
                count[z]+=1
    print ma,count[ma]
| [
"shivdutt@shivdutt-Lenovo-G50-80"
] | shivdutt@shivdutt-Lenovo-G50-80 |
e57399c656326fe5aad20cd04948bccb429a51df | 274eb3a3c4202c86a40e13d2de7c2d6f2a982fcb | /tests/unit/altimeter/aws/resource/ec2/test_vpc_endpoint_service.py | 6823edb92f519c7b51b496fae25c2ff3d1253997 | [
"MIT",
"Python-2.0"
] | permissive | tableau/altimeter | 6199b8827d193946bb0d0d1e29e462fc8749d3e4 | eb7d5d18f3d177973c4105c21be9d251250ca8d6 | refs/heads/master | 2023-08-15T16:21:31.265590 | 2023-07-04T13:13:32 | 2023-07-04T13:13:32 | 212,153,766 | 75 | 25 | MIT | 2023-08-02T02:05:22 | 2019-10-01T17:10:16 | Python | UTF-8 | Python | false | false | 2,670 | py | import unittest
from altimeter.aws.resource.ec2.vpc_endpoint_service import VpcEndpointServiceResourceSpec
from altimeter.core.resource.resource import Resource
from altimeter.core.graph.links import LinkCollection, SimpleLink, TagLink
class TestVpcEndpointServiceResourceSpec(unittest.TestCase):
maxDiff = None
def test_schema_parse(self):
resource_arn = "arn:aws:ec2:us-west-2:111122223333:vpc-endpoint-service/com.amazonaws.vpce.us-west-2.vpce-svc-01234abcd5678ef01"
aws_resource_dict = {
"ServiceType": [{"ServiceType": "Interface"}],
"ServiceId": "vpce-svc-01234abcd5678ef01",
"ServiceName": "com.amazonaws.vpce.us-west-2.vpce-svc-01234abcd5678ef01",
"ServiceState": "Available",
"AvailabilityZones": ["us-west-2a", "us-west-2b"],
"AcceptanceRequired": True,
"ManagesVpcEndpoints": False,
"NetworkLoadBalancerArns": [
"arn:aws:elasticloadbalancing:us-west-2:111122223333:loadbalancer/net/splunk-hwf-lb/1a7ff9c18eeaaf9b"
],
"BaseEndpointDnsNames": ["vpce-svc-01234abcd5678ef01.us-west-2.vpce.amazonaws.com"],
"PrivateDnsNameConfiguration": {},
"Tags": [{"Key": "Name", "Value": "Splunk HEC"}],
}
link_collection = VpcEndpointServiceResourceSpec.schema.parse(
data=aws_resource_dict, context={"account_id": "111122223333", "region": "us-west-2"}
)
resource = Resource(
resource_id=resource_arn,
type=VpcEndpointServiceResourceSpec.type_name,
link_collection=link_collection,
)
expected_resource = Resource(
resource_id="arn:aws:ec2:us-west-2:111122223333:vpc-endpoint-service/com.amazonaws.vpce.us-west-2.vpce-svc-01234abcd5678ef01",
type="vpc-endpoint-service",
link_collection=LinkCollection(
simple_links=(
SimpleLink(pred="service_type", obj="Interface"),
SimpleLink(
pred="service_name",
obj="com.amazonaws.vpce.us-west-2.vpce-svc-01234abcd5678ef01",
),
SimpleLink(pred="service_state", obj="Available"),
SimpleLink(pred="acceptance_required", obj=True),
SimpleLink(pred="availability_zones", obj="us-west-2a"),
SimpleLink(pred="availability_zones", obj="us-west-2b"),
),
tag_links=(TagLink(pred="Name", obj="Splunk HEC"),),
),
)
self.assertEqual(resource, expected_resource)
| [
"[email protected]"
] | |
9e73235485cb9b7abfd44cdb5134347f883ec5e0 | c31d185fb65da94dc15e16869ef1fbaf0dabc736 | /base/views.py | 0ec041e02c0386a93c8196390322f023f393e73e | [] | no_license | Joacoco/nosotrosusamos | 2777907b3df3b4022770f9ccef554d4c2b2c53c8 | 593d087c303f72136f6c425e908631357298c1eb | refs/heads/master | 2020-12-26T05:01:28.674841 | 2013-12-02T13:37:49 | 2013-12-02T13:37:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # -*- coding: utf-8 -*-
""" This file contains some generic purpouse views """
from django.shortcuts import render_to_response
from django.template import RequestContext
def index(request):
""" view that renders a default home"""
return render_to_response('index.jade',
context_instance=RequestContext(request))
| [
"[email protected]"
] | |
d0e7793917bd8df51c7afbc5a6fd7696ae7ba824 | 17a3418a6143ea2d953cf6509aeca7cc6e074686 | /Final-Project/backend/venv/lib/python3.5/site-packages/spotdl/spotdl.py | de67a73c1760448f2bb4d525c6580a9f01e7cf5c | [] | no_license | francolmenar-USYD/Internet-Software-Platforms | addb69a5582a63877e5f3408d64485a7ca942721 | 9e82ab6e7d0f8d4b3d55789cf5cfcd8e524a85df | refs/heads/master | 2022-04-22T02:07:25.419086 | 2020-04-22T10:02:43 | 2020-04-22T10:02:43 | 256,714,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,396 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from spotdl import __version__
from spotdl import const
from spotdl import handle
from spotdl import metadata
from spotdl import convert
from spotdl import internals
from spotdl import spotify_tools
from spotdl import youtube_tools
from slugify import slugify
import spotipy
import urllib.request
import os
import sys
import time
import platform
import pprint
def check_exists(music_file, raw_song, meta_tags):
""" Check if the input song already exists in the given folder. """
log.debug('Cleaning any temp files and checking '
'if "{}" already exists'.format(music_file))
songs = os.listdir(const.args.folder)
for song in songs:
if song.endswith('.temp'):
os.remove(os.path.join(const.args.folder, song))
continue
# check if a song with the same name is already present in the given folder
if os.path.splitext(song)[0] == music_file:
log.debug('Found an already existing song: "{}"'.format(song))
if internals.is_spotify(raw_song):
# check if the already downloaded song has correct metadata
# if not, remove it and download again without prompt
already_tagged = metadata.compare(os.path.join(const.args.folder, song),
meta_tags)
                log.debug('Checking if it is already tagged correctly? {}'.format(
                    already_tagged))
if not already_tagged:
os.remove(os.path.join(const.args.folder, song))
return False
log.warning('"{}" already exists'.format(song))
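            # Three overwrite policies: 'prompt' asks the user per file,
            # 'force' always re-downloads, and 'skip' keeps the existing file.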
if const.args.overwrite == 'prompt':
log.info('"{}" has already been downloaded. '
'Re-download? (y/N): '.format(song))
prompt = input('> ')
if prompt.lower() == 'y':
os.remove(os.path.join(const.args.folder, song))
return False
else:
return True
elif const.args.overwrite == 'force':
os.remove(os.path.join(const.args.folder, song))
log.info('Overwriting "{}"'.format(song))
return False
elif const.args.overwrite == 'skip':
log.info('Skipping "{}"'.format(song))
return True
return False
def download_list(text_file):
""" Download all songs from the list. """
with open(text_file, 'r') as listed:
# read tracks into a list and remove any duplicates
lines = listed.read().splitlines()
lines = list(set(lines))
# ignore blank lines in text_file (if any)
try:
lines.remove('')
except ValueError:
pass
log.info(u'Preparing to download {} songs'.format(len(lines)))
downloaded_songs = []
for number, raw_song in enumerate(lines, 1):
print('')
try:
download_single(raw_song, number=number)
# token expires after 1 hour
except spotipy.client.SpotifyException:
# refresh token when it expires
log.debug('Token expired, generating new one and authorizing')
new_token = spotify_tools.generate_token()
spotify_tools.spotify = spotipy.Spotify(auth=new_token)
download_single(raw_song, number=number)
# detect network problems
except (urllib.request.URLError, TypeError, IOError):
lines.append(raw_song)
# remove the downloaded song from file
internals.trim_song(text_file)
# and append it at the end of file
with open(text_file, 'a') as myfile:
myfile.write(raw_song + '\n')
log.warning('Failed to download song. Will retry after other songs\n')
# wait 0.5 sec to avoid infinite looping
time.sleep(0.5)
continue
downloaded_songs.append(raw_song)
log.debug('Removing downloaded song from text file')
internals.trim_song(text_file)
return downloaded_songs
def download_single(raw_song, number=None):
""" Logic behind downloading a song. """
if internals.is_youtube(raw_song):
log.debug('Input song is a YouTube URL')
content = youtube_tools.go_pafy(raw_song, meta_tags=None)
raw_song = slugify(content.title).replace('-', ' ')
meta_tags = spotify_tools.generate_metadata(raw_song)
else:
meta_tags = spotify_tools.generate_metadata(raw_song)
content = youtube_tools.go_pafy(raw_song, meta_tags)
if content is None:
log.debug('Found no matching video')
return
if const.args.download_only_metadata and meta_tags is None:
log.info('Found no metadata. Skipping the download')
return
# "[number]. [artist] - [song]" if downloading from list
# otherwise "[artist] - [song]"
youtube_title = youtube_tools.get_youtube_title(content, number)
log.info('{} ({})'.format(youtube_title, content.watchv_url))
# generate file name of the song to download
songname = content.title
if meta_tags is not None:
refined_songname = internals.format_string(const.args.file_format,
meta_tags,
slugification=True)
log.debug('Refining songname from "{0}" to "{1}"'.format(songname, refined_songname))
if not refined_songname == ' - ':
songname = refined_songname
else:
log.warning('Could not find metadata')
songname = internals.sanitize_title(songname)
if const.args.dry_run:
return
if not check_exists(songname, raw_song, meta_tags):
# deal with file formats containing slashes to non-existent directories
songpath = os.path.join(const.args.folder, os.path.dirname(songname))
os.makedirs(songpath, exist_ok=True)
input_song = songname + const.args.input_ext
output_song = songname + const.args.output_ext
if youtube_tools.download_song(input_song, content):
print('')
try:
convert.song(input_song, output_song, const.args.folder,
avconv=const.args.avconv, trim_silence=const.args.trim_silence)
except FileNotFoundError:
encoder = 'avconv' if const.args.avconv else 'ffmpeg'
log.warning('Could not find {0}, skipping conversion'.format(encoder))
const.args.output_ext = const.args.input_ext
output_song = songname + const.args.output_ext
if not const.args.input_ext == const.args.output_ext:
os.remove(os.path.join(const.args.folder, input_song))
if not const.args.no_metadata and meta_tags is not None:
metadata.embed(os.path.join(const.args.folder, output_song), meta_tags)
return True
def main():
const.args = handle.get_arguments()
if const.args.version:
print('spotdl {version}'.format(version=__version__))
sys.exit()
internals.filter_path(const.args.folder)
youtube_tools.set_api_key()
const.log = const.logzero.setup_logger(formatter=const._formatter,
level=const.args.log_level)
global log
log = const.log
log.debug('Python version: {}'.format(sys.version))
log.debug('Platform: {}'.format(platform.platform()))
log.debug(pprint.pformat(const.args.__dict__))
try:
if const.args.song:
download_single(raw_song=const.args.song)
elif const.args.list:
download_list(text_file=const.args.list)
elif const.args.playlist:
spotify_tools.write_playlist(playlist_url=const.args.playlist)
elif const.args.album:
spotify_tools.write_album(album_url=const.args.album)
elif const.args.username:
spotify_tools.write_user_playlist(username=const.args.username)
# actually we don't necessarily need this, but yeah...
# explicit is better than implicit!
sys.exit(0)
except KeyboardInterrupt as e:
log.exception(e)
sys.exit(3)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a6084bfe94a88078593f1b6359db3a036eae22de | c6759b857e55991fea3ef0b465dbcee53fa38714 | /tools/nntool/nntool/importer/tflite2/tflite_schema_head/PackOptions.py | 80d5354a3e6d6c474f93bce81eb2b362de5ab65e | [
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] | permissive | GreenWaves-Technologies/gap_sdk | 1b343bba97b7a5ce62a24162bd72eef5cc67e269 | 3fea306d52ee33f923f2423c5a75d9eb1c07e904 | refs/heads/master | 2023-09-01T14:38:34.270427 | 2023-08-10T09:04:44 | 2023-08-10T09:04:44 | 133,324,605 | 145 | 96 | Apache-2.0 | 2023-08-27T19:03:52 | 2018-05-14T07:50:29 | C | UTF-8 | Python | false | false | 2,002 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite_schema_head
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class PackOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = PackOptions()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsPackOptions(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def PackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# PackOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# PackOptions
def ValuesCount(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# PackOptions
def Axis(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
def PackOptionsStart(builder): builder.StartObject(2)
def Start(builder):
return PackOptionsStart(builder)
def PackOptionsAddValuesCount(builder, valuesCount): builder.PrependInt32Slot(0, valuesCount, 0)
def AddValuesCount(builder, valuesCount):
return PackOptionsAddValuesCount(builder, valuesCount)
def PackOptionsAddAxis(builder, axis): builder.PrependInt32Slot(1, axis, 0)
def AddAxis(builder, axis):
return PackOptionsAddAxis(builder, axis)
def PackOptionsEnd(builder): return builder.EndObject()
def End(builder):
return PackOptionsEnd(builder) | [
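# Illustrative usage sketch (comments only; not emitted by flatc, the generated
# accessors above are unchanged; assumes the standard flatbuffers Python runtime):
#   builder = flatbuffers.Builder(0)
#   Start(builder)
#   AddValuesCount(builder, 3)
#   AddAxis(builder, 1)
#   builder.Finish(End(builder))
#   opts = PackOptions.GetRootAs(builder.Output(), 0)
#   assert opts.ValuesCount() == 3 and opts.Axis() == 1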
"[email protected]"
] | |
6b2a348908d35fad77fed82f1775983dd9ffd7cf | cc096d321ab5c6abf54fdcea67f10e77cd02dfde | /flex-backend/pypy/translator/jvm/methods.py | 582a5f04b0c2e370370a0d62c2866cc366e73d0a | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | limweb/flex-pypy | 310bd8fcd6a9ddc01c0b14a92f0298d0ae3aabd2 | 05aeeda183babdac80f9c10fca41e3fb1a272ccb | refs/heads/master | 2021-01-19T22:10:56.654997 | 2008-03-19T23:51:59 | 2008-03-19T23:51:59 | 32,463,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,487 | py | """
Special methods which we hand-generate, such as toString(), equals(), and hash().
These are generally added to methods listing of node.Class, and the
only requirement is that they must have a render(self, gen) method.
"""
import pypy.translator.jvm.generator as jvmgen
import pypy.translator.jvm.typesystem as jvmtype
from pypy.rpython.ootypesystem import ootype, rclass
class BaseDumpMethod(object):
def __init__(self, db, OOCLASS, clsobj):
self.db = db
self.OOCLASS = OOCLASS
self.clsobj = clsobj
self.name = "toString"
self.jargtypes = [clsobj]
self.jrettype = jvmtype.jString
def _print_field_value(self, fieldnm, FIELDOOTY):
self.gen.emit(jvmgen.DUP)
self.gen.load_this_ptr()
fieldobj = self.clsobj.lookup_field(fieldnm)
fieldobj.load(self.gen)
dumpmethod = self.db.toString_method_for_ootype(FIELDOOTY)
self.gen.emit(dumpmethod)
self.gen.emit(jvmgen.PYPYAPPEND)
def _print(self, str):
self.gen.emit(jvmgen.DUP)
self.gen.load_string(str)
self.gen.emit(jvmgen.PYPYAPPEND)
def render(self, gen):
self.gen = gen
gen.begin_function(
self.name, (), self.jargtypes, self.jrettype, static=False)
gen.new_with_jtype(jvmtype.jStringBuilder)
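        # Template method: subclasses emit the body via _render_guts().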
self._render_guts(gen)
gen.emit(jvmgen.OBJTOSTRING)
gen.emit(jvmgen.RETURN.for_type(jvmtype.jString))
gen.end_function()
self.gen = None
class InstanceDumpMethod(BaseDumpMethod):
def _render_guts(self, gen):
clsobj = self.clsobj
genprint = self._print
# Start the dump
genprint("InstanceWrapper(")
genprint("'" + self.OOCLASS._name + "', ")
genprint("{")
for fieldnm, (FIELDOOTY, fielddef) in self.OOCLASS._fields.iteritems():
if FIELDOOTY is ootype.Void: continue
genprint('"'+fieldnm+'":')
print "fieldnm=%r fieldty=%r" % (fieldnm, FIELDOOTY)
# Print the value of the field:
self._print_field_value(fieldnm, FIELDOOTY)
# Dump close
genprint("})")
class RecordDumpMethod(BaseDumpMethod):
def _render_guts(self, gen):
clsobj = self.clsobj
genprint = self._print
# We only render records that represent tuples:
# In that case, the field names look like item0, item1, etc
# Otherwise, we just do nothing... this is because we
# never return records that do not represent tuples from
# a testing function
for f_name in self.OOCLASS._fields:
if not f_name.startswith('item'):
return
# Start the dump
genprint("StructTuple((")
numfields = len(self.OOCLASS._fields)
for i in range(numfields):
f_name = 'item%d' % i
FIELD_TYPE, f_default = self.OOCLASS._fields[f_name]
if FIELD_TYPE is ootype.Void:
continue
# Print the value of the field:
self._print_field_value(f_name, FIELD_TYPE)
genprint(',')
# Decrement indent and dump close
genprint("))")
class ConstantStringDumpMethod(BaseDumpMethod):
""" Just prints out a string """
def __init__(self, clsobj, str):
BaseDumpMethod.__init__(self, None, None, clsobj)
self.constant_string = str
def _render_guts(self, gen):
genprint = self._print
genprint("'" + self.constant_string + "'")
class DeepEqualsMethod(object):
def __init__(self, db, OOCLASS, clsobj):
self.db = db
self.OOCLASS = OOCLASS
self.clsobj = clsobj
self.name = "equals"
self.jargtypes = [clsobj, jvmtype.jObject]
self.jrettype = jvmtype.jBool
def render(self, gen):
self.gen = gen
gen.begin_function(
self.name, (), self.jargtypes, self.jrettype, static=False)
# Label to branch to should the items prove to be unequal
unequal_lbl = gen.unique_label('unequal')
gen.add_comment('check that the argument is of the correct type')
gen.load_jvm_var(self.clsobj, 1)
gen.instanceof(self.OOCLASS)
gen.goto_if_false(unequal_lbl)
gen.add_comment('Cast it to the right type:')
gen.load_jvm_var(self.clsobj, 1)
gen.downcast(self.OOCLASS)
gen.store_jvm_var(self.clsobj, 1)
# If so, compare field by field
for fieldnm, (FIELDOOTY, fielddef) in self.OOCLASS._fields.iteritems():
if FIELDOOTY is ootype.Void: continue
fieldobj = self.clsobj.lookup_field(fieldnm)
gen.add_comment('Compare field %s of type %s' % (fieldnm, FIELDOOTY))
# Load the field from both this and the argument:
gen.load_jvm_var(self.clsobj, 0)
gen.emit(fieldobj)
gen.load_jvm_var(self.clsobj, 1)
gen.emit(fieldobj)
# And compare them:
gen.compare_values(FIELDOOTY, unequal_lbl)
# Return true or false as appropriate
gen.push_primitive_constant(ootype.Bool, True)
gen.return_val(jvmtype.jBool)
gen.mark(unequal_lbl)
gen.push_primitive_constant(ootype.Bool, False)
gen.return_val(jvmtype.jBool)
gen.end_function()
class DeepHashMethod(object):
def __init__(self, db, OOCLASS, clsobj):
self.db = db
self.OOCLASS = OOCLASS
self.clsobj = clsobj
self.name = "hashCode"
self.jargtypes = [clsobj]
self.jrettype = jvmtype.jInt
def render(self, gen):
self.gen = gen
gen.begin_function(
self.name, (), self.jargtypes, self.jrettype, static=False)
# Initial hash: 0
gen.push_primitive_constant(ootype.Signed, 0)
# Get hash of each field
for fieldnm, (FIELDOOTY, fielddef) in self.OOCLASS._fields.iteritems():
if FIELDOOTY is ootype.Void: continue
fieldobj = self.clsobj.lookup_field(fieldnm)
gen.add_comment('Hash field %s of type %s' % (fieldnm, FIELDOOTY))
# Load the field and hash it:
gen.load_jvm_var(self.clsobj, 0)
gen.emit(fieldobj)
gen.hash_value(FIELDOOTY)
# XOR that with the main hash
gen.emit(jvmgen.IXOR)
# Return the final hash
gen.return_val(jvmtype.jInt)
gen.end_function()
| [
"lucio.torre@dbd81ab4-9648-0410-a770-9b81666e587d"
] | lucio.torre@dbd81ab4-9648-0410-a770-9b81666e587d |
72caef93226a4cb05201677c911b3789efdba9fd | d400c32010a414a2f536c5c0a3490c8b8e2e9d5a | /modules/m16e/ods_doc.py | 75ba3b1fea0134d2c04d2e55576de59bd5e2f548 | [
"LicenseRef-scancode-public-domain"
] | permissive | CarlosCorreiaM16e/chirico_cms | 3e521eae8f38b732497a2b808950c6a534e69d4f | 73897cbddb230630e13f22333b9094d0a047acb3 | refs/heads/master | 2020-12-30T07:59:04.100330 | 2020-05-02T12:26:58 | 2020-05-02T12:26:58 | 238,917,321 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | # -*- coding: utf-8 -*-
import sys
import traceback
from m16e.kommon import KDT_CHAR, KDT_INT, KDT_DEC, KDT_PERCENT
from m16e.odslib import odslib
#----------------------------------------------------------------------
class OdsDoc( object ):
#----------------------------------------------------------------------
def __init__(self):
self.doc = odslib.ODS()
#----------------------------------------------------------------------
def set_cell( self,
row,
col,
value,
val_type=KDT_CHAR,
row_span=1,
col_span=1,
font_size=None,
font_bold=False,
h_align=None,
v_align=None ):
# term.printDebug( 'row: %d, col: %d' % (row, col) )
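        # Retry the write a few times, breaking out on the first success;
        # any exception raised by odslib is logged and the write re-attempted.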
for i in range( 6 ):
try:
cell = self.doc.content.getCell( col, row )
if val_type in (KDT_INT, KDT_DEC, KDT_PERCENT):
cell.floatValue( value )
else:
cell.stringValue( str( value ) )
if font_size:
cell.setFontSize( "%dpt" % font_size )
if font_bold:
cell.setBold( True )
if v_align:
cell.setAlignVertical( v_align )
if h_align:
cell.setAlignHorizontal( h_align )
if row_span > 1 or col_span > 1:
self.doc.content.mergeCells( col, row, col_span, row_span )
                # if completed successfully
break
except:
t, v, tb = sys.exc_info()
traceback.print_exception( t, v, tb )
#----------------------------------------------------------------------
def save( self, filename ):
self.doc.save( filename )
| [
"[email protected]"
] | |
5b36220be41518a877ad62d96ad26ec30da2a3a7 | 8ecf4930f9aa90c35e5199d117068b64a8d779dd | /TopQuarkAnalysis/SingleTop/test/W3Jets_part_1_cfg.py | f8bb10ce78a88541601740748c24f63154e9a910 | [] | no_license | fabozzi/ST_44 | 178bd0829b1aff9d299528ba8e85dc7b7e8dd216 | 0becb8866a7c758d515e70ba0b90c99f6556fef3 | refs/heads/master | 2021-01-20T23:27:07.398661 | 2014-04-14T15:12:32 | 2014-04-14T15:12:32 | 18,765,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,779 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("SingleTopSystematics")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
FailPath = cms.untracked.vstring('ProductNotFound','Type Mismatch')
)
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff") ### real data
process.GlobalTag.globaltag = cms.string("START44_V13::All")
#Load B-Tag
#MC measurements from 36X
#process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDBMC36X")
#process.load ("RecoBTag.PerformanceDB.BTagPerformanceDBMC36X")
##Measurements from Fall10
#process.load ("RecoBTag.PerformanceDB.BTagPerformanceDB1011")
#process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDB1011")
#Spring11
process.load ("RecoBTag.PerformanceDB.PoolBTagPerformanceDB1107")
process.load ("RecoBTag.PerformanceDB.BTagPerformanceDB1107")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(20000))
process.source = cms.Source ("PoolSource",
fileNames = cms.untracked.vstring (
'file:/tmp/mmerola/W3JetsMerged.root',
#'rfio:/castor/cern.ch/user/m/mmerola/SingleTop_2012/MergedJune/W3JetsMerged.root',
),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
#eventsToProcess = cms.untracked.VEventRange('1:19517967-1:19517969'),
)
#from W3Jets import *
#process.source.fileNames = W3Jets_ntuple
#process.source.fileNames = cms.untracked.vstring("file:/tmp/mmerola/W3JetsMerged.root")
#PileUpSync
#Output
#process.TFileService = cms.Service("TFileService", fileName = cms.string("/castor/cern.ch/user/m/mmerola/SingleTop_2012/TreesJune/W3Jets_part_1.root"))
process.TFileService = cms.Service("TFileService", fileName = cms.string("/tmp/mmerola/W3Jets_part_1.root"))
#process.TFileService = cms.Service("TFileService", fileName = cms.string("testNoPU.root"))
#process.load("SingleTopAnalyzers_cfi")
process.load("SingleTopRootPlizer_cfi")
process.load("SingleTopFilters_cfi")
#from SingleTopPSets_cfi import *
#from SingleTopPSetsFall11_cfi import *
from SingleTopPSetsFall_cfi import *
process.TreesEle.dataPUFile = cms.untracked.string("pileUpDistr.root")
process.TreesMu.dataPUFile = cms.untracked.string("pileUpDistr.root")
#process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.channelInfo = W3JetsEle
process.TreesMu.channelInfo = W3JetsMu
#process.PlotsEle.channelInfo = W3JetsEle
#process.PlotsMu.channelInfo = W3JetsMu
#process.TreesMu.systematics = cms.untracked.vstring();
#doPU = cms.untracked.bool(False)
#process.WeightProducer.doPU = cms.untracked.bool(False)
#process.TreesMu.doQCD = cms.untracked.bool(False)
#process.TreesEle.doQCD = cms.untracked.bool(False)
#process.TreesMu.doResol = cms.untracked.bool(False)
#process.TreesEle.doResol = cms.untracked.bool(False)
#process.TreesMu.doPU = cms.untracked.bool(False)
#process.TreesEle.doPU = cms.untracked.bool(False)
channel_instruction = "allmc" #SWITCH_INSTRUCTION
#channel_instruction = "allmc" #SWITCH_INSTRUCTION
MC_instruction = True #TRIGGER_INSTRUCTION
process.HLTFilterMu.isMC = MC_instruction
process.HLTFilterEle.isMC = MC_instruction
process.HLTFilterMuOrEle.isMC = MC_instruction
process.HLTFilterMuOrEleMC.isMC = MC_instruction
#process.PUWeightsPath = cms.Path(
# process.WeightProducer
#)
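# channel_instruction selects which trigger filter and tree sequence gets
# scheduled below; the handled values are: allmc, all, mu, ele, muqcd, eleqcd.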
if channel_instruction == "allmc":
# process.TreesMu.doResol = cms.untracked.bool(True)
# process.TreesEle.doResol = cms.untracked.bool(True)
# process.TreesEle.doTurnOn = cms.untracked.bool(True)
process.PathSysMu = cms.Path(
process.HLTFilterMuMC *
process.TreesMu
)
process.PathSysEle = cms.Path(
process.HLTFilterEleMC *
process.TreesEle
)
if channel_instruction == "all":
process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.doPU = cms.untracked.bool(False)
process.TreesMu.doPU = cms.untracked.bool(False)
process.PathSys = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterMuOrEle *
process.TreesMu +
process.TreesEle
)
if channel_instruction == "mu":
process.TreesMu.doPU = cms.untracked.bool(False)
process.TreesMu.doResol = cms.untracked.bool(False)
process.PathSysMu = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
# process.HLTFilterMu *
process.HLTFilterMuData *
process.TreesMu
)
if channel_instruction == "ele":
process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.doPU = cms.untracked.bool(False)
process.TreesEle.doResol = cms.untracked.bool(False)
process.PathSysMu = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterEle *
process.TreesEle
)
if channel_instruction == "muqcd":
process.TreesMu.doPU = cms.untracked.bool(False)
process.TreesMu.doResol = cms.untracked.bool(False)
process.PathSysMu = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterMuQCD *
process.TreesMu
)
if channel_instruction == "eleqcd":
process.TreesEle.doTurnOn = cms.untracked.bool(False)
process.TreesEle.doPU = cms.untracked.bool(False)
process.TreesEle.doResol = cms.untracked.bool(False)
process.TreesEle.isControlSample = cms.untracked.bool(True)
process.PathSysEle = cms.Path(
# process.PlotsMu +
# process.PlotsEle +
process.HLTFilterEleQCD *
process.TreesEle
)
process.source.fileNames = cms.untracked.vstring('file:/tmp/mmerola/W3Jets_part_1Merged.root',) | [
"[email protected]"
] | |
e703967804eb0f8cb2843481d277c85bc8c71b2e | a9fc496e0724866093dbb9cba70a8fdce12b67a9 | /scripts/field/cygnus_Summon.py | 3881deea825b33318e178ca09b9972f8f7486987 | [
"MIT"
] | permissive | ryantpayton/Swordie | b2cd6b605f7f08f725f5e35d23ba3c22ef2ae7c0 | ca6f42dd43f63b1d2e6bb5cdc8fc051c277f326e | refs/heads/master | 2022-12-01T09:46:47.138072 | 2020-03-24T10:32:20 | 2020-03-24T10:32:20 | 253,997,319 | 2 | 0 | MIT | 2022-11-24T08:17:54 | 2020-04-08T05:50:22 | Java | UTF-8 | Python | false | false | 153 | py | if sm.getFieldID() == 271040100:
sm.spawnMob(8850111, -147, 115, False)
elif sm.getFieldID() == 211070102:
sm.spawnMob(8850111, -147, 115, False) | [
"[email protected]"
] | |
840e453e531447a4fa54106cccc088175874e5c4 | ed06e8278f34e1b14b3f228684fbc37b8b66d1bd | /tfmiss/keras/layers/wordvec_test.py | 30b0a63f767a033c3f468276202bbaea5781285e | [
"MIT"
] | permissive | shkarupa-alex/tfmiss | 7fa798e9e3b2e7142dcf0062eb188ae8e2bb69fa | a8ce1bdc57fc72381bdfed96098be3edef5fa535 | refs/heads/master | 2023-08-17T04:11:26.764935 | 2023-08-15T10:00:28 | 2023-08-15T10:00:28 | 190,878,990 | 1 | 2 | MIT | 2023-04-18T06:57:51 | 2019-06-08T11:17:38 | Python | UTF-8 | Python | false | false | 39,892 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from collections import Counter
from keras.saving import register_keras_serializable
from keras.src.testing_infra import test_combinations, test_utils
from tfmiss.keras.layers.wordvec import WordEmbedding, NgramEmbedding, BpeEmbedding, CnnEmbedding, Highway
@test_combinations.run_all_keras_modes
class WordEmbeddingTest(test_combinations.TestCase):
def test_reserved_words(self):
layer = WordEmbedding()
self.assertListEqual(layer._reserved_words, [layer.UNK_MARK])
layer = WordEmbedding(reserved_words=['~TesT~'])
self.assertListEqual(layer._reserved_words, [layer.UNK_MARK, '~TesT~'])
def test_merged_vocab(self):
vocab = ['the', 'fox', 'jumps', '\u1E69']
layer = WordEmbedding(vocab)
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK] + vocab)
layer = WordEmbedding(vocab, reserved_words=['~TesT~'])
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK, '~TesT~'] + vocab)
layer = WordEmbedding(vocab + ['~TesT~'], reserved_words=['~TesT~'])
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK, '~TesT~'] + vocab)
def test_build_vocab(self):
counts = Counter({'the': 4, 'fox': 2, 'jumps': 2, '\u1E9B\u0323': 2, 'dog': 1})
expected = [('the', 4), ('fox', 2), ('jumps', 2), ('\u1E69', 2), ('dog', 1)]
layer = WordEmbedding()
self.assertListEqual(layer.vocab(counts).most_common(), expected)
def test_adapt_1d(self):
data = ['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
layer = WordEmbedding()
result = layer.adapt(data)
result = self.evaluate(result)
result = np.char.decode(result.astype('S'), 'utf-8').tolist()
self.assertListEqual(result, data)
def test_adapt_2d(self):
data = [['[UNK]', 'the', 'fox', 'jumps'], ['over', 'the', 'lazy', 'dog']]
layer = WordEmbedding()
result = layer.adapt(data)
result = self.evaluate(result)
result = np.char.decode(result.astype('S'), 'utf-8').tolist()
self.assertListEqual(result, data)
def test_adapt_ragged(self):
data = [[[b'[UNK]', b'the', b'fox']], [[b'jumps', b'over'], [b'the', b'lazy', b'dog']]]
layer = WordEmbedding()
result = layer.adapt(tf.ragged.constant(data))
result = self.evaluate(result).to_list()
self.assertListEqual(result, data)
def test_adapt_prep_odd(self):
data = ['[UNK]', '\u0041\u030A', 'Fox', 'jump99', 'abcdefghik', 'abcdefghikl']
expected = ['[UNK]', '\u00E5', 'fox', 'jump00', 'abc\uFFFDik', 'abc\uFFFDkl']
layer = WordEmbedding(normalize_unicode='NFC', lower_case=True, zero_digits=True, max_len=6)
result = layer.adapt(data)
result = self.evaluate(result)
result = np.char.decode(result.astype('S'), 'utf-8').tolist()
self.assertListEqual(expected, result)
def test_adapt_prep_even(self):
data = ['[UNK]', '\u0041\u030A', 'Fox', 'jum99', 'abcdefghik', 'abcdefghikl']
expected = ['[UNK]', '\u00E5', 'fox', 'jum00', 'ab\uFFFDik', 'ab\uFFFDkl']
layer = WordEmbedding(normalize_unicode='NFC', lower_case=True, zero_digits=True, max_len=5)
result = layer.adapt(data)
result = self.evaluate(result)
result = np.char.decode(result.astype('S'), 'utf-8').tolist()
self.assertListEqual(expected, result)
def test_preprocess(self):
data = ['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
vocab = ['the', 'fox', 'jumps']
layer = WordEmbedding(vocab)
result = layer.preprocess(data)
result = self.evaluate(result).tolist()
self.assertListEqual(result, [0, 1, 2, 3, 0, 1, 0, 0])
def test_layer(self):
data = np.array(['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog'])
vocab = ['the', 'fox', 'jumps']
inputs = WordEmbedding(vocab).preprocess(data)
inputs = self.evaluate(inputs)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': None, 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': True,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': True, 'max_len': 4, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': ['[UNK]'] + vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab + ['[UNK]'], 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': ['[UNK]', '~TesT~'],
'embed_type': 'dense_auto', 'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_cpu',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'adapt',
'adapt_cutoff': [2], 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4, 'with_prep': True},
input_data=np.array(data),
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
def test_layer_2d(self):
data = [['[UNK]', 'the', 'fox', 'jumps'], ['over', 'the', 'lazy', 'dog']]
vocab = ['the', 'fox', 'jumps']
inputs = WordEmbedding(vocab).preprocess(data)
inputs = self.evaluate(inputs)
test_utils.layer_test(
WordEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 4, 12]
)
def test_layer_ragged(self):
data = tf.ragged.constant([[['[UNK]', 'the', 'fox']], [['jumps', 'over'], ['the', 'lazy', 'dog']]])
vocab = ['the', 'fox', 'jumps']
outputs = WordEmbedding(vocab, 5, with_prep=True)(data)
outputs = self.evaluate(outputs).to_list()
self.assertLen(outputs, 2)
self.assertLen(outputs[0], 1)
self.assertLen(outputs[0][0], 3)
self.assertLen(outputs[0][0][0], 5)
self.assertLen(outputs[1], 2)
self.assertLen(outputs[1][1], 3)
self.assertLen(outputs[1][1][2], 5)
inputs = WordEmbedding(vocab).preprocess(data)
outputs = WordEmbedding(vocab, 5)(inputs)
outputs = self.evaluate(outputs).to_list()
self.assertLen(outputs, 2)
self.assertLen(outputs[0], 1)
self.assertLen(outputs[0][0], 3)
self.assertLen(outputs[0][0][0], 5)
self.assertLen(outputs[1], 2)
self.assertLen(outputs[1][1], 3)
self.assertLen(outputs[1][1][2], 5)
@register_keras_serializable(package='Miss')
class NgramEmbeddingWrap(NgramEmbedding):
def call(self, inputs, **kwargs):
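        # Rebuild a RaggedTensor from the dense, -1-padded input so the base
        # layer sees variable-length subword id lists; BpeEmbeddingWrap and
        # CnnEmbeddingWrap below reuse the same pattern.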
dense_shape = tf.unstack(tf.shape(inputs))
row_lengths = []
for r in range(inputs.shape.rank - 2):
row_lengths.append(tf.repeat(dense_shape[r + 1], dense_shape[r]))
val_mask = inputs >= 0
row_length = tf.reduce_sum(tf.cast(val_mask, 'int32'), axis=-1)
row_length = tf.reshape(row_length, [-1])
row_lengths.append(row_length)
outputs = tf.RaggedTensor.from_nested_row_lengths(inputs[val_mask], row_lengths)
outputs = super().call(outputs, **kwargs)
if isinstance(outputs, tf.RaggedTensor):
outputs = outputs.to_tensor(0.)
outputs.set_shape(self.compute_output_shape(inputs.shape))
return outputs
@test_combinations.run_all_keras_modes
class NgramEmbeddingTest(test_combinations.TestCase):
def test_reserved_words(self):
layer = NgramEmbedding()
self.assertListEqual(layer._reserved_words, [layer.UNK_MARK])
layer = NgramEmbedding(reserved_words=['~TesT~'])
self.assertListEqual(layer._reserved_words, [layer.UNK_MARK, '~TesT~'])
def test_merged_vocab(self):
vocab = [
'<th', '<the', '<the>', 'he>', 'the', 'the>', '<fo', '<fox', '<fox>', '<ju', '<jum', '<jump', '<jumps>',
'<\u1E69>', 'fox', 'fox>', 'jum', 'jump', 'jumps', 'mps', 'mps>', 'ox>', 'ps>', 'ump', 'umps', 'umps>']
layer = NgramEmbedding(vocab)
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK] + vocab)
layer = NgramEmbedding(vocab, reserved_words=['~TesT~'])
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK, '~TesT~'] + vocab)
layer = NgramEmbedding(vocab + ['~TesT~'], reserved_words=['~TesT~'])
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK, '~TesT~'] + vocab)
def test_build_vocab(self):
counts = Counter({'the': 4, 'fox': 2, 'jumps': 2, '\u1E9B\u0323': 2, 'dog': 1})
expected = [
('<th', 4), ('the', 4), ('he>', 4), ('<the', 4), ('the>', 4), ('<the>', 4), ('<fo', 2), ('fox', 2),
('ox>', 2), ('<fox', 2), ('fox>', 2), ('<fox>', 2), ('<ju', 2), ('jum', 2), ('ump', 2), ('mps', 2),
('ps>', 2), ('<jum', 2), ('jump', 2), ('umps', 2), ('mps>', 2), ('<jump', 2), ('jumps', 2), ('umps>', 2),
('<jumps>', 2), ('<\u1E69>', 2), ('<do', 1), ('dog', 1), ('og>', 1), ('<dog', 1), ('dog>', 1), ('<dog>', 1)]
layer = NgramEmbedding()
self.assertListEqual(layer.vocab(counts).most_common(), expected)
def test_adapt_1d(self):
data = ['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
expected = [
[b'[UNK]'],
[b'<th', b'the', b'he>', b'<the', b'the>', b'<the>'],
[b'<fo', b'fox', b'ox>', b'<fox', b'fox>', b'<fox>'],
[b'<ju', b'jum', b'ump', b'mps', b'ps>', b'<jum', b'jump', b'umps', b'mps>', b'<jump', b'jumps', b'umps>',
b'<jumps>'],
[b'<ov', b'ove', b'ver', b'er>', b'<ove', b'over', b'ver>', b'<over', b'over>', b'<over>'],
[b'<th', b'the', b'he>', b'<the', b'the>', b'<the>'],
[b'<la', b'laz', b'azy', b'zy>', b'<laz', b'lazy', b'azy>', b'<lazy', b'lazy>', b'<lazy>'],
[b'<do', b'dog', b'og>', b'<dog', b'dog>', b'<dog>']]
layer = NgramEmbedding()
result = layer.adapt(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_adapt_2d(self):
data = [['[UNK]', 'the', 'fox', 'jumps'], ['over', 'the', 'lazy', 'dog']]
expected = [
[[b'[UNK]'],
[b'<th', b'the', b'he>', b'<the', b'the>', b'<the>'],
[b'<fo', b'fox', b'ox>', b'<fox', b'fox>', b'<fox>'],
[b'<ju', b'jum', b'ump', b'mps', b'ps>', b'<jum', b'jump', b'umps', b'mps>', b'<jump', b'jumps', b'umps>',
b'<jumps>']],
[[b'<ov', b'ove', b'ver', b'er>', b'<ove', b'over', b'ver>', b'<over', b'over>', b'<over>'],
[b'<th', b'the', b'he>', b'<the', b'the>', b'<the>'],
[b'<la', b'laz', b'azy', b'zy>', b'<laz', b'lazy', b'azy>', b'<lazy', b'lazy>', b'<lazy>'],
[b'<do', b'dog', b'og>', b'<dog', b'dog>', b'<dog>']]]
layer = NgramEmbedding()
result = layer.adapt(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_adapt_ragged(self):
data = [[[b'[UNK]', b'the', b'fox']], [[b'jumps', b'over'], [b'the', b'lazy', b'dog']]]
expected = [
[[[b'[UNK]'],
[b'<th', b'the', b'he>', b'<the', b'the>', b'<the>'],
[b'<fo', b'fox', b'ox>', b'<fox', b'fox>', b'<fox>']]],
[[[b'<ju', b'jum', b'ump', b'mps', b'ps>', b'<jum', b'jump', b'umps', b'mps>', b'<jump', b'jumps', b'umps>',
b'<jumps>'],
[b'<ov', b'ove', b'ver', b'er>', b'<ove', b'over', b'ver>', b'<over', b'over>', b'<over>']],
[[b'<th', b'the', b'he>', b'<the', b'the>', b'<the>'],
[b'<la', b'laz', b'azy', b'zy>', b'<laz', b'lazy', b'azy>', b'<lazy', b'lazy>', b'<lazy>'],
[b'<do', b'dog', b'og>', b'<dog', b'dog>', b'<dog>']]]]
layer = NgramEmbedding()
result = layer.adapt(tf.ragged.constant(data))
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_preprocess(self):
data = ['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
vocab = [
'<th', 'the', 'he>', '<the', 'the>', '<the>', '<fo', 'fox', 'ox>', '<fox', 'fox>', '<fox>', '<ju', 'jum',
'ump', 'mps', 'ps>', '<jum', 'jump', 'umps', 'mps>', '<jump', 'jumps', 'umps>', '<jumps>', '<ṩ>', '<do',
'dog', 'og>', '<dog', 'dog>', '<dog>']
expected = [
[0], [1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12], [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[27, 28, 29, 30, 31, 32]]
layer = NgramEmbedding(vocab)
result = layer.preprocess(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_layer(self):
data = np.array(['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog'])
vocab = [
'<th', 'the', 'he>', '<the', 'the>', '<the>', '<fo', 'fox', 'ox>', '<fox', 'fox>', '<fox>', '<ju', 'jum',
'ump', 'mps', 'ps>', '<jum', 'jump', 'umps', 'mps>', '<jump', 'jumps', 'umps>', '<jumps>', '<ṩ>', '<do',
'dog', 'og>', '<dog', 'dog>', '<dog>']
inputs = NgramEmbedding(vocab).preprocess(data)
inputs = inputs.to_tensor(-1)
inputs = self.evaluate(inputs)
test_utils.layer_test(
NgramEmbeddingWrap,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'minn': 2, 'maxn': 5, 'itself': 'always',
'reduction': 'mean', 'reserved_words': None, 'embed_type': 'dense_auto', 'adapt_cutoff': None,
'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
NgramEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'minn': 2, 'maxn': 5, 'itself': 'always',
'reduction': 'mean', 'reserved_words': None, 'embed_type': 'dense_auto', 'adapt_cutoff': None,
'adapt_factor': 4, 'with_prep': True},
input_data=data,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
def test_layer_2d(self):
data = [['[UNK]', 'the', 'fox', 'jumps'], ['over', 'the', 'lazy', 'dog']]
vocab = [
'<th', 'the', 'he>', '<the', 'the>', '<the>', '<fo', 'fox', 'ox>', '<fox', 'fox>', '<fox>', '<ju', 'jum',
'ump', 'mps', 'ps>', '<jum', 'jump', 'umps', 'mps>', '<jump', 'jumps', 'umps>', '<jumps>', '<ṩ>', '<do',
'dog', 'og>', '<dog', 'dog>', '<dog>']
inputs = NgramEmbedding(vocab).preprocess(data)
inputs = inputs.to_tensor(-1)
inputs = self.evaluate(inputs)
test_utils.layer_test(
NgramEmbeddingWrap,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, None, 12]
)
def test_layer_ragged(self):
data = tf.ragged.constant([[['[UNK]', 'the', 'fox']], [['jumps', 'over'], ['the', 'lazy', 'dog']]])
vocab = [
'<th', 'the', 'he>', '<the', 'the>', '<the>', '<fo', 'fox', 'ox>', '<fox', 'fox>', '<fox>', '<ju', 'jum',
'ump', 'mps', 'ps>', '<jum', 'jump', 'umps', 'mps>', '<jump', 'jumps', 'umps>', '<jumps>', '<ṩ>', '<do',
'dog', 'og>', '<dog', 'dog>', '<dog>']
outputs = NgramEmbedding(vocab, 5, with_prep=True)(data)
outputs = self.evaluate(outputs).to_list()
self.assertLen(outputs, 2)
self.assertLen(outputs[0], 1)
self.assertLen(outputs[0][0], 3)
self.assertLen(outputs[0][0][0], 5)
self.assertLen(outputs[1], 2)
self.assertLen(outputs[1][1], 3)
self.assertLen(outputs[1][1][2], 5)
inputs = NgramEmbedding(vocab).preprocess(data)
outputs = NgramEmbedding(vocab, 5)(inputs)
outputs = self.evaluate(outputs).to_list()
self.assertLen(outputs, 2)
self.assertLen(outputs[0], 1)
self.assertLen(outputs[0][0], 3)
self.assertLen(outputs[0][0][0], 5)
self.assertLen(outputs[1], 2)
self.assertLen(outputs[1][1], 3)
self.assertLen(outputs[1][1][2], 5)
@register_keras_serializable(package='Miss')
class BpeEmbeddingWrap(BpeEmbedding):
def call(self, inputs, **kwargs):
dense_shape = tf.unstack(tf.shape(inputs))
row_lengths = []
for r in range(inputs.shape.rank - 2):
row_lengths.append(tf.repeat(dense_shape[r + 1], dense_shape[r]))
val_mask = inputs >= 0
row_length = tf.reduce_sum(tf.cast(val_mask, 'int32'), axis=-1)
row_length = tf.reshape(row_length, [-1])
row_lengths.append(row_length)
outputs = tf.RaggedTensor.from_nested_row_lengths(inputs[val_mask], row_lengths)
outputs = super().call(outputs, **kwargs)
if isinstance(outputs, tf.RaggedTensor):
outputs = outputs.to_tensor(0.)
outputs.set_shape(self.compute_output_shape(inputs.shape))
return outputs
@test_combinations.run_all_keras_modes
class BpeEmbeddingTest(test_combinations.TestCase):
def test_reserved_words(self):
layer = BpeEmbedding()
self.assertListEqual(layer._reserved_words, [layer.UNK_MARK, layer.UNK_CHAR])
layer = BpeEmbedding(reserved_words=['~TesT~'])
self.assertListEqual(layer._reserved_words, [layer.UNK_MARK, layer.UNK_CHAR, '~TesT~'])
def test_merged_vocab(self):
vocab = ['the', '##o', 'f', '##u', '##m', '##s', 'o', '##v', '##er', 'l', '##a', '##y', 'd', '##g']
layer = BpeEmbedding(vocab)
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK, layer.UNK_CHAR] + vocab)
layer = BpeEmbedding(vocab, reserved_words=['~TesT~'])
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK, layer.UNK_CHAR, '~TesT~'] + vocab)
layer = BpeEmbedding(vocab + ['~TesT~'], reserved_words=['~TesT~'])
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK, layer.UNK_CHAR, '~TesT~'] + vocab)
def test_build_vocab(self):
counts = Counter({'the': 4, 'fox': 2, 'jumps': 2, '\u1E9B\u0323': 2, 'dog': 1})
expected = [
('the', 4), ('##o', 3), ('f', 2), ('##x', 2), ('j', 2), ('##u', 2), ('##m', 2), ('##p', 2), ('##s', 2),
('[UNK]', 2), ('d', 1), ('##g', 1)]
layer = BpeEmbedding(vocab_size=4)
self.assertListEqual(layer.vocab(counts).most_common(), expected)
def test_adapt_1d(self):
data = ['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
vocab = ['the', '##o', 'f', '##u', '##m', '##s', 'o', '##v', '##er', 'l', '##a', '##y', 'd', '##g']
expected = [
[b'[UNK]'],
[b'the'],
[b'f', b'##o', b'##[UNK]'],
[b'[UNK]', b'##u', b'##m', b'##[UNK]', b'##s'],
[b'o', b'##v', b'##er'],
[b'the'],
[b'l', b'##a', b'##[UNK]', b'##y'],
[b'd', b'##o', b'##g']]
layer = BpeEmbedding(vocab)
result = layer.adapt(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_adapt_2d(self):
data = [['[UNK]', 'the', 'fox', 'jumps'], ['over', 'the', 'lazy', 'dog']]
vocab = ['the', '##o', 'f', '##u', '##m', '##s', 'o', '##v', '##er', 'l', '##a', '##y', 'd', '##g']
expected = [
[[b'[UNK]'],
[b'the'],
[b'f', b'##o', b'##[UNK]'],
[b'[UNK]', b'##u', b'##m', b'##[UNK]', b'##s']],
[[b'o', b'##v', b'##er'],
[b'the'],
[b'l', b'##a', b'##[UNK]', b'##y'],
[b'd', b'##o', b'##g']]]
layer = BpeEmbedding(vocab)
result = layer.adapt(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_adapt_ragged(self):
data = [[[b'[UNK]', b'the', b'fox']], [[b'jumps', b'over'], [b'the', b'lazy', b'dog']]]
vocab = ['the', '##o', 'f', '##u', '##m', '##s', 'o', '##v', '##er', 'l', '##a', '##y', 'd', '##g']
expected = [
[[[b'[UNK]'],
[b'the'],
[b'f', b'##o', b'##[UNK]']]],
[[[b'[UNK]', b'##u', b'##m', b'##[UNK]', b'##s'],
[b'o', b'##v', b'##er']],
[[b'the'],
[b'l', b'##a', b'##[UNK]', b'##y'],
[b'd', b'##o', b'##g']]]]
layer = BpeEmbedding(vocab)
result = layer.adapt(tf.ragged.constant(data))
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_preprocess(self):
data = ['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
vocab = ['the', '##o', 'f', '##u', '##m', '##s', 'o', '##v', '##er', 'l', '##a', '##y', 'd', '##g']
expected = [[0], [2], [4, 3, 1], [0, 5, 6, 1, 7], [8, 9, 10], [2], [11, 12, 1, 13], [14, 3, 15]]
layer = BpeEmbedding(vocab)
result = layer.preprocess(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_layer(self):
data = np.array(['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog'])
vocab = ['the', '##o', 'f', '##u', '##m', '##s', 'o', '##v', '##er', 'l', '##a', '##y', 'd', '##g']
inputs = BpeEmbedding(vocab).preprocess(data)
inputs = inputs.to_tensor(-1)
inputs = self.evaluate(inputs)
test_utils.layer_test(
BpeEmbeddingWrap,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'reduction': 'mean', 'reserved_words': None,
'embed_type': 'dense_auto', 'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
BpeEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'reduction': 'mean', 'reserved_words': None,
'embed_type': 'dense_auto', 'adapt_cutoff': None, 'adapt_factor': 4, 'with_prep': True},
input_data=data,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
def test_layer_2d(self):
data = [['[UNK]', 'the', 'fox', 'jumps'], ['over', 'the', 'lazy', 'dog']]
vocab = ['the', '##o', 'f', '##u', '##m', '##s', 'o', '##v', '##er', 'l', '##a', '##y', 'd', '##g']
inputs = BpeEmbedding(vocab).preprocess(data)
inputs = inputs.to_tensor(-1)
inputs = self.evaluate(inputs)
test_utils.layer_test(
BpeEmbeddingWrap,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, None, 12]
)
def test_layer_ragged(self):
data = tf.ragged.constant([[['[UNK]', 'the', 'fox']], [['jumps', 'over'], ['the', 'lazy', 'dog']]])
vocab = ['the', '##o', 'f', '##u', '##m', '##s', 'o', '##v', '##er', 'l', '##a', '##y', 'd', '##g']
outputs = BpeEmbedding(vocab, 5, with_prep=True)(data)
outputs = self.evaluate(outputs).to_list()
self.assertLen(outputs, 2)
self.assertLen(outputs[0], 1)
self.assertLen(outputs[0][0], 3)
self.assertLen(outputs[0][0][0], 5)
self.assertLen(outputs[1], 2)
self.assertLen(outputs[1][1], 3)
self.assertLen(outputs[1][1][2], 5)
inputs = BpeEmbedding(vocab).preprocess(data)
outputs = BpeEmbedding(vocab, 5)(inputs)
outputs = self.evaluate(outputs).to_list()
self.assertLen(outputs, 2)
self.assertLen(outputs[0], 1)
self.assertLen(outputs[0][0], 3)
self.assertLen(outputs[0][0][0], 5)
self.assertLen(outputs[1], 2)
self.assertLen(outputs[1][1], 3)
self.assertLen(outputs[1][1][2], 5)
@register_keras_serializable(package='Miss')
class CnnEmbeddingWrap(CnnEmbedding):
def call(self, inputs, **kwargs):
dense_shape = tf.unstack(tf.shape(inputs))
row_lengths = []
for r in range(inputs.shape.rank - 2):
row_lengths.append(tf.repeat(dense_shape[r + 1], dense_shape[r]))
val_mask = inputs >= 0
row_length = tf.reduce_sum(tf.cast(val_mask, 'int32'), axis=-1)
row_length = tf.reshape(row_length, [-1])
row_lengths.append(row_length)
outputs = tf.RaggedTensor.from_nested_row_lengths(inputs[val_mask], row_lengths)
outputs = super().call(outputs, **kwargs)
if isinstance(outputs, tf.RaggedTensor):
outputs = outputs.to_tensor(0.)
outputs.set_shape(self.compute_output_shape(inputs.shape))
return outputs
@test_combinations.run_all_keras_modes
class CnnEmbeddingTest(test_combinations.TestCase):
def test_reserved_words(self):
layer = CnnEmbedding()
self.assertListEqual(layer._reserved_words, [layer.UNK_MARK, layer.BOW_MARK, layer.EOW_MARK])
layer = CnnEmbedding(reserved_words=['~TesT~'])
self.assertListEqual(layer._reserved_words, [layer.UNK_MARK, layer.BOW_MARK, layer.EOW_MARK, '~TesT~'])
def test_merged_vocab(self):
vocab = ['e', 'o', 't', 'h']
layer = CnnEmbedding(vocab)
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK, layer.BOW_MARK, layer.EOW_MARK] + vocab)
layer = CnnEmbedding(vocab, reserved_words=['~TesT~'])
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK, layer.BOW_MARK, layer.EOW_MARK, '~TesT~'] + vocab)
layer = CnnEmbedding(vocab + ['~TesT~'], reserved_words=['~TesT~'])
self.assertListEqual(layer._vocabulary, [layer.UNK_MARK, layer.BOW_MARK, layer.EOW_MARK, '~TesT~'] + vocab)
def test_build_vocab(self):
counts = Counter({'the': 4, 'fox': 2, 'jumps': 2, '\u1E9B\u0323': 2, 'dog': 1})
expected = [
('[BOW]', 11), ('[EOW]', 11), ('t', 4), ('h', 4), ('e', 4), ('o', 3), ('f', 2), ('x', 2), ('j', 2),
('u', 2), ('m', 2), ('p', 2), ('s', 2), ('\u1E69', 2), ('d', 1), ('g', 1)]
layer = CnnEmbedding()
self.assertListEqual(layer.vocab(counts).most_common(), expected)
def test_adapt_1d(self):
data = ['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
expected = [
[b'[BOW]', b'[UNK]', b'[EOW]'],
[b'[BOW]', b't', b'h', b'e', b'[EOW]'],
[b'[BOW]', b'f', b'o', b'x', b'[EOW]'],
[b'[BOW]', b'j', b'u', b'm', b'p', b's', b'[EOW]'],
[b'[BOW]', b'o', b'v', b'e', b'r', b'[EOW]'],
[b'[BOW]', b't', b'h', b'e', b'[EOW]'],
[b'[BOW]', b'l', b'a', b'z', b'y', b'[EOW]'],
[b'[BOW]', b'd', b'o', b'g', b'[EOW]']]
layer = CnnEmbedding()
result = layer.adapt(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_adapt_2d(self):
data = [['[UNK]', 'the', 'fox', 'jumps'], ['over', 'the', 'lazy', 'dog']]
expected = [
[[b'[BOW]', b'[UNK]', b'[EOW]'],
[b'[BOW]', b't', b'h', b'e', b'[EOW]'],
[b'[BOW]', b'f', b'o', b'x', b'[EOW]'],
[b'[BOW]', b'j', b'u', b'm', b'p', b's', b'[EOW]']],
[[b'[BOW]', b'o', b'v', b'e', b'r', b'[EOW]'],
[b'[BOW]', b't', b'h', b'e', b'[EOW]'],
[b'[BOW]', b'l', b'a', b'z', b'y', b'[EOW]'],
[b'[BOW]', b'd', b'o', b'g', b'[EOW]']]]
layer = CnnEmbedding()
result = layer.adapt(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_adapt_ragged(self):
data = [[['[UNK]', 'the', 'fox']], [['jumps', 'over'], ['the', 'lazy', 'dog']]]
expected = [
[[[b'[BOW]', b'[UNK]', b'[EOW]'],
[b'[BOW]', b't', b'h', b'e', b'[EOW]'],
[b'[BOW]', b'f', b'o', b'x', b'[EOW]']]],
[[[b'[BOW]', b'j', b'u', b'm', b'p', b's', b'[EOW]'],
[b'[BOW]', b'o', b'v', b'e', b'r', b'[EOW]']],
[[b'[BOW]', b't', b'h', b'e', b'[EOW]'],
[b'[BOW]', b'l', b'a', b'z', b'y', b'[EOW]'],
[b'[BOW]', b'd', b'o', b'g', b'[EOW]']]]]
layer = CnnEmbedding()
result = layer.adapt(tf.ragged.constant(data))
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_adapt_prep_odd(self):
data = ['[UNK]', 'the', 'fox', '0123456789abcdefghij', '0123456789abcdefghijk']
expected = [
[b'[BOW]', b'[UNK]', b'[EOW]'],
[b'[BOW]', b't', b'h', b'e', b'[EOW]'],
[b'[BOW]', b'f', b'o', b'x', b'[EOW]'],
[b'[BOW]', b'0', b'1', b'2', b'3', b'\xef\xbf\xbd', b'h', b'i', b'j', b'[EOW]'],
[b'[BOW]', b'0', b'1', b'2', b'3', b'\xef\xbf\xbd', b'i', b'j', b'k', b'[EOW]']]
layer = CnnEmbedding(max_len=8)
result = layer.adapt(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_adapt_prep_even(self):
data = ['[UNK]', 'the', 'fox', '0123456789abcdefghij', '0123456789abcdefghijk']
expected = [
[b'[BOW]', b'[UNK]', b'[EOW]'],
[b'[BOW]', b't', b'h', b'e', b'[EOW]'],
[b'[BOW]', b'f', b'o', b'x', b'[EOW]'],
[b'[BOW]', b'0', b'1', b'2', b'\xef\xbf\xbd', b'h', b'i', b'j', b'[EOW]'],
[b'[BOW]', b'0', b'1', b'2', b'\xef\xbf\xbd', b'i', b'j', b'k', b'[EOW]']]
layer = CnnEmbedding(max_len=7)
result = layer.adapt(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_preprocess(self):
data = ['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
vocab = ['e', 'o', 't', 'h']
expected = [
[1, 0, 2], [1, 5, 6, 3, 2], [1, 0, 4, 0, 2], [1, 0, 0, 0, 0, 0, 2], [1, 4, 0, 3, 0, 2], [1, 5, 6, 3, 2],
[1, 0, 0, 0, 0, 2], [1, 0, 4, 0, 2]]
layer = CnnEmbedding(vocab)
result = layer.preprocess(data)
result = self.evaluate(result).to_list()
self.assertListEqual(expected, result)
def test_layer(self):
data = np.array(['[UNK]', 'the', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog'])
vocab = ['e', 'o', 't', 'h']
inputs = CnnEmbedding(vocab).preprocess(data)
inputs = inputs.to_tensor(-1)
inputs = self.evaluate(inputs)
test_utils.layer_test(
CnnEmbeddingWrap,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'filters': [32, 32, 64, 128, 256, 512, 1024],
'kernels': [1, 2, 3, 4, 5, 6, 7], 'char_dim': 16, 'activation': 'tanh', 'highways': 2,
'normalize_unicode': 'NFKC', 'lower_case': False, 'zero_digits': False, 'max_len': 50,
'reserved_words': None, 'embed_type': 'dense_auto', 'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
test_utils.layer_test(
CnnEmbedding,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'filters': [32, 32, 64, 128, 256, 512, 1024],
'kernels': [1, 2, 3, 4, 5, 6, 7], 'char_dim': 16, 'activation': 'tanh', 'highways': 2,
'normalize_unicode': 'NFKC', 'lower_case': False, 'zero_digits': False, 'max_len': 50,
'reserved_words': None, 'embed_type': 'dense_auto', 'adapt_cutoff': None, 'adapt_factor': 4,
'with_prep': True},
input_data=data,
expected_output_dtype='float32',
expected_output_shape=[None, 12]
)
def test_layer_2d(self):
data = [['[UNK]', 'the', 'fox', 'jumps'], ['over', 'the', 'lazy', 'dog']]
vocab = ['e', 'o', 't', 'h']
inputs = CnnEmbedding(vocab).preprocess(data)
inputs = inputs.to_tensor(-1)
inputs = self.evaluate(inputs)
test_utils.layer_test(
CnnEmbeddingWrap,
kwargs={'vocabulary': vocab, 'output_dim': 12, 'normalize_unicode': 'NFKC', 'lower_case': False,
'zero_digits': False, 'max_len': None, 'reserved_words': None, 'embed_type': 'dense_auto',
'adapt_cutoff': None, 'adapt_factor': 4},
input_data=inputs,
expected_output_dtype='float32',
expected_output_shape=[None, None, 12]
)
def test_layer_ragged(self):
data = tf.ragged.constant([[['[UNK]', 'the', 'fox']], [['jumps', 'over'], ['the', 'lazy', 'dog']]])
vocab = ['e', 'o', 't', 'h']
outputs = CnnEmbedding(vocab, 5, with_prep=True)(data)
outputs = self.evaluate(outputs).to_list()
self.assertLen(outputs, 2)
self.assertLen(outputs[0], 1)
self.assertLen(outputs[0][0], 3)
self.assertLen(outputs[0][0][0], 5)
self.assertLen(outputs[1], 2)
self.assertLen(outputs[1][1], 3)
self.assertLen(outputs[1][1][2], 5)
inputs = CnnEmbedding(vocab).preprocess(data)
outputs = CnnEmbedding(vocab, 5)(inputs)
outputs = self.evaluate(outputs).to_list()
self.assertLen(outputs, 2)
self.assertLen(outputs[0], 1)
self.assertLen(outputs[0][0], 3)
self.assertLen(outputs[0][0][0], 5)
self.assertLen(outputs[1], 2)
self.assertLen(outputs[1][1], 3)
self.assertLen(outputs[1][1][2], 5)
@test_combinations.run_all_keras_modes
class HighwayTest(test_combinations.TestCase):
def test_layer(self):
test_utils.layer_test(
Highway,
kwargs={},
input_shape=[2, 16, 8],
input_dtype='float32',
expected_output_dtype='float32',
expected_output_shape=[None, 16, 8]
)
test_utils.layer_test(
Highway,
kwargs={},
input_shape=[2, 16, 8, 4],
input_dtype='float32',
expected_output_dtype='float32',
expected_output_shape=[None, 16, 8, 4]
)
if __name__ == "__main__":
tf.test.main()
| [
"[email protected]"
] | |
6e7da8d8db94998e492bafe969437e9b40e5a8f4 | 4d0f3e2d7455f80caea978e4e70621d50c6c7561 | /MongoDB/CRUD/QueryingByObjectId.py | efc3f90c9e14317b40ca16aed85eda0d2c3df484 | [] | no_license | mhdr/PythonSamples | 66940ee2353872d2947c459e3865be42140329c6 | 1a9dccc05962033ea02b081a39cd67c1e7b29d0c | refs/heads/master | 2020-04-14T01:10:13.033940 | 2016-05-28T15:33:52 | 2016-05-28T15:33:52 | 30,691,539 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | __author__ = 'mahmood'
from pymongo import MongoClient
client=MongoClient()
# drop last db
client.drop_database("test-db")
# database
db=client["test-db"]
# table
people=db["People"]
new_person1={"FirstName" : "Mahmood",
"LastName" : "Ramzani",
"Gender": "Male",
"BirthDate" : {"Year" : 1985 , "Month" : 5 , "Day" : 22}}
new_person2={"FirstName" : "Javad",
"LastName" : "Najafi",
"Gender": "Male",
"BirthDate" : {"Year" : 1984 , "Month" : 7 , "Day" : 13}}
new_person3={"FirstName" : "Mahmood",
"LastName" : "Rohani",
"Gender": "Male",
"BirthDate" : {"Year" : 1985 , "Month" : 8 , "Day" : 8}}
id1 = people.insert_one(new_person1).inserted_id  # insert() is deprecated in pymongo 3+
id2 = people.insert_one(new_person2).inserted_id
id3 = people.insert_one(new_person3).inserted_id
print(id1)
print(id2)
print(id3)
match1=people.find_one({"_id":id2})
print(match1)
# if the id is a string (for example, when it arrives from a web request),
# it must be converted back to an ObjectId before querying
from bson.objectid import ObjectId
match2=people.find_one({"_id":ObjectId(id3)})
print(match2) | [
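# hypothetical web-style lookup sketch: the string form is derived from id3
# here only for illustration; a real request would supply it as a URL parameter
id_str = str(id3)
match3 = people.find_one({"_id": ObjectId(id_str)})
print(match3)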
"[email protected]"
] | |
dbf7640a313af2358461941e02b18b6d1e7c1709 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /time/high_work_and_long_life/group/case/good_child_and_case.py | fdb05e9779fa6e59a4688c46e5b2f2dee289fb94 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py |
#! /usr/bin/env python
def long_world(str_arg):
group(str_arg)
print('little_hand_and_week')
def group(str_arg):
print(str_arg)
if __name__ == '__main__':
long_world('high_person_and_big_child')
| [
"[email protected]"
] | |
e5fc5b2e2f076a3deb4a132618a1ce25e94c26cc | e204cdd8a38a247aeac3d07f6cce6822472bdcc5 | /.history/app_test_django/views_20201116131730.py | f28ad5d7088723c0254f3cead00c09213ae31ad9 | [] | no_license | steven-halla/python-test | 388ad8386662ad5ce5c1a0976d9f054499dc741b | 0b760a47d154078002c0272ed1204a94721c802a | refs/heads/master | 2023-04-08T03:40:00.453977 | 2021-04-09T19:12:29 | 2021-04-09T19:12:29 | 354,122,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,095 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
def index(request):
return render(request, "index.html")
def register_new_user(request):
errors = User.objects.user_registration_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
error_msg = key + ' - ' + value
messages.error(request, error_msg)
return redirect("/")
else:
first_name_from_post = request.POST['first_name']
last_name_from_post = request.POST['last_name']
email_from_post = request.POST['email']
password_from_post = request.POST['password']
new_user = User.objects.create(
first_name=first_name_from_post,
last_name=last_name_from_post,
email=email_from_post,
password=password_from_post
)
print(new_user.id)
request.session['user_id'] = new_user.id
return redirect('/dashboard')
def login(request):
    # user provided email/password, now let's check the database
email_from_post = request.POST['email']
password_from_post = request.POST['password']
    # this returns every user matching email_from_post;
    # in the future we should require emails to be unique
users = User.objects.filter(email=email_from_post)
if len(users) == 0:
messages.error(request, "email/password does not exist")
return redirect("/")
user = users[0]
print(user)
if (user.password != password_from_post):
messages.error(request, "email/password does not exist")
return redirect("/")
request.session['user_id'] = user.id
return redirect("/dashboard")
def logout(request):
request.session.clear()
return redirect("/")
def view_dashboard(request):
if 'user_id' not in request.session:
return redirect("/")
this_user = User.objects.get(id=request.session['user_id'])
context = {
"user":this_user
}
return render(request, "dash_board.html", context)
| [
"[email protected]"
] | |
b2087e7207c8d5906a761f9b5d0333fdf5fa7128 | 20f89f49400feb9d2885dc2daf3ea3ca189556e7 | /day09/proctice/05 粘包问题解决/mod_struct.py | 08c0855ed616bb6cfc08acaaf5767a5483a48750 | [] | no_license | xiaobaiskill/python | 201511b1b1bddec8c33c4efa7ca2cc4afed24a89 | 540693baad757369ff811fb622a949c99fb6b4ba | refs/heads/master | 2021-04-12T03:43:30.308110 | 2018-07-13T01:41:19 | 2018-07-13T01:41:19 | 125,884,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author Jmz
import struct
total = 16374
res = struct.pack('i',total)
print(len(res))
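# round-trip sketch: unpack the 4 packed bytes back into the original integer
(unpacked,) = struct.unpack('i', res)
assert unpacked == total
print(unpacked)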
| [
"[email protected]"
] | |
f16a18a60451d63b0c3e3ea9dfd47962690ed967 | eb19175c18053e5d414b4f6442bdfd0f9f97e24d | /tests/starwars_django/test_connections.py | 9992f0f3809a991f64dc9a760a689072066a1616 | [
"MIT"
] | permissive | jhgg/graphene | 6c4c5a64b7b0f39c8f6b32d17f62e1c31ca03825 | 67904e8329de3d69fec8c82ba8c3b4fe598afa8e | refs/heads/master | 2020-12-25T21:23:22.556227 | 2015-10-15T19:56:40 | 2015-10-15T19:56:40 | 43,073,008 | 1 | 0 | null | 2015-09-24T14:47:19 | 2015-09-24T14:47:19 | null | UTF-8 | Python | false | false | 987 | py | import pytest
from graphql.core import graphql
from .models import *
from .schema import schema
from .data import initialize
pytestmark = pytest.mark.django_db
def test_correct_fetch_first_ship_rebels():
initialize()
query = '''
query RebelsShipsQuery {
rebels {
name,
hero {
name
}
ships(first: 1) {
edges {
node {
name
}
}
}
}
}
'''
expected = {
'rebels': {
'name': 'Alliance to Restore the Republic',
'hero': {
'name': 'Human'
},
'ships': {
'edges': [
{
'node': {
'name': 'X-Wing'
}
}
]
}
}
}
result = schema.execute(query)
assert not result.errors
assert result.data == expected
| [
"[email protected]"
] | |
a5cfd192a6bdd36268a411986e5257ad3f206711 | a2e638cd0c124254e67963bda62c21351881ee75 | /Python modules/HP_2021_01_04_XTP_Trades.py | 4313930c4f2639f99725c878fd6a2d0aae8b3bfc | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | import acm
import csv
with open( 'C:\\Temp\\1\\CA_Stock Input.csv', mode='r') as csv_file:
readCSV = csv.DictReader(csv_file)
print 'trade number', "|", 'instrument', "|", 'trade price', "|", 'Portfolio'
for row in readCSV:
trade=acm.FTrade()
trade.Instrument(row['Instrument'])
trade.Quantity(row['Quantity'])
trade.Price(row['Price'])
trade.Acquirer(row['Acquirer'])
trade.Portfolio(row['Portfolio'])
trade.Counterparty("JSE")
trade.Status('Simulated')
trade.Currency('ZAR')
trade.TradeTime('2021-01-04')
trade.ValueDay('2021-01-07')
trade.Commit()
        premium = -trade.Quantity() * trade.Price() / 100
trade.Premium(premium)
trade.AcquireDay(trade.ValueDay())
trade.Commit()
print trade.Name(), "|", trade.Instrument().Name(), "|", trade.Price(), "|", trade.Portfolio().Name()
| [
"[email protected]"
] | |
5a8b2cdde3908ffa52271f85cdfd0d8883b5c8db | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /young_government/want_thing.py | fe79d6bc7b16f04663bb72a76609bd5cc5bbb80f | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py |
#! /usr/bin/env python
def group(str_arg):
small_work(str_arg)
print('old_work_and_young_company')
def small_work(str_arg):
print(str_arg)
if __name__ == '__main__':
group('have_thing')
| [
"[email protected]"
] | |
956ef7f9bbda19c259de876c23f18bfa189bc407 | 2b167e29ba07e9f577c20c54cb943861d0ccfa69 | /numerical_analysis_backup/large-scale-multiobj2/core-arch4-guard2-beta0-hebbe/pareto0.py | 0000998404917886a9c21b69d9c51c32cf41c0cc | [] | no_license | LiYan1988/kthOld_OFC | 17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f | b1237577ea68ad735a65981bf29584ebd889132b | refs/heads/master | 2021-01-11T17:27:25.574431 | 2017-01-23T05:32:35 | 2017-01-23T05:32:35 | 79,773,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,399 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch4_decomposition_new import Arch4_decompose
np.random.seed(2010)
num_cores=10
num_slots=320
i = 0
time_limit_routing = 3600
time_limit_sa = 10800
filename = 'traffic_matrix_pod250_load50_'+str(i)+'.csv'
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
row = [float(u) for u in row]
tm.append(row)
tm = np.array(tm)
#%% arch2
corev = np.array([1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
#corev = np.array([1, 2])
connection_ub = []
throughput_ub = []
obj_ub = []
connection_lb = []
throughput_lb = []
obj_lb = []
connection_he = []
throughput_he = []
obj_he = []
for c in corev:
m = Arch4_decompose(tm, num_slots=num_slots, num_cores=c,
alpha=1,beta=0, num_guard_slot=2)
m.create_model_routing(mipfocus=1,timelimit=7200,mipgap=0.01, method=2)
connection_ub.append(m.connection_ub_)
throughput_ub.append(m.throughput_ub_)
obj_ub.append(m.obj_ub_)
# m.create_model_sa(mipfocus=1,timelimit=10800,mipgap=0.01, method=2,
# SubMIPNodes=2000, heuristics=0.8)
# connection_lb.append(m.connection_lb_)
# throughput_lb.append(m.throughput_lb_)
# obj_lb.append(m.obj_lb_)
# m.write_result_csv('cnklist_lb_%d_%d.csv'%(i,c), m.cnklist_lb)
connection_lb.append(0)
throughput_lb.append(0)
obj_lb.append(0)
m.heuristic()
connection_he.append(m.obj_heuristic_connection_)
throughput_he.append(m.obj_heuristic_throughput_)
obj_he.append(m.obj_heuristic_)
m.write_result_csv('cnklist_heuristic_arch4_i%d_c%d.csv'%(i,c), m.cnklist_heuristic_)
result = np.array([corev,
connection_ub,throughput_ub,obj_ub,
connection_lb,throughput_lb,obj_lb,
connection_he,throughput_he,obj_he]).T
file_name = "result_pareto_arch4_old_pod100_i{}.csv".format(i)
with open(file_name, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['#cores', 'connection_ub', 'throughput_ub',
'obj_ub', 'connection_lb', 'throughput_lb', 'obj_lb',
'connection_he', 'throughput_he', 'obj_he'])
writer.writerows(result)
| [
"[email protected]"
] | |
ddd352a932897eab44ab466a1462189f4656689c | c934e7c27f0e72385218a14b4e2a7e94a747a360 | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/gsuiteaddons/v1/gsuiteaddons_v1_messages.py | 3c7225d36f055cd65a71467d6cc9a202871a533a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PrateekKhatri/gcloud_cli | 5f74b97494df4f61816026af9460b9c4d8e89431 | 849d09dd7863efecbdf4072a504e1554e119f6ae | refs/heads/master | 2023-03-27T05:53:53.796695 | 2021-03-10T04:08:14 | 2021-03-10T04:08:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,183 | py | """Generated message classes for gsuiteaddons version v1.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'gsuiteaddons'
class GoogleAppsScriptTypeAddOnWidgetSet(_messages.Message):
r"""The widget subset used by an add-on.
Enums:
UsedWidgetsValueListEntryValuesEnum:
Fields:
usedWidgets: The list of widgets used in an add-on.
"""
class UsedWidgetsValueListEntryValuesEnum(_messages.Enum):
r"""UsedWidgetsValueListEntryValuesEnum enum type.
Values:
WIDGET_TYPE_UNSPECIFIED: The default widget set.
DATE_PICKER: The date picker.
STYLED_BUTTONS: Styled buttons include filled buttons and disabled
buttons.
PERSISTENT_FORMS: Persistent forms allow persisting form values during
actions.
FIXED_FOOTER: Fixed footer in card.
UPDATE_SUBJECT_AND_RECIPIENTS: Update the subject and recipients of a
draft.
GRID_WIDGET: The grid widget.
ADDON_COMPOSE_UI_ACTION: A Gmail add-on action that applies to the add-
on compose UI.
"""
WIDGET_TYPE_UNSPECIFIED = 0
DATE_PICKER = 1
STYLED_BUTTONS = 2
PERSISTENT_FORMS = 3
FIXED_FOOTER = 4
UPDATE_SUBJECT_AND_RECIPIENTS = 5
GRID_WIDGET = 6
ADDON_COMPOSE_UI_ACTION = 7
usedWidgets = _messages.EnumField('UsedWidgetsValueListEntryValuesEnum', 1, repeated=True)
class GoogleAppsScriptTypeCalendarCalendarAddOnManifest(_messages.Message):
r"""Properties customizing the appearance and execution of a Calendar add-
on.
Enums:
CurrentEventAccessValueValuesEnum: Defines the level of data access when
an event add-on is triggered.
Fields:
conferenceSolution: Defines conference solutions provided by this add-on.
createSettingsUrlFunction: An endpoint to execute that creates a URL to
the add-on's settings page.
currentEventAccess: Defines the level of data access when an event add-on
is triggered.
eventOpenTrigger: An endpoint that triggers when an event is opened to be
viewed or edited.
eventUpdateTrigger: An endpoint that triggers when the open event is
updated.
homepageTrigger: Defines an endpoint that will be executed in contexts
that don't match a declared contextual trigger. Any cards generated by
this function will always be available to the user, but might be
eclipsed by contextual content when this add-on declares more targeted
triggers. If present, this overrides the configuration from
`addOns.common.homepageTrigger`.
"""
class CurrentEventAccessValueValuesEnum(_messages.Enum):
r"""Defines the level of data access when an event add-on is triggered.
Values:
UNSPECIFIED: Default value when nothing is set for EventAccess.
METADATA: Gives event triggers the permission to access the metadata of
events such as event ID and calendar ID.
READ: Gives event triggers access to all provided event fields including
the metadata, attendees, and conference data.
WRITE: Gives event triggers access to the metadata of events and the
ability to perform all actions, including adding attendees and setting
conference data.
READ_WRITE: Gives event triggers access to all provided event fields
including the metadata, attendees, and conference data and the ability
to perform all actions.
"""
UNSPECIFIED = 0
METADATA = 1
READ = 2
WRITE = 3
READ_WRITE = 4
conferenceSolution = _messages.MessageField('GoogleAppsScriptTypeCalendarConferenceSolution', 1, repeated=True)
createSettingsUrlFunction = _messages.StringField(2)
currentEventAccess = _messages.EnumField('CurrentEventAccessValueValuesEnum', 3)
eventOpenTrigger = _messages.MessageField('GoogleAppsScriptTypeCalendarCalendarExtensionPoint', 4)
eventUpdateTrigger = _messages.MessageField('GoogleAppsScriptTypeCalendarCalendarExtensionPoint', 5)
homepageTrigger = _messages.MessageField('GoogleAppsScriptTypeHomepageExtensionPoint', 6)
class GoogleAppsScriptTypeCalendarCalendarExtensionPoint(_messages.Message):
r"""Common format for declaring a calendar add-on's triggers.
Fields:
runFunction: Required. The endpoint to execute when this extension point
is activated.
"""
runFunction = _messages.StringField(1)
class GoogleAppsScriptTypeCalendarConferenceSolution(_messages.Message):
r"""Defines conference related values.
Fields:
id: Required. IDs should be uniquely assigned across ConferenceSolutions
within one add-on, otherwise the wrong ConferenceSolution might be used
when the add-on is triggered. While you can change the display name of
an add-on, the ID shouldn't be changed.
logoUrl: Required. The URL for the logo image of the ConferenceSolution.
name: Required. The display name of the ConferenceSolution.
onCreateFunction: Required. The endpoint to call when ConferenceData
should be created.
"""
id = _messages.StringField(1)
logoUrl = _messages.StringField(2)
name = _messages.StringField(3)
onCreateFunction = _messages.StringField(4)
class GoogleAppsScriptTypeCommonAddOnManifest(_messages.Message):
r"""Add-on configuration that is shared across all add-on host applications.
Fields:
addOnWidgetSet: The widgets used in the add-on. If this field is not
specified, the default set is used.
homepageTrigger: Defines an endpoint that will be executed in any context,
in any host. Any cards generated by this function will always be
available to the user, but might be eclipsed by contextual content when
this add-on declares more targeted triggers.
layoutProperties: Common layout properties for the add-on cards.
logoUrl: Required. The URL for the logo image shown in the add-on toolbar.
name: Required. The display name of the add-on.
openLinkUrlPrefixes: An OpenLink action can only use a URL with an HTTPS,
MAILTO or TEL scheme. For HTTPS links, the URL must also
[match](/gmail/add-ons/concepts/manifests#whitelisting_urls) one of the
prefixes specified in this whitelist. If the prefix omits the scheme,
HTTPS is assumed. Notice that HTTP links are automatically rewritten to
HTTPS links.
universalActions: Defines a list of extension points in the universal
action menu which serves as a settings menu for the add-on. The
extension point can be a link URL to open or an endpoint to execute as a
form submission.
useLocaleFromApp: Whether to pass locale information from host app.
"""
addOnWidgetSet = _messages.MessageField('GoogleAppsScriptTypeAddOnWidgetSet', 1)
homepageTrigger = _messages.MessageField('GoogleAppsScriptTypeHomepageExtensionPoint', 2)
layoutProperties = _messages.MessageField('GoogleAppsScriptTypeLayoutProperties', 3)
logoUrl = _messages.StringField(4)
name = _messages.StringField(5)
openLinkUrlPrefixes = _messages.MessageField('extra_types.JsonValue', 6, repeated=True)
universalActions = _messages.MessageField('GoogleAppsScriptTypeUniversalActionExtensionPoint', 7, repeated=True)
useLocaleFromApp = _messages.BooleanField(8)
class GoogleAppsScriptTypeDocsDocsAddOnManifest(_messages.Message):
r"""Properties customizing the appearance and execution of a Google Docs
add-on.
Fields:
homepageTrigger: If present, this overrides the configuration from
`addOns.common.homepageTrigger`.
onFileScopeGrantedTrigger: Endpoint to execute when file scope
authorization is granted for this document/user pair.
"""
homepageTrigger = _messages.MessageField('GoogleAppsScriptTypeHomepageExtensionPoint', 1)
onFileScopeGrantedTrigger = _messages.MessageField('GoogleAppsScriptTypeDocsDocsExtensionPoint', 2)
class GoogleAppsScriptTypeDocsDocsExtensionPoint(_messages.Message):
r"""Common format for declaring a Docs add-on's triggers.
Fields:
runFunction: Required. The endpoint to execute when this extension point
is activated.
"""
runFunction = _messages.StringField(1)
class GoogleAppsScriptTypeDriveDriveAddOnManifest(_messages.Message):
r"""Properties customizing the appearance and execution of a Drive add-on.
Fields:
homepageTrigger: If present, this overrides the configuration from
`addOns.common.homepageTrigger`.
onItemsSelectedTrigger: Corresponds to behavior that executes when items
are selected in the relevant Drive view, such as the My Drive Doclist.
"""
homepageTrigger = _messages.MessageField('GoogleAppsScriptTypeHomepageExtensionPoint', 1)
onItemsSelectedTrigger = _messages.MessageField('GoogleAppsScriptTypeDriveDriveExtensionPoint', 2)
class GoogleAppsScriptTypeDriveDriveExtensionPoint(_messages.Message):
r"""Common format for declaring a Drive add-on's triggers.
Fields:
runFunction: Required. The endpoint to execute when the extension point is
activated.
"""
runFunction = _messages.StringField(1)
class GoogleAppsScriptTypeGmailComposeTrigger(_messages.Message):
r"""A trigger that activates when user is composing an email.
Enums:
DraftAccessValueValuesEnum: Defines the level of data access when a
compose time add-on is triggered.
Fields:
actions: Defines the set of actions for a compose time add-on. These are
actions that users can trigger on a compose time add-on.
draftAccess: Defines the level of data access when a compose time add-on
is triggered.
"""
class DraftAccessValueValuesEnum(_messages.Enum):
r"""Defines the level of data access when a compose time add-on is
triggered.
Values:
UNSPECIFIED: Default value when nothing is set for DraftAccess.
NONE: The compose trigger can't access any data of the draft when a
compose add-on is triggered.
METADATA: Gives the compose trigger the permission to access the
metadata of the draft when a compose add-on is triggered. This
includes the audience list, such as the To and Cc list of a draft
message.
"""
UNSPECIFIED = 0
NONE = 1
METADATA = 2
actions = _messages.MessageField('GoogleAppsScriptTypeMenuItemExtensionPoint', 1, repeated=True)
draftAccess = _messages.EnumField('DraftAccessValueValuesEnum', 2)
class GoogleAppsScriptTypeGmailContextualTrigger(_messages.Message):
r"""Defines a trigger that fires when the open email meets a specific
criteria. When the trigger fires, it executes a specific endpoint, usually
in order to create new cards and update the UI.
Fields:
onTriggerFunction: Required. The name of the endpoint to call when a
message matches the trigger.
unconditional: UnconditionalTriggers are executed when any mail message is
opened.
"""
onTriggerFunction = _messages.StringField(1)
unconditional = _messages.MessageField('GoogleAppsScriptTypeGmailUnconditionalTrigger', 2)
class GoogleAppsScriptTypeGmailGmailAddOnManifest(_messages.Message):
r"""Properties customizing the appearance and execution of a Gmail add-on.
Fields:
authorizationCheckFunction: The name of an endpoint that verifies that the
add-on has all the required third-party authorizations, by probing the
third-party APIs. If the probe fails, the function should throw an
exception to initiate the authorization flow. This function is called
before each invocation of the add-on in order to ensure a smooth user
experience.
composeTrigger: Defines the compose time trigger for a compose time add-
on. This is the trigger that causes an add-on to take action when the
user is composing an email. All compose time add-ons must have the
`gmail.addons.current.action.compose` scope even though it might not
edit the draft.
contextualTriggers: Defines the set of conditions that trigger the add-on.
homepageTrigger: Defines an endpoint that will be executed in contexts
that don't match a declared contextual trigger. Any cards generated by
this function will always be available to the user, but may be eclipsed
by contextual content when this add-on declares more targeted triggers.
If present, this overrides the configuration from
`addOns.common.homepageTrigger`.
universalActions: Defines set of [universal actions](/gmail/add-ons/how-
tos/universal-actions) for the add-on. The user triggers universal
actions from the add-on toolbar menu.
"""
authorizationCheckFunction = _messages.StringField(1)
composeTrigger = _messages.MessageField('GoogleAppsScriptTypeGmailComposeTrigger', 2)
contextualTriggers = _messages.MessageField('GoogleAppsScriptTypeGmailContextualTrigger', 3, repeated=True)
homepageTrigger = _messages.MessageField('GoogleAppsScriptTypeHomepageExtensionPoint', 4)
universalActions = _messages.MessageField('GoogleAppsScriptTypeGmailUniversalAction', 5, repeated=True)
class GoogleAppsScriptTypeGmailUnconditionalTrigger(_messages.Message):
r"""A trigger that fires when any email message is opened."""
class GoogleAppsScriptTypeGmailUniversalAction(_messages.Message):
r"""An action that is always available in the add-on toolbar menu regardless
of message context.
Fields:
openLink: A link that is opened by Gmail when the user triggers the
action.
runFunction: An endpoint that is called when the user triggers the action.
See the [universal actions guide](/gmail/add-ons/how-tos/universal-
actions) for details.
text: Required. User-visible text describing the action, for example, "Add
a new contact."
"""
openLink = _messages.StringField(1)
runFunction = _messages.StringField(2)
text = _messages.StringField(3)
class GoogleAppsScriptTypeHomepageExtensionPoint(_messages.Message):
r"""Common format for declaring an add-on's homepage view.
Fields:
enabled: Optional. If set to `false`, deactivates the homepage view in
this context. Defaults to `true` if unset. If an add-on's custom
homepage view is disabled, a generic overview card is provided for users
instead.
runFunction: Required. The endpoint to execute when this extension point
is activated.
"""
enabled = _messages.BooleanField(1)
runFunction = _messages.StringField(2)
class GoogleAppsScriptTypeHttpOptions(_messages.Message):
r"""Options for sending requests to add-on HTTP endpoints
Enums:
AuthorizationHeaderValueValuesEnum: Configuration for the token sent in
the HTTP Authorization header
Fields:
authorizationHeader: Configuration for the token sent in the HTTP
Authorization header
"""
class AuthorizationHeaderValueValuesEnum(_messages.Enum):
r"""Configuration for the token sent in the HTTP Authorization header
Values:
HTTP_AUTHORIZATION_HEADER_UNSPECIFIED: Default value, equivalent to
`SYSTEM_ID_TOKEN`
SYSTEM_ID_TOKEN: Send an ID token for the project-specific Google
Workspace Add-on's system service account (default).
USER_ID_TOKEN: Send an ID token for the end user.
NONE: Do not send an Authentication header.
"""
HTTP_AUTHORIZATION_HEADER_UNSPECIFIED = 0
SYSTEM_ID_TOKEN = 1
USER_ID_TOKEN = 2
NONE = 3
authorizationHeader = _messages.EnumField('AuthorizationHeaderValueValuesEnum', 1)
class GoogleAppsScriptTypeLayoutProperties(_messages.Message):
r"""Card layout properties shared across all add-on host applications.
Fields:
primaryColor: The primary color of the add-on. It sets the color of
toolbar. If no primary color is set explicitly, the default value
provided by the framework is used.
secondaryColor: The secondary color of the add-on. It sets the color of
buttons. If primary color is set but no secondary color is set, the
secondary color is the same as the primary color. If neither primary
color nor secondary color is set, the default value provided by the
framework is used.
useNewMaterialDesign: Enable material design for cards.
"""
primaryColor = _messages.StringField(1)
secondaryColor = _messages.StringField(2)
useNewMaterialDesign = _messages.BooleanField(3)
class GoogleAppsScriptTypeMenuItemExtensionPoint(_messages.Message):
r"""Common format for declaring a menu item or button that appears within a
host app.
Fields:
label: Required. User-visible text that describes the action taken by
activating this extension point. For example, "Insert invoice."
logoUrl: The URL for the logo image shown in the add-on toolbar. If not
set, defaults to the add-on's primary logo URL.
runFunction: Required. The endpoint to execute when this extension point
is activated.
"""
label = _messages.StringField(1)
logoUrl = _messages.StringField(2)
runFunction = _messages.StringField(3)
class GoogleAppsScriptTypeSheetsSheetsAddOnManifest(_messages.Message):
r"""Properties customizing the appearance and execution of a Google Sheets
add-on.
Fields:
homepageTrigger: If present, this overrides the configuration from
`addOns.common.homepageTrigger`.
onFileScopeGrantedTrigger: Endpoint to execute when file scope
authorization is granted for this document/user pair.
"""
homepageTrigger = _messages.MessageField('GoogleAppsScriptTypeHomepageExtensionPoint', 1)
onFileScopeGrantedTrigger = _messages.MessageField('GoogleAppsScriptTypeSheetsSheetsExtensionPoint', 2)
class GoogleAppsScriptTypeSheetsSheetsExtensionPoint(_messages.Message):
r"""Common format for declaring a Sheets add-on's triggers.
Fields:
runFunction: Required. The endpoint to execute when this extension point
is activated.
"""
runFunction = _messages.StringField(1)
class GoogleAppsScriptTypeSlidesSlidesAddOnManifest(_messages.Message):
r"""Properties customizing the appearance and execution of a Google Slides
add-on.
Fields:
homepageTrigger: If present, this overrides the configuration from
`addOns.common.homepageTrigger`.
onFileScopeGrantedTrigger: Endpoint to execute when file scope
authorization is granted for this document/user pair.
"""
homepageTrigger = _messages.MessageField('GoogleAppsScriptTypeHomepageExtensionPoint', 1)
onFileScopeGrantedTrigger = _messages.MessageField('GoogleAppsScriptTypeSlidesSlidesExtensionPoint', 2)
class GoogleAppsScriptTypeSlidesSlidesExtensionPoint(_messages.Message):
r"""Common format for declaring a Slides add-on's triggers.
Fields:
runFunction: Required. The endpoint to execute when this extension point
is activated.
"""
runFunction = _messages.StringField(1)
class GoogleAppsScriptTypeUniversalActionExtensionPoint(_messages.Message):
r"""Format for declaring a universal action menu item extension point.
Fields:
label: Required. User-visible text that describes the action taken by
activating this extension point, for example, "Add a new contact."
openLink: URL to be opened by the UniversalAction.
runFunction: Endpoint to be run by the UniversalAction.
"""
label = _messages.StringField(1)
openLink = _messages.StringField(2)
runFunction = _messages.StringField(3)
class GoogleCloudGsuiteaddonsV1AddOns(_messages.Message):
r"""A Google Workspace Add-on configuration.
Fields:
calendar: Calendar add-on configuration.
common: Configuration that is common across all Google Workspace Add-ons.
docs: Docs add-on configuration.
drive: Drive add-on configuration.
gmail: Gmail add-on configuration.
httpOptions: Options for sending requests to add-on HTTP endpoints
sheets: Sheets add-on configuration.
slides: Slides add-on configuration.
"""
calendar = _messages.MessageField('GoogleAppsScriptTypeCalendarCalendarAddOnManifest', 1)
common = _messages.MessageField('GoogleAppsScriptTypeCommonAddOnManifest', 2)
docs = _messages.MessageField('GoogleAppsScriptTypeDocsDocsAddOnManifest', 3)
drive = _messages.MessageField('GoogleAppsScriptTypeDriveDriveAddOnManifest', 4)
gmail = _messages.MessageField('GoogleAppsScriptTypeGmailGmailAddOnManifest', 5)
httpOptions = _messages.MessageField('GoogleAppsScriptTypeHttpOptions', 6)
sheets = _messages.MessageField('GoogleAppsScriptTypeSheetsSheetsAddOnManifest', 7)
slides = _messages.MessageField('GoogleAppsScriptTypeSlidesSlidesAddOnManifest', 8)
class GoogleCloudGsuiteaddonsV1Authorization(_messages.Message):
r"""The authorization information used when invoking deployment endpoints.
Fields:
name: The canonical full name of this resource. Example:
`projects/123/authorization`
oauthClientId: The OAuth client ID used to obtain OAuth access tokens for
a user on the add-on's behalf.
serviceAccountEmail: The email address of the service account used to
authenticate requests to add-on callback endpoints.
"""
name = _messages.StringField(1)
oauthClientId = _messages.StringField(2)
serviceAccountEmail = _messages.StringField(3)
class GoogleCloudGsuiteaddonsV1Deployment(_messages.Message):
r"""A Google Workspace Add-on deployment
Fields:
addOns: The Google Workspace Add-on configuration.
etag: This value is computed by the server based on the version of the
deployment in storage, and may be sent on update and delete requests to
ensure the client has an up-to-date value before proceeding.
name: The deployment resource name. Example:
`projects/123/deployments/my_deployment`.
oauthScopes: The list of Google OAuth scopes for which to request consent
from the end user before executing an add-on endpoint.
"""
addOns = _messages.MessageField('GoogleCloudGsuiteaddonsV1AddOns', 1)
etag = _messages.StringField(2)
name = _messages.StringField(3)
oauthScopes = _messages.StringField(4, repeated=True)
class GoogleCloudGsuiteaddonsV1InstallDeploymentRequest(_messages.Message):
r"""Request message to install a deployment for testing."""
class GoogleCloudGsuiteaddonsV1InstallStatus(_messages.Message):
r"""Install status of a test deployment.
Fields:
installed: True if the deployment is installed for the user.
name: The canonical full resource name of the deployment install status.
Example: `projects/123/deployments/my_deployment/installStatus`.
"""
installed = _messages.BooleanField(1)
name = _messages.StringField(2)
class GoogleCloudGsuiteaddonsV1ListDeploymentsResponse(_messages.Message):
r"""Response message to list deployments.
Fields:
deployments: The list of deployments for the given project.
nextPageToken: A token, which can be sent as `page_token` to retrieve the
next page. If this field is omitted, there are no subsequent pages.
"""
deployments = _messages.MessageField('GoogleCloudGsuiteaddonsV1Deployment', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class GoogleCloudGsuiteaddonsV1UninstallDeploymentRequest(_messages.Message):
r"""Request message to uninstall a test deployment."""
class GoogleProtobufEmpty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo { rpc
Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
representation for `Empty` is empty JSON object `{}`.
"""
class GsuiteaddonsProjectsDeploymentsCreateRequest(_messages.Message):
r"""A GsuiteaddonsProjectsDeploymentsCreateRequest object.
Fields:
deploymentId: Required. The ID to use for this deployment. The full name
of the created resource will be `projects//deployments/`.
googleCloudGsuiteaddonsV1Deployment: A GoogleCloudGsuiteaddonsV1Deployment
resource to be passed as the request body.
parent: Required. Name of the project in which to create the deployment.
Example: `projects/my_project`.
"""
deploymentId = _messages.StringField(1)
googleCloudGsuiteaddonsV1Deployment = _messages.MessageField('GoogleCloudGsuiteaddonsV1Deployment', 2)
parent = _messages.StringField(3, required=True)
class GsuiteaddonsProjectsDeploymentsDeleteRequest(_messages.Message):
r"""A GsuiteaddonsProjectsDeploymentsDeleteRequest object.
Fields:
etag: The etag of the deployment to delete. If this is provided, it must
match the server's etag.
name: Required. The full resource name of the deployment to delete.
Example: `projects/my_project/deployments/my_deployment`.
"""
etag = _messages.StringField(1)
name = _messages.StringField(2, required=True)
class GsuiteaddonsProjectsDeploymentsGetInstallStatusRequest(_messages.Message):
r"""A GsuiteaddonsProjectsDeploymentsGetInstallStatusRequest object.
Fields:
name: Required. The full resource name of the deployment. Example:
`projects/my_project/deployments/my_deployment/installStatus`.
"""
name = _messages.StringField(1, required=True)
class GsuiteaddonsProjectsDeploymentsGetRequest(_messages.Message):
r"""A GsuiteaddonsProjectsDeploymentsGetRequest object.
Fields:
name: Required. The full resource name of the deployment to get. Example:
`projects/my_project/deployments/my_deployment`.
"""
name = _messages.StringField(1, required=True)
class GsuiteaddonsProjectsDeploymentsInstallRequest(_messages.Message):
r"""A GsuiteaddonsProjectsDeploymentsInstallRequest object.
Fields:
googleCloudGsuiteaddonsV1InstallDeploymentRequest: A
GoogleCloudGsuiteaddonsV1InstallDeploymentRequest resource to be passed
as the request body.
name: Required. The full resource name of the deployment to install.
Example: `projects/my_project/deployments/my_deployment`.
"""
googleCloudGsuiteaddonsV1InstallDeploymentRequest = _messages.MessageField('GoogleCloudGsuiteaddonsV1InstallDeploymentRequest', 1)
name = _messages.StringField(2, required=True)
class GsuiteaddonsProjectsDeploymentsListRequest(_messages.Message):
r"""A GsuiteaddonsProjectsDeploymentsListRequest object.
Fields:
pageSize: The maximum number of deployments to return. The service might
return fewer than this value. If unspecified, at most 1,000 deployments
are returned. The maximum possible value is 1,000; values above 1,000
are changed to 1,000.
pageToken: A page token, received from a previous `ListDeployments` call.
Provide this to retrieve the subsequent page. When paginating, all other
parameters provided to `ListDeployments` must match the call that
provided the page token.
parent: Required. Name of the project in which to create the deployment.
Example: `projects/my_project`.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class GsuiteaddonsProjectsDeploymentsReplaceDeploymentRequest(_messages.Message):
r"""A GsuiteaddonsProjectsDeploymentsReplaceDeploymentRequest object.
Fields:
googleCloudGsuiteaddonsV1Deployment: A GoogleCloudGsuiteaddonsV1Deployment
resource to be passed as the request body.
name: The deployment resource name. Example:
`projects/123/deployments/my_deployment`.
"""
googleCloudGsuiteaddonsV1Deployment = _messages.MessageField('GoogleCloudGsuiteaddonsV1Deployment', 1)
name = _messages.StringField(2, required=True)
class GsuiteaddonsProjectsDeploymentsUninstallRequest(_messages.Message):
r"""A GsuiteaddonsProjectsDeploymentsUninstallRequest object.
Fields:
googleCloudGsuiteaddonsV1UninstallDeploymentRequest: A
GoogleCloudGsuiteaddonsV1UninstallDeploymentRequest resource to be
passed as the request body.
name: Required. The full resource name of the deployment to install.
Example: `projects/my_project/deployments/my_deployment`.
"""
googleCloudGsuiteaddonsV1UninstallDeploymentRequest = _messages.MessageField('GoogleCloudGsuiteaddonsV1UninstallDeploymentRequest', 1)
name = _messages.StringField(2, required=True)
class GsuiteaddonsProjectsGetAuthorizationRequest(_messages.Message):
r"""A GsuiteaddonsProjectsGetAuthorizationRequest object.
Fields:
name: Required. Name of the project for which to get the Google Workspace
Add-on authorization information. Example:
`projects/my_project/authorization`.
"""
name = _messages.StringField(1, required=True)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| [
"[email protected]"
] | |
d29f5685c461da483a731f28490384ef6e074cab | a0cf59e79154d6062c2dccfb2ff8b61aa9681486 | /10.py | 9f90c64b92e30c225325d27221e01584d6676600 | [] | no_license | PetraVidnerova/AdventOfCode | a211d77210ac1c43a9b2c0f546250f6d7c1d183c | 5b0e8017fcd8f71576a312b6c7b5bd3602be6e89 | refs/heads/master | 2021-01-10T16:07:03.336632 | 2016-02-28T14:02:54 | 2016-02-28T14:02:54 | 52,216,197 | 0 | 0 | null | 2016-02-21T17:17:37 | 2016-02-21T16:39:51 | null | UTF-8 | Python | false | false | 471 | py |
def read(input):
# one iteration of read
    input = input + "x"  # sentinel: forces the final run to be flushed; never appended itself
result = ""
lastchar = None
count = 0
for ch in input:
if ch == lastchar:
count += 1
continue
if lastchar:
result = result + str(count) + str(lastchar)
count = 1
lastchar = ch
return result
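# e.g. read("21") == "1211" -- one "2", one "1" (a single look-and-say step)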
input = "3113322113"
for x in range(50):
print(x)
input = read(input)
print(len(input))
| [
"[email protected]"
] | |
ce468ff5a9ed3402454e645b2be13c4a473020ea | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /gaternet/main.py | c4692b2cd0fdba89e13d15c53467b6b2f916be48 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 5,362 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads a GaterNet checkpoint and tests on Cifar-10 test set."""
import argparse
import io
import os
from backbone_resnet import Network as Backbone
from gater_resnet import Gater
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
def load_from_state(state_dict, model):
"""Loads the state dict of a checkpoint into model."""
tem_dict = dict()
for k in state_dict.keys():
tem_dict[k.replace('module.', '')] = state_dict[k]
state_dict = tem_dict
ckpt_key = set(state_dict.keys())
model_key = set(model.state_dict().keys())
print('Keys not in current model: {}\n'.format(ckpt_key - model_key))
print('Keys not in checkpoint: {}\n'.format(model_key - ckpt_key))
model.load_state_dict(state_dict, strict=True)
print('Successfully reload from state.')
return model
def test(backbone, gater, device, test_loader):
"""Tests the model on a test set."""
backbone.eval()
gater.eval()
loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
gate = gater(data)
output = backbone(data, gate)
      loss += F.cross_entropy(output, target, reduction='sum').item()  # size_average=False is deprecated
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
loss /= len(test_loader.dataset)
acy = 100. * correct / len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
loss, correct, len(test_loader.dataset), acy))
return acy
def run(args, device, test_loader):
"""Loads checkpoint into GaterNet and runs test on the test data."""
with open(args.checkpoint_file, 'rb') as fin:
inbuffer = io.BytesIO(fin.read())
state_dict = torch.load(inbuffer, map_location='cpu')
print('Successfully load checkpoint file.\n')
backbone = Backbone(depth=args.backbone_depth, num_classes=10)
print('Loading checkpoint weights into backbone.')
backbone = load_from_state(state_dict['backbone_state_dict'], backbone)
backbone = nn.DataParallel(backbone).to(device)
print('Backbone is ready after loading checkpoint and moving to device:')
print(backbone)
n_params_b = sum(
[param.view(-1).size()[0] for param in backbone.parameters()])
print('Number of parameters in backbone: {}\n'.format(n_params_b))
gater = Gater(depth=20,
bottleneck_size=8,
gate_size=backbone.module.gate_size)
print('Loading checkpoint weights into gater.')
gater = load_from_state(state_dict['gater_state_dict'], gater)
gater = nn.DataParallel(gater).to(device)
print('Gater is ready after loading checkpoint and moving to device:')
print(gater)
n_params_g = sum(
[param.view(-1).size()[0] for param in gater.parameters()])
print('Number of parameters in gater: {}'.format(n_params_g))
print('Total number of parameters: {}\n'.format(n_params_b + n_params_g))
print('Running test on test data.')
test(backbone, gater, device, test_loader)
def parse_flags():
"""Parses input arguments."""
parser = argparse.ArgumentParser(description='GaterNet')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--backbone-depth', type=int, default=20,
help='resnet depth of the backbone subnetwork')
parser.add_argument('--checkpoint-file', type=str, default=None,
help='checkpoint file to run test')
parser.add_argument('--data-dir', type=str, default=None,
help='the directory for storing data')
args = parser.parse_args()
return args
def main(args):
print('Input arguments:\n{}\n'.format(args))
use_cuda = not args.no_cuda and torch.cuda.is_available()
print('use_cuda: {}'.format(use_cuda))
device = torch.device('cuda' if use_cuda else 'cpu')
torch.backends.cudnn.benchmark = True
print('device: {}'.format(device))
if not os.path.isdir(args.data_dir):
os.mkdir(args.data_dir)
kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}
normalize_mean = [0.4914, 0.4822, 0.4465]
normalize_std = [0.2470, 0.2435, 0.2616]
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(
args.data_dir,
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(normalize_mean, normalize_std)])
),
batch_size=1000, shuffle=False, drop_last=False, **kwargs)
print('Successfully get data loader.')
run(args, device, test_loader)
if __name__ == '__main__':
main(parse_flags())
| [
"[email protected]"
] | |
d3e688e2e0724426f98735db7408e549f3245eda | 717f7d68e5f36c1d30d223cb201407b0f9c11f9c | /statistical data analysis/02_variance_measure.py | dc22125bd55674bad964315424d4951b43a8ed47 | [] | no_license | raunakshakya/PythonPractice | 7f54508f9adb8541a9cedc7a58e3629bcfb9b6f5 | 3643e9f526401d31ca706f036af0cdae3da04984 | refs/heads/master | 2020-03-22T18:01:54.357735 | 2019-06-05T03:32:07 | 2019-06-05T03:32:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | # https://www.tutorialspoint.com/python/python_measuring_variance.htm
"""
Variance is a measure of how far a value in a data set lies from the mean value.
In other words, it indicates how dispersed the values are. It is measured by using standard deviation.
The other method commonly used is skewness.
"""
import pandas as pd
# Create a Dictionary of series
d = {'Name': pd.Series(['Tom', 'James', 'Ricky', 'Vin', 'Steve', 'Smith', 'Jack',
'Lee', 'Chanchal', 'Gasper', 'Naviya', 'Andres']),
'Age': pd.Series([25, 26, 25, 23, 30, 25, 23, 34, 40, 30, 25, 46]),
'Rating': pd.Series([4.23, 3.24, 3.98, 2.56, 3.20, 4.6, 3.8, 3.78, 2.98, 4.80, 4.10, 3.65])}
# Create a DataFrame
df = pd.DataFrame(d)
"""
Standard deviation is square root of variance.
"""
# Calculate the standard deviation
print(df.std())
print()
"""
Skewness is used to determine whether the data is symmetric or skewed.
If the index is between -1 and 1, then the distribution is symmetric.
If the index is no more than -1 then it is skewed to the left and if it is at least 1, then it is skewed to the right.
"""
# Calculate the skewness
print(df.skew())
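# sanity-check sketch: pandas' std() uses the sample formula (ddof=1), so the
# explicit computation below should match df['Age'].std()
import math
ages = df['Age']
manual_std = math.sqrt(((ages - ages.mean()) ** 2).sum() / (len(ages) - 1))
print(manual_std)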
| [
"[email protected]"
] | |
a97ed4333fd26e92040a8518e6c7c947a2d216c3 | 747f759311d404af31c0f80029e88098193f6269 | /addons/c2c_timesheet_reports/reminder.py | ab9dad36b4cf448e2f10b2624628cc6cc88d7346 | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | /home/openerp/production/extra-addons/c2c_timesheet_reports/reminder.py | [
"[email protected]"
] | |
d6271117a9193e0facb6f43251e36bccbc847ba1 | 9bb01fa882e713aa59345051fec07f4e3d3478b0 | /tests/cysparse_/sparse/common_attributes/test_common_attributes_matrices_likes_ConjugatedSparseMatrix_INT32_t_FLOAT32_t.py | 17904df851dbc456f945b9e24ed309bc76956c00 | [] | no_license | syarra/cysparse | f1169c496b54d61761fdecbde716328fd0fb131b | 7654f7267ab139d0564d3aa3b21c75b364bcfe72 | refs/heads/master | 2020-05-25T16:15:38.160443 | 2017-03-14T21:17:39 | 2017-03-14T21:17:39 | 84,944,993 | 0 | 0 | null | 2017-03-14T12:11:48 | 2017-03-14T12:11:48 | null | UTF-8 | Python | false | false | 7,395 | py | #!/usr/bin/env python
"""
This file tests basic common attributes for **all** matrix like objects.
Proxies are only tested for a :class:`LLSparseMatrix` object.
See file ``sparse_matrix_coherence_test_functions``.
"""
from sparse_matrix_like_common_attributes import common_matrix_like_attributes
import unittest
from cysparse.sparse.ll_mat import *
from cysparse.common_types.cysparse_types import *
########################################################################################################################
# Tests
########################################################################################################################
##################################
# Case Non Symmetric, Non Zero
##################################
class CySparseCommonAttributesMatrices_ConjugatedSparseMatrix_INT32_t_FLOAT32_t_TestCase(unittest.TestCase):
def setUp(self):
self.nrow = 10
self.ncol = 14
self.nnz = self.nrow * self.ncol
self.A = LinearFillLLSparseMatrix(nrow=self.nrow, ncol=self.ncol, dtype=FLOAT32_T, itype=INT32_T)
self.C = self.A.conj
self.nargin = self.ncol
self.nargout = self.nrow
self.base_type_str = self.A.base_type_str
def test_common_attributes(self):
is_OK, attribute = common_matrix_like_attributes(self.C)
self.assertTrue(is_OK, msg="Attribute '%s' is missing" % attribute)
def test_nrow_attribute(self):
self.assertTrue(self.C.nrow == self.nrow)
def test_ncol_attribute(self):
self.assertTrue(self.C.ncol == self.ncol)
def test_nnz_attribute(self):
self.assertTrue(self.C.nnz == self.nnz)
def test_symmetric_storage_attribute(self):
self.assertTrue(not self.C.store_symmetric)
def test_zero_storage_attribute(self):
self.assertTrue(not self.C.store_zero)
def test_is_mutable_attribute(self):
self.assertTrue(self.C.is_mutable)
def test_base_type_str(self):
self.assertTrue(self.C.base_type_str == self.base_type_str, "'%s' is not '%s'" % (self.C.base_type_str, self.base_type_str))
def test_is_symmetric(self):
self.assertTrue(not self.C.is_symmetric)
def test_nargin(self):
self.assertTrue(self.nargin == self.C.nargin)
def test_nargout(self):
self.assertTrue(self.nargout == self.C.nargout)
##################################
# Case Symmetric, Non Zero
##################################
class CySparseCommonAttributesSymmetricMatrices_ConjugatedSparseMatrix_INT32_t_FLOAT32_t_TestCase(unittest.TestCase):
def setUp(self):
self.size = 14
        self.nnz = ((self.size + 1) * self.size) // 2  # integer division: nnz is a count
self.nargin = self.size
self.nargout = self.size
self.A = LinearFillLLSparseMatrix(size=self.size, dtype=FLOAT32_T, itype=INT32_T, store_symmetric=True)
self.C = self.A.conj
self.base_type_str = self.A.base_type_str
def test_common_attributes(self):
is_OK, attribute = common_matrix_like_attributes(self.C)
self.assertTrue(is_OK, msg="Attribute '%s' is missing" % attribute)
def test_nrow_attribute(self):
self.assertTrue(self.C.nrow == self.size)
def test_ncol_attribute(self):
self.assertTrue(self.C.ncol == self.size)
def test_nnz_attribute(self):
self.assertTrue(self.C.nnz == self.nnz, '%d is not %d' % (self.C.nnz, self.nnz))
def test_symmetric_storage_attribute(self):
self.assertTrue(self.C.store_symmetric)
def test_zero_storage_attribute(self):
self.assertTrue(not self.C.store_zero)
def test_is_mutable_attribute(self):
self.assertTrue(self.C.is_mutable)
def test_base_type_str(self):
self.assertTrue(self.C.base_type_str == self.base_type_str)
def test_is_symmetric(self):
self.assertTrue(self.C.is_symmetric)
def test_nargin(self):
self.assertTrue(self.nargin == self.C.nargin)
def test_nargout(self):
self.assertTrue(self.nargout == self.C.nargout)
##################################
# Case Non Symmetric, Zero
##################################
class CySparseCommonAttributesWithZeroMatrices_ConjugatedSparseMatrix_INT32_t_FLOAT32_t_TestCase(unittest.TestCase):
def setUp(self):
self.nrow = 10
self.ncol = 14
self.nnz = self.nrow * self.ncol
self.A = LinearFillLLSparseMatrix(nrow=self.nrow, ncol=self.ncol, dtype=FLOAT32_T, itype=INT32_T, store_zero=True)
self.C = self.A.conj
self.nargin = self.ncol
self.nargout = self.nrow
self.base_type_str = self.A.base_type_str
def test_common_attributes(self):
is_OK, attribute = common_matrix_like_attributes(self.C)
self.assertTrue(is_OK, msg="Attribute '%s' is missing" % attribute)
def test_nrow_attribute(self):
self.assertTrue(self.C.nrow == self.nrow)
def test_ncol_attribute(self):
self.assertTrue(self.C.ncol == self.ncol)
def test_nnz_attribute(self):
self.assertTrue(self.C.nnz == self.nnz)
def test_symmetric_storage_attribute(self):
self.assertTrue(not self.C.store_symmetric)
def test_zero_storage_attribute(self):
self.assertTrue(self.C.store_zero)
def test_is_mutable_attribute(self):
self.assertTrue(self.C.is_mutable)
def test_base_type_str(self):
self.assertTrue(self.C.base_type_str == self.base_type_str)
def test_is_symmetric(self):
self.assertTrue(not self.C.is_symmetric)
def test_nargin(self):
self.assertTrue(self.nargin == self.C.nargin)
def test_nargout(self):
self.assertTrue(self.nargout == self.C.nargout)
##################################
# Case Symmetric, Zero
##################################
class CySparseCommonAttributesSymmetricWithZeroMatrices_ConjugatedSparseMatrix_INT32_t_FLOAT32_t_TestCase(unittest.TestCase):
def setUp(self):
self.size = 14
        self.nnz = ((self.size + 1) * self.size) // 2  # integer division: nnz is a count
self.A = LinearFillLLSparseMatrix(size=self.size, dtype=FLOAT32_T, itype=INT32_T, store_symmetric=True, store_zero=True)
self.nargin = self.size
self.nargout = self.size
self.C = self.A.conj
self.base_type_str = self.A.base_type_str
def test_common_attributes(self):
is_OK, attribute = common_matrix_like_attributes(self.C)
self.assertTrue(is_OK, msg="Attribute '%s' is missing" % attribute)
def test_nrow_attribute(self):
self.assertTrue(self.C.nrow == self.size)
def test_ncol_attribute(self):
self.assertTrue(self.C.ncol == self.size)
def test_nnz_attribute(self):
        self.assertTrue(self.C.nnz == self.nnz, '%d is not %d' % (self.C.nnz, self.nnz))
def test_symmetric_storage_attribute(self):
self.assertTrue(self.C.store_symmetric)
def test_zero_storage_attribute(self):
self.assertTrue(self.C.store_zero)
def test_is_mutable_attribute(self):
self.assertTrue(self.C.is_mutable)
def test_base_type_str(self):
self.assertTrue(self.C.base_type_str == self.base_type_str)
def test_is_symmetric(self):
self.assertTrue(self.C.is_symmetric)
def test_nargin(self):
self.assertTrue(self.nargin == self.C.nargin)
def test_nargout(self):
self.assertTrue(self.nargout == self.C.nargout)
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
c284c62518399e7db958db634226e57614625b90 | 2f0ab8bdc939da79a314a749044a13a429596832 | /odin/bay/layers/dense.py | 7c455df3b1845c0f9ec7b08f8e4f5fe317632de0 | [
"MIT"
] | permissive | YashinaTatiana/odin-ai | 6bb65d691471cf632b4137fc21bd0b09ef76cf2a | f44c56c3f01ddb354bfc5df430bfd6587f95897c | refs/heads/master | 2022-04-20T15:26:20.617310 | 2020-04-23T17:35:57 | 2020-04-23T17:35:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,313 | py | from __future__ import absolute_import, division, print_function
import inspect
from functools import partial
from numbers import Number
from typing import Callable, Optional, Text, Type, Union
import numpy as np
import tensorflow as tf
from six import string_types
from tensorflow.python.keras import Model, Sequential
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.layers import Dense, Lambda
from tensorflow_probability.python.bijectors import FillScaleTriL
from tensorflow_probability.python.distributions import (Categorical,
Distribution,
Independent,
MixtureSameFamily,
MultivariateNormalDiag,
MultivariateNormalTriL,
Normal)
from tensorflow_probability.python.internal import \
distribution_util as dist_util
from tensorflow_probability.python.layers import DistributionLambda
from tensorflow_probability.python.layers.distribution_layer import (
DistributionLambda, _get_convert_to_tensor_fn, _serialize,
_serialize_function)
from odin import backend as bk
from odin.bay.distribution_alias import parse_distribution
from odin.bay.helpers import (KLdivergence, is_binary_distribution,
is_discrete_distribution, is_mixture_distribution,
is_zeroinflated_distribution, kl_divergence)
from odin.bay.layers.continuous import VectorDeterministicLayer
from odin.bay.layers.distribution_util_layers import Moments, Sampling
__all__ = [
'DenseDeterministic', 'DenseDistribution', 'MixtureDensityNetwork',
'MixtureMassNetwork'
]
def _params_size(layer, event_shape):
spec = inspect.getfullargspec(layer.params_size)
args = spec.args + spec.kwonlyargs
if 'event_size' == args[0]:
event_shape = tf.reduce_prod(event_shape)
# extra kwargs from function closure
kw = {}
if len(args) > 1:
fn = layer._make_distribution_fn
closures = {
k: v.cell_contents
for k, v in zip(fn.__code__.co_freevars, fn.__closure__)
}
for k in args[1:]:
if k in closures:
kw[k] = closures[k]
return layer.params_size(event_shape, **kw)
class DenseDistribution(Dense):
r""" Using `Dense` layer to parameterize the tensorflow_probability
`Distribution`
Arguments:
event_shape : `int`
number of output units.
posterior : the posterior distribution, a distribution alias or Distribution
type can be given for later initialization (Default: 'normal').
prior : {`None`, `tensorflow_probability.Distribution`}
prior distribution, used for calculating KL divergence later.
use_bias : `bool` (default=`True`)
enable biases for the Dense layers
posterior_kwargs : `dict`. Keyword arguments for initializing the posterior
`DistributionLambda`
Return:
`tensorflow_probability.Distribution`
"""
def __init__(self,
event_shape=(),
posterior='normal',
posterior_kwargs={},
prior=None,
convert_to_tensor_fn=Distribution.sample,
dropout=0.0,
activation='linear',
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
disable_projection=False,
**kwargs):
assert prior is None or isinstance(prior, Distribution), \
"prior can be None or instance of tensorflow_probability.Distribution"
# duplicated event_shape or event_size in posterior_kwargs
posterior_kwargs = dict(posterior_kwargs)
if 'event_shape' in posterior_kwargs:
event_shape = posterior_kwargs.pop('event_shape')
if 'event_size' in posterior_kwargs:
event_shape = posterior_kwargs.pop('event_size')
    # fall back to the constructor argument instead of silently resetting it
    convert_to_tensor_fn = posterior_kwargs.pop('convert_to_tensor_fn',
                                                convert_to_tensor_fn)
# process the posterior
# TODO: support give instance of DistributionLambda directly
if inspect.isclass(posterior) and issubclass(posterior, DistributionLambda):
post_layer_cls = posterior
else:
post_layer_cls, _ = parse_distribution(posterior)
# create layers
self._convert_to_tensor_fn = convert_to_tensor_fn
self._posterior = posterior
self._prior = prior
self._event_shape = event_shape
self._posterior_class = post_layer_cls
self._posterior_kwargs = posterior_kwargs
self._dropout = dropout
# set more descriptive name
name = kwargs.pop('name', None)
if name is None:
name = 'dense_%s' % (posterior if isinstance(posterior, string_types) else
posterior.__class__.__name__)
kwargs['name'] = name
# params_size could be static function or method
params_size = _params_size(self.posterior_layer(), event_shape)
self._disable_projection = bool(disable_projection)
super(DenseDistribution,
self).__init__(units=params_size,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
# store the distribution from last call
self._last_distribution = None
# if 'input_shape' in kwargs and not self.built:
# self.build(kwargs['input_shape'])
def build(self, input_shape):
if self._disable_projection:
self.built = True
else:
super().build(input_shape)
@property
def is_binary(self):
return is_binary_distribution(self.posterior_layer)
@property
def is_discrete(self):
return is_discrete_distribution(self.posterior_layer)
@property
def is_mixture(self):
return is_mixture_distribution(self.posterior_layer)
@property
def is_zero_inflated(self):
return is_zeroinflated_distribution(self.posterior_layer)
@property
def event_shape(self):
shape = self._event_shape
if not (tf.is_tensor(shape) or isinstance(shape, tf.TensorShape)):
shape = tf.nest.flatten(shape)
return shape
@property
def event_size(self):
return tf.cast(tf.reduce_prod(self._event_shape), tf.int32)
@property
def prior(self) -> Distribution:
return self._prior
@prior.setter
def prior(self, p):
assert isinstance(p, (Distribution, type(None)))
self._prior = p
def posterior_layer(self, sample_shape=()) -> DistributionLambda:
if self._convert_to_tensor_fn == Distribution.sample:
fn = partial(Distribution.sample, sample_shape=sample_shape)
else:
fn = self._convert_to_tensor_fn
return self._posterior_class(self._event_shape,
convert_to_tensor_fn=fn,
**self._posterior_kwargs)
@property
def posterior(self) -> Distribution:
r""" Return the last parametrized distribution, i.e. the result from the
last `call` """
return self._last_distribution
@tf.function
def sample(self, sample_shape=(), seed=None):
r""" Sample from prior distribution """
if self._prior is None:
raise RuntimeError("prior hasn't been provided for the %s" %
self.__class__.__name__)
return self.prior.sample(sample_shape=sample_shape, seed=seed)
def call(self,
inputs,
training=None,
sample_shape=(),
projection=True,
prior=None):
# projection by Dense layer could be skipped by setting projection=False
# NOTE: a 2D inputs is important here, but we don't want to flatten
# automatically
if projection and not self._disable_projection:
params = super().call(inputs)
else:
params = inputs
# applying dropout
if self._dropout > 0:
params = bk.dropout(params, p_drop=self._dropout, training=training)
# create posterior distribution (this will create a new layer everytime)
posterior = self.posterior_layer(sample_shape=sample_shape)(
params, training=training)
self._last_distribution = posterior
# NOTE: all distribution has the method kl_divergence, so we cannot use it
prior = self.prior if prior is None else prior
posterior.KL_divergence = KLdivergence(
posterior, prior=prior,
sample_shape=None) # None mean reuse samples here
assert not hasattr(posterior, 'prior'), "Cannot assign prior to the output"
posterior.prior = prior
return posterior
def kl_divergence(self,
prior=None,
analytic=True,
sample_shape=1,
reverse=True):
r""" KL(q||p) where `p` is the posterior distribution returned from last
call
Arguments:
prior : instance of `tensorflow_probability.Distribution`
prior distribution of the latent
analytic : `bool` (default=`True`). Using closed form solution for
calculating divergence, otherwise, sampling with MCMC
reverse : `bool`. If `True`, calculate `KL(q||p)` else `KL(p||q)`
sample_shape : `int` (default=`1`)
number of MCMC sample if `analytic=False`
Return:
kullback_divergence : Tensor [sample_shape, batch_size, ...]
"""
if prior is None:
prior = self._prior
assert isinstance(prior, Distribution), "prior is not given!"
if self.posterior is None:
raise RuntimeError(
"DenseDistribution must be called to create the distribution before "
"calculating the kl-divergence.")
kullback_div = kl_divergence(q=self.posterior,
p=prior,
analytic=bool(analytic),
reverse=reverse,
q_sample=sample_shape,
auto_remove_independent=True)
if analytic:
kullback_div = tf.expand_dims(kullback_div, axis=0)
if isinstance(sample_shape, Number) and sample_shape > 1:
ndims = kullback_div.shape.ndims
kullback_div = tf.tile(kullback_div, [sample_shape] + [1] * (ndims - 1))
return kullback_div
def log_prob(self, x):
r""" Calculating the log probability (i.e. log likelihood) using the last
distribution returned from call """
return self.posterior.log_prob(x)
def __repr__(self):
return self.__str__()
def __str__(self):
text = "<Dense proj:%s shape:%s #params:%d posterior:%s prior:%s dropout:%.2f kw:%s>" % \
(not self._disable_projection, self.event_shape, self.units,
self._posterior_class.__name__, str(self.prior),
self._dropout, str(self._posterior_kwargs))
text = text.replace("tfp.distributions.", "")
return text
def get_config(self):
config = super().get_config()
config['convert_to_tensor_fn'] = _serialize(self._convert_to_tensor_fn)
config['event_shape'] = self._event_shape
config['posterior'] = self._posterior
config['prior'] = self._prior
config['dropout'] = self._dropout
config['posterior_kwargs'] = self._posterior_kwargs
config['disable_projection'] = self._disable_projection
return config
# ===========================================================================
# Shortcuts
# ===========================================================================
class MixtureDensityNetwork(DenseDistribution):
def __init__(self,
units,
n_components=2,
covariance='none',
loc_activation='linear',
scale_activation='softplus1',
convert_to_tensor_fn=Distribution.sample,
use_bias=True,
dropout=0.0,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
self.covariance = covariance
self.n_components = n_components
super().__init__(event_shape=units,
posterior='mixgaussian',
posterior_kwargs=dict(n_components=int(n_components),
covariance=str(covariance),
loc_activation=loc_activation,
scale_activation=scale_activation),
convert_to_tensor_fn=convert_to_tensor_fn,
dropout=dropout,
activation='linear',
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
def set_prior(self, loc=0., log_scale=np.log(np.expm1(1)), mixture_logits=1.):
r""" Set the prior for mixture density network
loc : Scalar or Tensor with shape `[n_components, event_size]`
log_scale : Scalar or Tensor with shape
`[n_components, event_size]` for 'none' and 'diag' component, and
`[n_components, event_size*(event_size +1)//2]` for 'full' component.
mixture_logits : Scalar or Tensor with shape `[n_components]`
"""
event_size = self.event_size
if self.covariance == 'diag':
scale_shape = [self.n_components, event_size]
fn = lambda l, s: MultivariateNormalDiag(loc=l,
scale_diag=tf.nn.softplus(s))
elif self.covariance == 'none':
scale_shape = [self.n_components, event_size]
fn = lambda l, s: Independent(Normal(loc=l, scale=tf.math.softplus(s)), 1)
elif self.covariance == 'full':
scale_shape = [self.n_components, event_size * (event_size + 1) // 2]
fn = lambda l, s: MultivariateNormalTriL(
          loc=l, scale_tril=FillScaleTriL(diag_shift=1e-5)(tf.math.softplus(s)))
    else:
      raise ValueError("Unknown covariance type: '%s'" % self.covariance)
#
    if isinstance(loc, Number) or tf.rank(loc) == 0:
      loc = tf.fill([self.n_components, self.event_size], loc)
#
if isinstance(log_scale, Number) or tf.rank(log_scale) == 0:
log_scale = tf.fill(scale_shape, log_scale)
#
if mixture_logits is None:
mixture_logits = 1.
if isinstance(mixture_logits, Number) or tf.rank(mixture_logits) == 0:
mixture_logits = tf.fill([self.n_components], mixture_logits)
#
loc = tf.cast(loc, self.dtype)
log_scale = tf.cast(log_scale, self.dtype)
mixture_logits = tf.cast(mixture_logits, self.dtype)
self._prior = MixtureSameFamily(
components_distribution=fn(loc, log_scale),
mixture_distribution=Categorical(logits=mixture_logits),
name="prior")
return self
class MixtureMassNetwork(DenseDistribution):
def __init__(self,
event_shape=(),
n_components=2,
mean_activation='softplus1',
disp_activation=None,
dispersion='full',
alternative=False,
zero_inflated=False,
convert_to_tensor_fn=Distribution.sample,
use_bias=True,
dropout=0.0,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
self.n_components = n_components
self.dispersion = dispersion
self.zero_inflated = zero_inflated
self.alternative = alternative
super().__init__(event_shape=event_shape,
posterior='mixnb',
prior=None,
posterior_kwargs=dict(
n_components=int(n_components),
mean_activation=mean_activation,
disp_activation=disp_activation,
dispersion=dispersion,
alternative=alternative,
zero_inflated=zero_inflated,
),
convert_to_tensor_fn=convert_to_tensor_fn,
dropout=dropout,
activation='linear',
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
class DenseDeterministic(DenseDistribution):
r""" Similar to `keras.Dense` layer but return a
`tensorflow_probability.VectorDeterministic` distribution to represent
the output, hence, making it compatible to the probabilistic framework.
"""
def __init__(self,
units,
dropout=0.0,
activation='linear',
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super().__init__(event_shape=int(units),
posterior='vdeterministic',
posterior_kwargs={},
prior=None,
convert_to_tensor_fn=Distribution.sample,
dropout=dropout,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
| [
"[email protected]"
] | |
4ea282c215b39bd1c20dba68c9aa0b0f34066973 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/yaml-3.10/yaml/composer.py | 09bc60b808f475856445853ce5b7bc7054ef576a | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/yaml-3.10/yaml/composer.py | [
"[email protected]"
] | |
8e5530438c97d0e7a5e092c7bb90b4885627ccac | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_Conv1DTranspose_7.py | 38603397c5765f6c7bc79e052c30df74cd538a9e | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 635 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_Conv1DTranspose_7():
"""test Conv1DTranspose_7"""
jit_case = JitTrans(case=yml.get_case_info("Conv1DTranspose_7"))
jit_case.jit_run()
| [
"[email protected]"
] | |
8761eaff518d705e57ceb09085af5d4088e080fb | 71047c4c45e97f474b45e0989202d8d88b0e4895 | /sparse_dot/sparse_dot.py | 3e82d3d8e3094bf5d04af57799a660e9c4e6e47a | [
"Apache-2.0"
] | permissive | pluralsight/sparse_dot | bd86e676e323ef84f51b2b5ae7b453ba7fa66916 | 271131a3a28f12d7dcca0fbeca8591e9d052c98a | refs/heads/master | 2023-04-01T06:03:51.845211 | 2016-12-13T19:33:30 | 2016-12-13T19:33:30 | 72,578,014 | 3 | 1 | Apache-2.0 | 2023-03-27T18:25:07 | 2016-11-01T21:24:52 | Python | UTF-8 | Python | false | false | 2,459 | py | '''The main script'''
# To update with any Cython changes, just run:
# python setup.py build_ext --inplace
import numpy as np
import cy_sparse_dot
def to_saf(arr1d):
arr1d = np.asanyarray(arr1d)
locs = np.nonzero(arr1d)
return {'locs': locs[0].astype(np.uint32),
'array': arr1d[locs].astype(np.float32)}
def to_saf_list(arr2d):
    return list(map(to_saf, arr2d))
def validate_saf(saf, verbose=True):
'''True if the locs (indices) in a saf are ordered
AND the data types of the arrays are uint32 and float32 respectively'''
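    # A well-formed saf looks like (hypothetical values):
    #   {'locs': np.array([0, 3], dtype=np.uint32),
    #    'array': np.array([1.5, 2.0], dtype=np.float32)}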
def vpr(x):
if verbose:
            print(x)
if not ('locs' in saf and 'array' in saf):
vpr('missing members')
return False
if not (hasattr(saf['locs'], 'dtype') and
hasattr(saf['array'], 'dtype')):
vpr('members not arrays')
return False
if not (saf['locs'].dtype == np.uint32 and
saf['array'].dtype == np.float32):
vpr('bad dtype')
return False
if not np.all(saf['locs'][1:] > saf['locs'][:-1]):
vpr('locs not ordered')
return False
return True
def sparse_dot_full(saf_list, validate=True, verbose=True):
    '''Takes a list of arrays in locs/array dict form and computes the dot
    product for every pair of them'''
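    # Result sketch (field names as used by the cosine helpers below):
    #   dots = sparse_dot_full(to_saf_list(np.eye(2, dtype=np.float32)))
    #   dots['i'], dots['j'], dots['sparse_result']  # row pairs and dot values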
if validate:
assert all(validate_saf(saf, verbose=verbose) for saf in saf_list)
return cy_sparse_dot.cy_sparse_dot_full(saf_list)
def dot_full_using_sparse(arr):
'''Takes a 2d array and runs dot products against every
combination of rows'''
return sparse_dot_full(to_saf_list(arr), validate=False)
def sparse_cos_similarity(saf_list, validate=True, verbose=True):
norms = np.array([np.linalg.norm(saf['array']) for saf in saf_list])
dots = sparse_dot_full(saf_list, validate=validate, verbose=verbose)
norm_i, norm_j = norms[(dots['i'],)], norms[(dots['j'],)]
dots['sparse_result'] /= norm_i * norm_j
return dots
def sparse_cos_distance(saf_list, validate=True, verbose=True):
dots = sparse_cos_similarity(saf_list, validate=validate, verbose=verbose)
dots['sparse_result'] *= -1
dots['sparse_result'] += 1
return dots
def cos_similarity_using_sparse(arr):
return sparse_cos_similarity(to_saf_list(arr))
def cos_distance_using_sparse(arr):
return sparse_cos_distance(to_saf_list(arr))
if __name__ == '__main__':
r = dot_full_using_sparse([[1, 0, 0, 1, 3, 1],
[2, 0, 0, 0, 1, 5]])
    print(r)
| [
"[email protected]"
] | |
9a8cac4b3e32f6a077d7eb78d0f5051c0512f4a2 | 936dc2666f27de7a7d1428c7ad2ded62a722b8fa | /src/aids/migrations/0176_alter_aid_subvention_comment.py | 9b1d57c10296955b4148ac4b92dc72f523a577cb | [
"ISC"
] | permissive | MTES-MCT/aides-territoires | 03451a32bdeaab3812b8593bfe3a27c1b1d9a182 | af9f6e6e8b1918363793fbf291f3518ef1454169 | refs/heads/master | 2023-09-04T22:15:17.819264 | 2023-08-25T13:19:17 | 2023-08-25T13:19:17 | 124,301,398 | 21 | 11 | NOASSERTION | 2023-09-12T13:46:49 | 2018-03-07T22:19:11 | Python | UTF-8 | Python | false | false | 553 | py | # Generated by Django 4.1.7 on 2023-04-25 12:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("aids", "0175_aid_ds_id_aid_ds_mapping_aid_ds_schema_exists"),
]
operations = [
migrations.AlterField(
model_name="aid",
name="subvention_comment",
field=models.CharField(
blank=True,
max_length=255,
verbose_name="Taux de subvention (commentaire optionnel)",
),
),
]
| [
"[email protected]"
] | |
54ca12d67db93ad73a903f15cd006cccfdefb7b9 | 38b8bceafb4d80afc7c77196eb9ee99694191bcf | /scrapy/study/w3school/w3school/pipelines.py | f7b1b2c843306a1645d8a3cfe490cb83fe27c0f5 | [] | no_license | tangc1986/PythonStudy | f6c5b384874e82fbf0b5f51cfb7a7a89a48ec0ff | 1ed1956758e971647426e7096ac2e8cbcca585b4 | refs/heads/master | 2021-01-23T20:39:23.930754 | 2017-10-08T07:40:32 | 2017-10-08T07:42:38 | 42,122,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import codecs
class W3SchoolPipeline(object):
def __init__(self):
        self.file = codecs.open('w3school_data_utf8.json', 'w', encoding='utf-8')
def process_item(self, item, spider):
        # ensure_ascii=False keeps non-ASCII text readable without the fragile
        # unicode_escape round-trip used previously
        line = json.dumps(dict(item), ensure_ascii=False) + '\n'
        self.file.write(line)
return item
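# Reminder (standard Scrapy wiring; the priority value 300 is illustrative):
#   ITEM_PIPELINES = {'w3school.pipelines.W3SchoolPipeline': 300}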
| [
"[email protected]"
] | |
bc84b1d032b1997b9821ff3ddf33c6d2c324154e | a6f9b3b1503fc08e7d77161bb2b5beac3b89480d | /app/main/views.py | d9d53ba612eb73e52f381e20e54f8542edc6833b | [] | no_license | AugustineOchieng/newsFeed | e779d7e4cf91c6769947cb5e140d417001ecf7c2 | b08720540c31453f895f395524177edb489add97 | refs/heads/master | 2020-05-09T14:53:23.346539 | 2019-04-16T10:04:09 | 2019-04-16T10:04:09 | 181,212,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | from flask import render_template,request,redirect,url_for
from . import main
from ..request import get_source,article_source,get_category,get_headlines
#our views
@main.route('/')
def index():
'''
Root function returning index/home page with data
'''
source= get_source()
headlines = get_headlines()
return render_template('index.html',sources=source, headlines = headlines)
@main.route('/article/<id>')
def article(id):
'''
    Return the article details page and its data for the given source id
'''
# title= 'Articles'
articles = article_source(id)
return render_template('article.html',articles= articles,id=id )
@main.route('/categories/<cat_name>')
def category(cat_name):
'''
    Return the categories.html page and its content for the given category
'''
category = get_category(cat_name)
title = f'{cat_name}'
cat = cat_name
return render_template('categories.html',title = title,category = category, cat= cat_name)
| [
"[email protected]"
] | |
ec7f623c696363ec82e593a0d6aac0e53ec677b5 | 26d450ba94d06ce9ff2ced9e41c2daf6ea011118 | /demo/demo3.py | 49f164c4895737804908e1e03a4f0aff2eb7e256 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | Correct-Syntax/ui-style-lang | d0a9ac478dc9c7572d0d663b48eea0c07dd48705 | 5c6dc9f1cd50de35c5745082dd02dc572f794ccc | refs/heads/master | 2023-04-28T20:01:03.866809 | 2021-05-14T14:38:49 | 2021-05-14T14:38:49 | 267,696,517 | 12 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,331 | py | # Demo for UI Style Lang
# ======================
# This demo file (demo3.py) is MIT licensed - (C) 2020 Noah Rahm, Correct Syntax
# Usage:
# It's a custom button...click it!
# This demo is up-to-date
import wx
import wx.adv  # needed for wx.adv.NotificationMessage used in ButtonCallback
from uistylelang import UIStylePDC, UIStyleApp, UIStyleFrame
class MainApplication(UIStyleFrame):
def __init__(self):
UIStyleFrame.__init__(self, None, title="UI Style Lang Demo", pos=(0, 0), size=(1000, 800), name="main-frame")
# Create the DC
self._pdc = UIStylePDC(
self,
'./custom-widget-demo.uiss'
)
self.DrawDrawing(self._pdc)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: None)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
def OnPaint(self, event):
dc = wx.BufferedPaintDC(self)
dc = wx.GCDC(dc)
# We need to clear the dc BEFORE calling PrepareDC.
dc.Clear()
# Draw to the dc using the calculated clipping rect.
self._pdc.DrawToDCClipped(
dc, wx.Rect(0, 0, self.Size[0], self.Size[1])
)
def OnMotion(self, event):
pnt = event.GetPosition()
elem_rect = self._pdc.GetWxRect('button')
mouse_pnt = wx.Rect(pnt[0], pnt[1], 1, 1)
if mouse_pnt.Intersects(elem_rect) == True:
self._pdc.UpdateElem('button:hover')
else:
self._pdc.UpdateElem('button')
self.RefreshDemo()
def OnLeftDown(self, event):
pnt = event.GetPosition()
elem_rect = self._pdc.GetWxRect('button')
# Create a 1x1 rect for the mouse pointer
mouse_pnt = wx.Rect(pnt[0], pnt[1], 1, 1)
if mouse_pnt.Intersects(elem_rect) == True:
self._pdc.UpdateElem('button:press')
self._pdc.UpdateElem('button-text:hover')
self.RefreshDemo()
def OnLeftUp(self, event):
pnt = event.GetPosition()
elem_rect = self._pdc.GetWxRect('button')
# Create a 1x1 rect for the mouse pointer
mouse_pnt = wx.Rect(pnt[0], pnt[1], 1, 1)
if mouse_pnt.Intersects(elem_rect) == True:
self._pdc.UpdateElem('button')
self._pdc.UpdateElem('button-text')
self.ButtonCallback()
self.RefreshDemo()
def DrawDrawing(self, dc):
# Initial
dc.InitElem('button')
dc.InitElem('button-text', "TEXT", "UI Style Lang Demo")
dc.InitElem('text', "TEXT", "UI Style Lang Demo")
def RefreshDemo(self):
rect = wx.Rect(0, 0, self.Size[0], self.Size[1])
self.RefreshRect(rect, False)
self.Refresh()
def ButtonCallback(self):
notify = wx.adv.NotificationMessage(
title="Button Clicked",
message="You clicked the UI Style Lang custom button",
parent=None, flags=wx.ICON_INFORMATION)
        notify.Show(timeout=2)  # timeout in seconds; wx.adv also defines Timeout_Auto / Timeout_Never
if __name__ == '__main__':
# Create the app and startup
app = UIStyleApp(file='./custom-widget-demo.uiss', redirect=False)
frame = MainApplication()
frame.Show()
app.SetTopWindow(frame)
app.MainLoop()
| [
"[email protected]"
] | |
a9e9e39af8ee92b0bba4707c28c6f54219d54710 | 3f327d2654b85b922909925b9f475315d78f4652 | /Backend/lib/python3.6/site-packages/twilio/rest/api/v2010/account/outgoing_caller_id.py | 628e08a85f4247c7f5276900e177aa432bd198b0 | [
"MIT"
] | permissive | brianwang1217/SelfImprovementWebApp | 8db45914027537aee9614f9d218c93cc08dc90f8 | 7892fc4ee5434307b74b14257b29a5f05a0a0dd7 | refs/heads/master | 2022-12-13T15:01:08.595735 | 2018-06-23T04:46:06 | 2018-06-23T04:46:06 | 137,548,289 | 1 | 1 | MIT | 2022-05-25T01:28:29 | 2018-06-16T02:48:52 | Python | UTF-8 | Python | false | false | 15,446 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class OutgoingCallerIdList(ListResource):
""" """
def __init__(self, version, account_sid):
"""
Initialize the OutgoingCallerIdList
:param Version version: Version that contains the resource
:param account_sid: The unique sid that identifies this account
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
"""
super(OutgoingCallerIdList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
}
self._uri = '/Accounts/{account_sid}/OutgoingCallerIds.json'.format(**self._solution)
def stream(self, phone_number=values.unset, friendly_name=values.unset,
limit=None, page_size=None):
"""
Streams OutgoingCallerIdInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode phone_number: Filter by phone number
:param unicode friendly_name: Filter by friendly name
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
phone_number=phone_number,
friendly_name=friendly_name,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, phone_number=values.unset, friendly_name=values.unset,
limit=None, page_size=None):
"""
Lists OutgoingCallerIdInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode phone_number: Filter by phone number
:param unicode friendly_name: Filter by friendly name
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdInstance]
"""
return list(self.stream(
phone_number=phone_number,
friendly_name=friendly_name,
limit=limit,
page_size=page_size,
))
def page(self, phone_number=values.unset, friendly_name=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of OutgoingCallerIdInstance records from the API.
Request is executed immediately
:param unicode phone_number: Filter by phone number
:param unicode friendly_name: Filter by friendly name
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdPage
"""
params = values.of({
'PhoneNumber': phone_number,
'FriendlyName': friendly_name,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return OutgoingCallerIdPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of OutgoingCallerIdInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return OutgoingCallerIdPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a OutgoingCallerIdContext
:param sid: Fetch by unique outgoing-caller-id Sid
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext
"""
return OutgoingCallerIdContext(
self._version,
account_sid=self._solution['account_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a OutgoingCallerIdContext
:param sid: Fetch by unique outgoing-caller-id Sid
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext
"""
return OutgoingCallerIdContext(
self._version,
account_sid=self._solution['account_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.OutgoingCallerIdList>'
class OutgoingCallerIdPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the OutgoingCallerIdPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The unique sid that identifies this account
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdPage
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdPage
"""
super(OutgoingCallerIdPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of OutgoingCallerIdInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdInstance
"""
return OutgoingCallerIdInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.OutgoingCallerIdPage>'
class OutgoingCallerIdContext(InstanceContext):
""" """
def __init__(self, version, account_sid, sid):
"""
Initialize the OutgoingCallerIdContext
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param sid: Fetch by unique outgoing-caller-id Sid
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext
"""
super(OutgoingCallerIdContext, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'sid': sid,
}
self._uri = '/Accounts/{account_sid}/OutgoingCallerIds/{sid}.json'.format(**self._solution)
def fetch(self):
"""
Fetch a OutgoingCallerIdInstance
:returns: Fetched OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return OutgoingCallerIdInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
def update(self, friendly_name=values.unset):
"""
Update the OutgoingCallerIdInstance
:param unicode friendly_name: A human readable description of the caller ID
:returns: Updated OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdInstance
"""
data = values.of({
'FriendlyName': friendly_name,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return OutgoingCallerIdInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the OutgoingCallerIdInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.OutgoingCallerIdContext {}>'.format(context)
class OutgoingCallerIdInstance(InstanceResource):
""" """
def __init__(self, version, payload, account_sid, sid=None):
"""
Initialize the OutgoingCallerIdInstance
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdInstance
"""
super(OutgoingCallerIdInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'friendly_name': payload['friendly_name'],
'account_sid': payload['account_sid'],
'phone_number': payload['phone_number'],
'uri': payload['uri'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: OutgoingCallerIdContext for this OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext
"""
if self._context is None:
self._context = OutgoingCallerIdContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: A string that uniquely identifies this outgoing-caller-ids
:rtype: unicode
"""
return self._properties['sid']
@property
def date_created(self):
"""
:returns: The date this resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date this resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def friendly_name(self):
"""
:returns: A human readable description for this resource
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def account_sid(self):
"""
:returns: The unique sid that identifies this account
:rtype: unicode
"""
return self._properties['account_sid']
@property
def phone_number(self):
"""
:returns: The incoming phone number
:rtype: unicode
"""
return self._properties['phone_number']
@property
def uri(self):
"""
:returns: The URI for this resource
:rtype: unicode
"""
return self._properties['uri']
def fetch(self):
"""
Fetch a OutgoingCallerIdInstance
:returns: Fetched OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdInstance
"""
return self._proxy.fetch()
def update(self, friendly_name=values.unset):
"""
Update the OutgoingCallerIdInstance
:param unicode friendly_name: A human readable description of the caller ID
:returns: Updated OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
)
def delete(self):
"""
Deletes the OutgoingCallerIdInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.OutgoingCallerIdInstance {}>'.format(context)
| [
"[email protected]"
] | |
3a28f8f33d4492d8f2c8922de5dcebfd6fbd2cff | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.4/django/contrib/comments/templatetags/__init__.py | f5f7397defa1565158775adfb5cee392fb466f4d | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.4/django/contrib/comments/templatetags/__init__.py | [
"[email protected]"
] | |
9e2d3cebcf48028ed81634ea26699d80d74e08bd | 74f0c966d09786f447ad60bf837ea342cb405874 | /neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py | 1437e0cd4203167b172302fcb4514f6b1a8a20d2 | [
"Apache-2.0"
] | permissive | mrwukang/neutron | ad354d19a6ba4ec9a92b4e54d02cf1bbfd66e47e | ebdb2ad1213eaf09c6a3f061a94ff4453c3e7506 | refs/heads/master | 2020-04-11T18:24:09.601969 | 2019-01-02T15:42:05 | 2019-01-02T15:42:05 | 161,997,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,153 | py | # Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import importutils
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
import ovs_test_base
call = mock.call # short hand
class OVSBridgeTestBase(ovs_test_base.OVSRyuTestBase):
_ARP_MODULE = 'ryu.lib.packet.arp'
_ETHER_TYPES_MODULE = 'ryu.lib.packet.ether_types'
_ICMPV6_MODULE = 'ryu.lib.packet.icmpv6'
_IN_PROTO_MODULE = 'ryu.lib.packet.in_proto'
_OFP_MODULE = 'ryu.ofproto.ofproto_v1_3'
_OFPP_MODULE = 'ryu.ofproto.ofproto_v1_3_parser'
def setup_bridge_mock(self, name, cls):
self.br = cls(name)
self.dp = mock.Mock()
self.ofp = importutils.import_module(self._OFP_MODULE)
self.ofpp = importutils.import_module(self._OFPP_MODULE)
self.arp = importutils.import_module(self._ARP_MODULE)
self.ether_types = importutils.import_module(self._ETHER_TYPES_MODULE)
self.icmpv6 = importutils.import_module(self._ICMPV6_MODULE)
self.in_proto = importutils.import_module(self._IN_PROTO_MODULE)
mock.patch.object(self.br, '_get_dp', autospec=True,
return_value=self._get_dp()).start()
mock__send_msg = mock.patch.object(self.br, '_send_msg').start()
mock_delete_flows = mock.patch.object(self.br, 'delete_flows').start()
self.mock = mock.Mock()
self.mock.attach_mock(mock__send_msg, '_send_msg')
self.mock.attach_mock(mock_delete_flows, 'delete_flows')
def _get_dp(self):
return self.dp, self.ofp, self.ofpp
def test_drop_port(self):
in_port = 2345
self.br.drop_port(in_port=in_port)
(dp, ofp, ofpp) = self._get_dp()
expected = [
call._send_msg(
ofpp.OFPFlowMod(dp,
cookie=0,
instructions=[],
match=ofpp.OFPMatch(in_port=in_port),
priority=2,
table_id=0)),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_goto(self):
dest_table_id = 123
priority = 99
in_port = 666
self.br.install_goto(dest_table_id=dest_table_id,
priority=priority, in_port=in_port)
(dp, ofp, ofpp) = self._get_dp()
expected = [
call._send_msg(
ofpp.OFPFlowMod(dp,
cookie=0,
instructions=[
ofpp.OFPInstructionGotoTable(table_id=dest_table_id),
],
match=ofpp.OFPMatch(in_port=in_port),
priority=priority,
table_id=0)),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_drop(self):
priority = 99
in_port = 666
self.br.install_drop(priority=priority, in_port=in_port)
(dp, ofp, ofpp) = self._get_dp()
expected = [
call._send_msg(
ofpp.OFPFlowMod(dp,
cookie=0,
instructions=[],
match=ofpp.OFPMatch(in_port=in_port),
priority=priority,
table_id=0)),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_normal(self):
priority = 99
in_port = 666
self.br.install_normal(priority=priority, in_port=in_port)
(dp, ofp, ofpp) = self._get_dp()
expected = [
call._send_msg(
ofpp.OFPFlowMod(dp,
cookie=0,
instructions=[
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)
]),
],
match=ofpp.OFPMatch(in_port=in_port),
priority=priority,
table_id=0)),
]
self.assertEqual(expected, self.mock.mock_calls)
def test__cidr_to_ryu(self):
f = self.br._cidr_to_ryu
self.assertEqual('192.168.0.1', f('192.168.0.1'))
self.assertEqual('192.168.0.1', f('192.168.0.1/32'))
self.assertEqual(('192.168.0.0', '255.255.255.0'), f('192.168.0.0/24'))
class OVSDVRProcessTestMixin(object):
def test_install_dvr_process_ipv4(self):
vlan_tag = 999
gateway_ip = '192.0.2.1'
self.br.install_dvr_process_ipv4(vlan_tag=vlan_tag,
gateway_ip=gateway_ip)
(dp, ofp, ofpp) = self._get_dp()
expected = [
call._send_msg(ofpp.OFPFlowMod(dp,
cookie=0,
instructions=[],
match=ofpp.OFPMatch(
eth_type=self.ether_types.ETH_TYPE_ARP,
arp_tpa=gateway_ip,
vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
priority=3,
table_id=self.dvr_process_table_id)),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_dvr_process_ipv4(self):
vlan_tag = 999
gateway_ip = '192.0.2.1'
self.br.delete_dvr_process_ipv4(vlan_tag=vlan_tag,
gateway_ip=gateway_ip)
(dp, ofp, ofpp) = self._get_dp()
expected = [
call.delete_flows(table_id=self.dvr_process_table_id,
match=ofpp.OFPMatch(
eth_type=self.ether_types.ETH_TYPE_ARP,
arp_tpa=gateway_ip,
vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_dvr_process_ipv6(self):
vlan_tag = 999
gateway_mac = '08:60:6e:7f:74:e7'
self.br.install_dvr_process_ipv6(vlan_tag=vlan_tag,
gateway_mac=gateway_mac)
(dp, ofp, ofpp) = self._get_dp()
expected = [
call._send_msg(ofpp.OFPFlowMod(dp,
cookie=0,
instructions=[],
match=ofpp.OFPMatch(
eth_src=gateway_mac,
eth_type=self.ether_types.ETH_TYPE_IPV6,
icmpv6_type=self.icmpv6.ND_ROUTER_ADVERT,
ip_proto=self.in_proto.IPPROTO_ICMPV6,
vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
priority=3,
table_id=self.dvr_process_table_id)),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_dvr_process_ipv6(self):
vlan_tag = 999
gateway_mac = '08:60:6e:7f:74:e7'
self.br.delete_dvr_process_ipv6(vlan_tag=vlan_tag,
gateway_mac=gateway_mac)
(dp, ofp, ofpp) = self._get_dp()
expected = [
call.delete_flows(table_id=self.dvr_process_table_id,
match=ofpp.OFPMatch(
eth_src=gateway_mac,
eth_type=self.ether_types.ETH_TYPE_IPV6,
icmpv6_type=self.icmpv6.ND_ROUTER_ADVERT,
ip_proto=self.in_proto.IPPROTO_ICMPV6,
vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_dvr_process(self):
vlan_tag = 999
vif_mac = '00:0e:0c:5e:95:d0'
dvr_mac_address = 'f2:0b:a4:5b:b2:ab'
self.br.install_dvr_process(vlan_tag=vlan_tag,
vif_mac=vif_mac,
dvr_mac_address=dvr_mac_address)
(dp, ofp, ofpp) = self._get_dp()
expected = [
call._send_msg(ofpp.OFPFlowMod(dp,
cookie=0,
instructions=[],
match=ofpp.OFPMatch(
eth_dst=vif_mac,
vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
priority=2,
table_id=self.dvr_process_table_id)),
call._send_msg(ofpp.OFPFlowMod(dp,
cookie=0,
instructions=[
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [
ofpp.OFPActionSetField(eth_src=dvr_mac_address),
]),
ofpp.OFPInstructionGotoTable(
table_id=self.dvr_process_next_table_id),
],
match=ofpp.OFPMatch(
eth_src=vif_mac,
vlan_vid=vlan_tag | ofp.OFPVID_PRESENT),
priority=1,
table_id=self.dvr_process_table_id)),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_dvr_process(self):
vlan_tag = 999
vif_mac = '00:0e:0c:5e:95:d0'
self.br.delete_dvr_process(vlan_tag=vlan_tag,
vif_mac=vif_mac)
(dp, ofp, ofpp) = self._get_dp()
expected = [
call.delete_flows(table_id=self.dvr_process_table_id,
match=ofpp.OFPMatch(
eth_dst=vif_mac,
vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
call.delete_flows(table_id=self.dvr_process_table_id,
match=ofpp.OFPMatch(
eth_src=vif_mac,
vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)),
]
self.assertEqual(expected, self.mock.mock_calls)
| [
"[email protected]"
] | |
590926c548bbf4d2c80cd8848ec14070f263882b | 4d0bbeb8ab52f7e450aff20056f7509e12751258 | /functional_tests/test_list_item_validation.py | b76997f17a27784079ace3f2665c4571d9f5f356 | [] | no_license | chicocheco/tdd_book | f7c9246dcb4eb5327704c72f655bf6e187b28849 | 574b1082aa523c7434f50e0c4cbdf5777ddf50ef | refs/heads/master | 2022-05-02T17:44:27.217329 | 2020-03-13T18:57:22 | 2020-03-13T18:57:22 | 197,633,503 | 0 | 0 | null | 2022-04-22T22:19:12 | 2019-07-18T17:56:43 | JavaScript | UTF-8 | Python | false | false | 4,895 | py | from selenium.webdriver.common.keys import Keys
from unittest import skip
from .base import FunctionalTest
class ItemValidationTest(FunctionalTest):
# YAGNI, 3 strikes and refactor, not moving helper methods to base.py if not needed elsewhere
def get_error_element(self):
return self.browser.find_element_by_css_selector('.has-error')
def test_cannot_add_empty_list_items(self):
        # Tania goes to the home page and accidentally tries to submit an empty list item.
self.browser.get(self.live_server_url)
self.get_item_input_box().send_keys(Keys.ENTER) # every click or enter should be followed by some wait
        # the browser intercepts the request because nothing was entered into the 'required' field
# (HTML5) browser adds a CSS pseudoselector ":invalid" to the id parameter of the element
# and pops up "Fill out the field" alert
        # fallback: if a browser such as Safari does not fully implement HTML5, the custom error message is used instead
self.wait_for(lambda: self.browser.find_element_by_css_selector('#id_text:invalid'))
"""
lambda: when you want to save a function with arguments to a variable/parameter but not executing it yet
>>> myfn = lambda: addthree(2) # note addthree is not called immediately here
>>> myfn
<function <lambda> at 0x7f3b140339d8>
>>> myfn() # execute it here
5
"""
        # Now she tries again with some text for the item, which works, and the error disappears
# CSS pseudoselector changes from #id_text:invalid to #id_text:valid
self.get_item_input_box().send_keys('Buy milk')
self.wait_for(lambda: self.browser.find_element_by_css_selector('#id_text:valid'))
        # and she can submit the item without any problem
self.get_item_input_box().send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy milk')
        # Mischievously, she now tries to add a second empty item (this time to the existing list)
self.get_item_input_box().send_keys(Keys.ENTER)
        # She gets a similar warning on the list page; the browser complains again
self.wait_for(lambda: self.browser.find_element_by_css_selector('#id_text:invalid'))
        # Now she can fix it by filling the field with some text
self.get_item_input_box().send_keys('Make tea')
self.wait_for(lambda: self.browser.find_element_by_css_selector('#id_text:valid'))
self.get_item_input_box().send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy milk')
self.wait_for_row_in_list_table('2: Make tea')
# self.fail('Finish this test!')
def test_cannot_add_duplicate_items(self):
        # Tania goes to the home page and starts a new list
self.browser.get(self.live_server_url)
self.add_list_item('Buy wellies')
        # she accidentally tries to enter the same item again (the new list already exists)
self.get_item_input_box().send_keys('Buy wellies')
self.get_item_input_box().send_keys(Keys.ENTER)
        # she sees a helpful error message saying she is entering a duplicate
self.wait_for(lambda: self.assertEqual(
self.get_error_element().text,
"You've already got this in your list"
))
def test_error_messages_are_cleared_on_input(self):
        # Tania opens a new list and triggers a validation error
# this uses JavaScript and can be tested as:
# python manage.py test
# functional_tests.test_list_item_validation.ItemValidationTest.test_error_messages_are_cleared_on_input
self.browser.get(self.live_server_url)
self.add_list_item('Banter too thick')
self.get_item_input_box().send_keys('Banter too thick')
self.get_item_input_box().send_keys(Keys.ENTER)
self.wait_for(lambda: self.assertTrue(
self.get_error_element().is_displayed()
))
        # she starts typing into the field to clear the error message about the already-existing item
self.get_item_input_box().send_keys('a')
        # she is pleased to see the error message disappear
self.wait_for((lambda: self.assertFalse(
self.get_error_element().is_displayed()
)))
def test_error_messages_are_cleared_on_focus(self):
        # Tania opens a new list and triggers a validation error
self.browser.get(self.live_server_url)
self.add_list_item('Banter too thin')
self.get_item_input_box().send_keys('Banter too thin')
self.get_item_input_box().send_keys(Keys.ENTER)
self.wait_for(lambda: self.assertTrue(
self.get_error_element().is_displayed()
))
        # she clicks into the field to clear the error message about the already-existing item
self.get_item_input_box().click()
        # she is pleased to see the error message disappear
self.wait_for((lambda: self.assertFalse(
self.get_error_element().is_displayed()
)))
| [
"[email protected]"
] | |
bf50837fd80b831d40a6bba91fc419a1019c4bd2 | fa346a2d5886420e22707a7be03599e634b230a9 | /temboo/Library/Amazon/S3/__init__.py | 905fa3010c0e9c654a3d2c2b1a47f51be344b1f9 | [] | no_license | elihuvillaraus/entity-resolution | cebf937499ed270c3436b1dd25ab4aef687adc11 | 71dd49118a6e11b236861289dcf36436d31f06bc | refs/heads/master | 2021-12-02T17:29:11.864065 | 2014-01-08T04:29:30 | 2014-01-08T04:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | from PutBucketACL import *
from GetBucketLocation import *
from DeleteMultipleObjects import *
from GetBucketWebsite import *
from DeleteBucketCORS import *
from DeleteBucketTagging import *
from GetObjectACL import *
from ZipBucket import *
from DeleteBucket import *
from PutBucketWebsiteRedirectAll import *
from GetBucketVersioning import *
from PutBucketWebsiteRedirect import *
from GetBucketList import *
from PutObjectACL import *
from PutBucketCORS import *
from GetService import *
from GetBucketCORS import *
from GetBucketNotification import *
from GetBase64EncodedObject import *
from DeleteBucketLifecycle import *
from GetBucketLifecycle import *
from GetObjectTorrent import *
from PutBucketNotification import *
from PutBucketTagging import *
from PutBucketVersioning import *
from GetBucketPolicy import *
from GetBucketTagging import *
from GetBucketACL import *
from CopyObject import *
from PutBucketLifecycle import *
from ZipObject import *
from DeleteObject import *
from PutBucket import *
from PutBucketPolicy import *
from GetBucketLogging import *
from PutObject import *
from PutBucketLogging import *
from DeleteBucketPolicy import *
from PutBucketRequestPayment import *
from DeleteBucketWebsite import *
from GetBucketRequestPayment import *
| [
"[email protected]"
] | |
8487f1f63135e4bffeb4b1e070046c863dd458cf | 52f734b8f04ed0c88e3a41de2b5fb4aa3b2c1a8b | /data_process.py | 3eb1ff7174c0319da6678cce0e541f8ad5cfd25f | [] | no_license | hongjy127/CNN_ultrasonics | d79398d947ffca0014da2f917b2871362d624e91 | 11c5614ac734f4d7af9a4a488ddc52bb4be28b87 | refs/heads/master | 2023-02-27T00:10:58.396549 | 2021-02-03T16:39:40 | 2021-02-03T16:39:40 | 328,112,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,863 | py | import numpy as np
import matplotlib.pyplot as plt
from pyts.image import RecurrencePlot
# Load configuration settings
class Configuration:
def __init__(self):
config = self.load()
self.fname = config['FNAME']
def load(self):
config = {}
with open('config.ini','rt') as f:
entries = f.readlines()
for entry in entries:
key, value = entry.split('=')
config[key.strip()] = value.strip()
return config
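    # Expected config.ini format (illustrative): one KEY=VALUE entry per line,
    # e.g. FNAME=data.csv, which load() splits on '=' and strips.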
def __str__(self):
return f'<Configuration fname {self.fname}>'
# Load the data.csv file
def load(fname):
datas = np.loadtxt(fname, delimiter=',', dtype=np.float32)
signals = datas[:, :-1]
labels = datas[:, -1].astype('int')
data = (signals, labels)
return data
# scaling
def scaling(signals):
signals = signals/np.max(np.abs(signals))
return signals
# Convert signals to images (unused due to memory issues)
def sig2img(signals):
rp = RecurrencePlot(dimension=1, time_delay=1, threshold=None)
    signals = np.atleast_2d(signals)  # reshape returns a new array; keep a 2D (n_samples, n_timestamps) shape
img = rp.fit_transform(signals)
return img
# Cross validation
def CV(signals, labels):
pass
if __name__ == "__main__":
    # check config
config = Configuration()
fname = config.fname
print(fname)
    # check load
data = load(fname)
print(data[0][-1])
print(data[1][-1])
print(data[0].shape, data[1].shape)
signals = data[0]
plt.plot(signals[-1,:])
    # check scaling
signals = scaling(signals)
plt.plot(signals[-1,:])
plt.show()
    # check sig2img
signal = signals[0:2]
img = sig2img(signal)
print(img.shape)
fig = plt.figure()
rows = 1
cols = 2
ax1 = fig.add_subplot(rows, cols, 1)
ax1.imshow(img[0])
ax2 = fig.add_subplot(rows, cols, 2)
ax2.imshow(img[1])
plt.show()
| [
"[email protected]"
] | |
5b730b0e8f6cecaf216b0cf318e622fb159d42ce | a4b81839fe6d7726eb6c9d2c9fd0d9a70cf3ef3f | /day11_20191119/exercise00002.py | d84024cddff13dcadc302524c26401f03a257578 | [] | no_license | Python87-com/PythonExercise | 3f84e0f194254d0f0a8b106348b214ccdeebf842 | 9bef64c6d3b143236bf06131e5f7a5aabcf6980b | refs/heads/master | 2021-01-31T15:56:27.743718 | 2020-01-02T12:10:43 | 2020-01-02T12:10:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | """
4. Using object-oriented thinking, model the following scenario:
A player (attack power) attacks an enemy (health); the enemy takes damage (loses health) and may die (dropping equipment, awarding points).
An enemy (attack power) attacks the player; the player (health) takes damage (loses health / screen cracks) and may die (game over).
""" | [
"[email protected]"
] | |
1a4dbf5f1ac9cc33c0301f9ba2db8c21e1972c06 | 781029dcc468a7d1467a17727870d526da1df985 | /algorithm/2806_N-queens/sol.py | d0219b8816f66160b7d8c92f7d7a7d00ec2b6996 | [] | no_license | Huijiny/TIL | 5f0edec5ad187029e04ed2d69e85ae4d278e048d | d1a974b3cacfb45b2718f87d5c262a23986c6574 | refs/heads/master | 2023-09-03T15:28:11.744287 | 2021-10-21T12:38:10 | 2021-10-21T12:38:10 | 335,220,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | import sys
sys.stdin = open('sample_input (3).txt')
def is_exist_diagnal(cur_pos):
for queen in queens:
if abs(cur_pos[0] - queen[0]) == abs(cur_pos[1] - queen[1]):
return True
return False
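# Note (added for clarity): two positions share a diagonal exactly when
# |col1 - col2| == |row1 - row2|, which is the test performed above.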
def n_queens(row):
global visited, count
if row == N:
count += 1
else:
for col in range(N):
            # check that no other queen is in the same column, and check the diagonals
if not visited[col] and not is_exist_diagnal((col, row)):
visited[col] = True
queens.append((col, row))
n_queens(row + 1)
queens.pop()
visited[col] = False
T = int(input())
for tc in range(1, T+1):
N = int(input())
count = 0
visited = [False] * N
queens = []
n_queens(0)
print("#{} {}".format(tc, count)) | [
"[email protected]"
] | |
97b61b984b05740f9ba96560cbecd106998ce823 | d4ca0866381e577e3d36a22735d02eb4bf817b10 | /roman_to_integer.py | 17099c8e9ce1347818f6937355f5dd6828331434 | [] | no_license | haiwenzhu/leetcode | e842936b69bbaf5695de1f98c8c17507819435dd | bc068c2b00793ae72439efe5bdecaeed029e9f65 | refs/heads/master | 2021-01-15T13:11:22.672952 | 2015-07-25T04:45:36 | 2015-07-25T04:45:36 | 36,838,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | class Solution:
"""
@see https://oj.leetcode.com/problems/roman-to-integer/
"""
# @return an integer
def romanToInt(self, s):
chart = dict(I=1, V=5, X=10, L=50, C=100, D=500, M=1000)
if s == "":
return 0
        # Scan right to left: a symbol smaller than its right neighbour is
        # subtracted (subtractive notation, e.g. IX = 9); otherwise it is added.
        n = chart[s[-1]]
for i in range(2, len(s)+1):
if chart[s[-i]] < chart[s[-i+1]]:
n -= chart[s[-i]]
else:
n += chart[s[-i]]
return n
if __name__ == "__main__":
solution = Solution()
print(solution.romanToInt(""))
print(solution.romanToInt("VII") == 7)
print(solution.romanToInt("XXXIX") == 39)
print(solution.romanToInt("DCCCXC") == 890)
| [
"[email protected]"
] | |
30bded28cc0dcda07789d41d085dfdb3d9e6e17c | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/GLES2/NV/shader_atomic_fp16_vector.py | 99ece98dcff7af485333887d940e48ccac1ff4b7 | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | '''OpenGL extension NV.shader_atomic_fp16_vector
This module customises the behaviour of the
OpenGL.raw.GLES2.NV.shader_atomic_fp16_vector to provide a more
Python-friendly API
Overview (from the spec)
This extension provides GLSL built-in functions and assembly opcodes
allowing shaders to perform a limited set of atomic read-modify-write
operations to buffer or texture memory with 16-bit floating point vector
surface formats.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/shader_atomic_fp16_vector.txt
'''
from OpenGL.raw.GLES2.NV.shader_atomic_fp16_vector import _EXTENSION_NAME
def glInitShaderAtomicFp16VectorNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | [
"[email protected]"
] | |
c4d74c2b8f344ee2be3556aa368f15b22c62cd6e | cb1d06e91347a23438057d9f40b5a74cad595766 | /autonetkit/anm/__init__.py | 4ee754a1a2282b1c46bccc42aaac4a0d566210a7 | [] | permissive | plucena24/autonetkit | 9f94d3fba6bfad54793a7de58ef17439c2c71f0b | f7e8c03ee685d5b89f9028cb556017e730e0446c | refs/heads/master | 2023-08-16T18:03:54.593010 | 2014-11-07T13:43:39 | 2014-11-07T13:43:39 | 27,204,033 | 0 | 0 | BSD-3-Clause | 2023-08-08T18:36:36 | 2014-11-27T01:36:38 | Python | UTF-8 | Python | false | false | 273 | py | from autonetkit.anm.network_model import NetworkModel as NetworkModel
from autonetkit.anm.graph import NmGraph as NmGraph
from autonetkit.anm.node import NmNode as NmNode
from autonetkit.anm.edge import NmEdge as NmEdge
from autonetkit.anm.interface import NmPort as NmPort | [
"[email protected]"
] | |
8be656429e0ccfbc6d5b995c311d4436a9d86d31 | 3ca67d69abd4e74b7145b340cdda65532f90053b | /programmers/난이도별/level03.표 편집/sangmandu.py | 309e210a1f26779c12c4f4c6978734fa138431d5 | [] | no_license | DKU-STUDY/Algorithm | 19549516984b52a1c5cd73e1ed1e58f774d6d30e | 6f78efdbefd8eedab24e43d74c7dae7f95c2893b | refs/heads/master | 2023-02-18T06:48:39.309641 | 2023-02-09T07:16:14 | 2023-02-09T07:16:14 | 258,455,710 | 175 | 49 | null | 2023-02-09T07:16:16 | 2020-04-24T08:42:27 | Python | UTF-8 | Python | false | false | 3,870 | py | '''
https://programmers.co.kr/learn/courses/30/lessons/81303
Table Editing (표 편집)
[Approach]
0. This is a linked-list problem.
    => Some people solve problems like this with a full class. Impressive..
1. Implement the linked list with a dictionary.
    => The i-th node holds a two-element list as its value.
    => [0] : left = node i-1
    => [1] : right = node i+1
    => Node 0 and node n-1 hold None on their outer ends.
2. Handle commands C and Z as special cases; split() everything else.
2-1. Command C
    => Append the removed node's idx, left and right to the rm list.
    => The rm list is needed later when a Z command arrives.
    => Link the node's left and right neighbours directly to each other,
    => handling the None case at the boundaries.
2-2. Command Z
    => Pop the most recently removed node from the rm list.
    => Split into two cases: the restored node is the last node, or it is not.
    => Then relink it with its left and right neighbours.
    => Could its left/right themselves be removed at this point?
    => No, because this node is the most recently removed one;
    => if its left/right had been removed, they would already hold other left/right values.
2-3. Commands U, D
    => D increases idx, U decreases idx.
    => This is where the linked list shines:
    => every other approach must check, while moving, whether each node was removed;
    => in a linked list an unlinked node is already gone, so no check is needed.
'''
def solution(n, k, cmd):
dic = {}
for i in range(0, n):
dic[i] = [i-1, i+1]
dic[0][0] = dic[n-1][1] = None
rm = []
for c in cmd:
if c == "C":
rm.append([k, dic[k][0], dic[k][1]])
if dic[k][1] is None:
k = dic[k][0]
dic[k][1] = None
else:
if dic[k][0] is not None:
dic[dic[k][0]][1] = dic[k][1]
dic[dic[k][1]][0] = dic[k][0]
k = dic[k][1]
elif c == "Z":
idx, left, right = rm.pop()
if left is not None:
dic[left][1] = idx
if right is not None:
dic[right][0] = idx
dic[idx] = [left, right]
else:
move, steps = c.split()
for _ in range(int(steps)):
k = dic[k][int(move == "D")]
answer = ["O"] * n
for idx, _, _ in rm:
answer[idx] = "X"
return ''.join(answer)
'''
I expected a plain-list implementation to time out for sure,
    => because it needs removal checks plus inserts/removals in the middle of the list.
So I built it to remember each row's state instead. I thought it was a really good solution, but it failed the efficiency tests. (The same thing probably happened in the actual internship coding test.)
Below is the list-based code; it timed out on 4 test cases, so I had to drop it in the end.
def solution(n, k, cmd):
lst = ["O"] * n
top = n-1
remove = []
for c in cmd:
if c == "C":
remove.append(k)
lst[k] = "X"
drt = 2 * (top != k) - 1
while lst[k+drt] == "X":
k += drt
k += drt
while lst[top] == "X":
top -= 1
elif c == "Z":
idx = remove.pop()
lst[idx] = "O"
top = max(idx, top)
else:
move, steps = c.split()
steps = int(steps)
drt = 2 * (move == "D") - 1
while steps:
k += drt
steps -= lst[k] == "O"
#print(c, lst, remove, top, k)
return ''.join(lst)
'''
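# Quick self-check I added (hand-traced, so double-check against the official
# samples): starting at row 2 of 8, these commands leave only row 4 deleted.
if __name__ == "__main__":
    print(solution(8, 2, ["D 2", "C", "U 3", "C", "C", "Z", "Z"]))  # OOOOXOOO
| [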
"[email protected]"
] | |
009bcc09b6b0e01969e419f841aa942b60421c69 | ce36737f134db1726bb189c17a729b9d3abba4e4 | /assets/src/ba_data/python/bastd/ui/settings/gamepadselect.py | 4c1b4fa15cb76c093ad443c0653ec961e3d5fead | [
"MIT"
] | permissive | Indev450/ballistica | 0559940971c69b7596442abfc6ac2818a4987064 | 27420d3f64c24bf3c9b4b047177a4769977659b1 | refs/heads/master | 2023-07-20T16:01:04.586170 | 2020-04-13T09:32:36 | 2020-04-13T09:32:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,740 | py | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Settings UI related to gamepad functionality."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
import ba
if TYPE_CHECKING:
from typing import Dict, Any
def gamepad_configure_callback(event: Dict[str, Any]) -> None:
"""Respond to a gamepad button press during config selection."""
from ba.internal import get_remote_app_name
from bastd.ui.settings import gamepad
    # Ignore everything except button presses and hat motions.
if event['type'] not in ['BUTTONDOWN', 'HATMOTION']:
return
_ba.release_gamepad_input()
try:
ba.containerwidget(edit=ba.app.main_menu_window, transition='out_left')
except Exception:
ba.print_exception("Error transitioning out main_menu_window.")
ba.playsound(ba.getsound('activateBeep'))
ba.playsound(ba.getsound('swish'))
inputdevice = event['input_device']
assert isinstance(inputdevice, ba.InputDevice)
if inputdevice.allows_configuring:
ba.app.main_menu_window = (
gamepad.GamepadSettingsWindow(inputdevice).get_root_widget())
else:
width = 700
height = 200
button_width = 100
ba.app.main_menu_window = dlg = (ba.containerwidget(
scale=1.7 if ba.app.small_ui else 1.4 if ba.app.med_ui else 1.0,
size=(width, height),
transition='in_right'))
device_name = inputdevice.name
if device_name == 'iDevice':
msg = ba.Lstr(resource='bsRemoteConfigureInAppText',
subs=[('${REMOTE_APP_NAME}', get_remote_app_name())])
else:
msg = ba.Lstr(resource='cantConfigureDeviceText',
subs=[('${DEVICE}', device_name)])
ba.textwidget(parent=dlg,
position=(0, height - 80),
size=(width, 25),
text=msg,
scale=0.8,
h_align="center",
v_align="top")
def _ok() -> None:
from bastd.ui.settings import controls
ba.containerwidget(edit=dlg, transition='out_right')
ba.app.main_menu_window = (controls.ControlsSettingsWindow(
transition='in_left').get_root_widget())
ba.buttonwidget(parent=dlg,
position=((width - button_width) / 2, 20),
size=(button_width, 60),
label=ba.Lstr(resource='okText'),
on_activate_call=_ok)
class GamepadSelectWindow(ba.Window):
"""Window for selecting a gamepad to configure."""
def __init__(self) -> None:
from typing import cast
width = 480
height = 170
spacing = 40
self._r = 'configGamepadSelectWindow'
super().__init__(root_widget=ba.containerwidget(
scale=2.3 if ba.app.small_ui else 1.5 if ba.app.med_ui else 1.0,
size=(width, height),
transition='in_right'))
btn = ba.buttonwidget(parent=self._root_widget,
position=(20, height - 60),
size=(130, 60),
label=ba.Lstr(resource='backText'),
button_type='back',
scale=0.8,
on_activate_call=self._back)
        # Let's not have anything selected by default; it's misleading-looking
        # while the controller is getting configured.
ba.containerwidget(edit=self._root_widget,
cancel_button=btn,
selected_child=cast(ba.Widget, 0))
ba.textwidget(parent=self._root_widget,
position=(20, height - 50),
size=(width, 25),
text=ba.Lstr(resource=self._r + '.titleText'),
maxwidth=250,
color=ba.app.title_color,
h_align="center",
v_align="center")
ba.buttonwidget(edit=btn,
button_type='backSmall',
size=(60, 60),
label=ba.charstr(ba.SpecialChar.BACK))
v: float = height - 60
v -= spacing
ba.textwidget(parent=self._root_widget,
position=(15, v),
size=(width - 30, 30),
scale=0.8,
text=ba.Lstr(resource=self._r + '.pressAnyButtonText'),
maxwidth=width * 0.95,
color=ba.app.infotextcolor,
h_align="center",
v_align="top")
v -= spacing * 1.24
if ba.app.platform == 'android':
ba.textwidget(parent=self._root_widget,
position=(15, v),
size=(width - 30, 30),
scale=0.46,
text=ba.Lstr(resource=self._r + '.androidNoteText'),
maxwidth=width * 0.95,
color=(0.7, 0.9, 0.7, 0.5),
h_align="center",
v_align="top")
_ba.capture_gamepad_input(gamepad_configure_callback)
def _back(self) -> None:
from bastd.ui.settings import controls
_ba.release_gamepad_input()
ba.containerwidget(edit=self._root_widget, transition='out_right')
ba.app.main_menu_window = (controls.ControlsSettingsWindow(
transition='in_left').get_root_widget())
| [
"[email protected]"
] | |
e2d6533a67d8bebf792f226e1e5a1c797c7a7032 | 39d7ab29356ea5363c783d518b3c92f52c2ef8c2 | /crawler/crawlers_BACKUP/sinarollcustom.py | 9c3170f6636f92bf98ab84af04baecb8a1e39df5 | [] | no_license | wdcpop/Web-Crawler | 7daad778bbda9e5852248971845e3b448629175e | 725037d17dfd2535e213df3cb7aafda523d39c03 | refs/heads/master | 2018-07-31T15:12:31.605633 | 2018-06-02T16:03:36 | 2018-06-02T16:03:36 | 121,194,531 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from .abstracts.crawler_abstract import CrawlerAbstract
import re
class SINAROLLCUSTOM(CrawlerAbstract):
    title = u'新浪财经 - 滚动自选'  # Sina Finance - custom rolling news feed
start_urls = [
'http://roll.news.sina.com.cn/s/channel.php?ch=01#col=96,97,98&spec=&type=&ch=01&k=&offset_page=0&offset_num=0&num=60&asc=&page=4'
]
url_patterns = [
re.compile(r'(http://.*?\.sina\.com\.cn/[\w/]+?/\d{4}-\d{2}-\d{2}/doc-\w+?\.shtml)')
]
content_selector = dict(
title='#artibodyTitle',
content='.article, #artibody',
date_area='.time-source, #pub_date'
)
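    # My reading (an assumption, since CrawlerAbstract is not shown here):
    # start_urls seeds the crawl, url_patterns keeps matching article links,
    # and content_selector's CSS selectors extract the title, body and date.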
| [
"[email protected]"
] | |
2370bfe569782dc9bb6f537eb8495692b3be2571 | 51d5bd1f792f3a0fe1285c3ccdeefb58077890df | /anees/migrations/0027_auto_20200926_1337.py | 33ea2f35e72c08589a1f98ef7070dd993ad8150f | [
"MIT"
] | permissive | ashish2020kashyap/cessini | 667c2d4ab64f34121255a43c327b8110fa499d0b | 9713fd76d2e31a95266ec69da2abc98424a46e52 | refs/heads/master | 2022-12-16T13:30:21.093504 | 2020-09-29T06:31:12 | 2020-09-29T06:31:12 | 299,510,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | # Generated by Django 3.1.1 on 2020-09-26 13:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('anees', '0026_auto_20200926_1336'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='customer_camp',
field=models.ManyToManyField(blank=True, null=True, related_name='customer_camp', to='anees.Campaign'),
),
migrations.AlterField(
model_name='customer',
name='customer_email',
field=models.ManyToManyField(blank=True, null=True, related_name='customer_email', to='anees.Email'),
),
migrations.AlterField(
model_name='customer',
name='name',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='customer',
name='phone',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='email',
name='camp',
field=models.ManyToManyField(null=True, related_name='camp', to='anees.Campaign'),
),
]
| [
"[email protected]"
] | |
0a1b40f9c2382b1f57e2db3116afbfc749929daf | 1b2fc9666edbbdc65387c854831097e0be8b686c | /BOJ(Baekjoon Online Judge)/Mathematics/2004_조합 0의 개수(counting trailing zero in combination).py | cfc777b9dac881b237d3a84d4de3aec3d7992dda | [] | no_license | seongbeenkim/Algorithm-python | 6593878cff8755800f3e8bcdaabdb41625324f38 | 24fe365a29c61c2405a06345f9105ed200a76bd5 | refs/heads/master | 2023-09-04T11:05:08.318769 | 2021-02-18T14:58:20 | 2021-02-18T14:58:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | #https://www.acmicpc.net/problem/2004
import sys
count_two = 0
count_five = 0
n, m = list(map(int,sys.stdin.readline().split()))
i = 2
while n >= i:
count_two += n // i
i *= 2
i = 2
while (n-m) >= i:
count_two -= (n-m) // i
i *= 2
i = 2
while m >= i:
count_two -= m // i
i *= 2
i = 5
while n >= i:
count_five += n // i
i *= 5
i = 5
while (n-m) >= i:
count_five -= (n-m) // i
i *= 5
i = 5
while m >= i:
count_five -= m // i
i *= 5
print(min(count_two, count_five))
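# Each loop pair above is an instance of Legendre's formula: the exponent of a
# prime p in n! is the sum of n // p**i over i >= 1. A generic helper (my own
# equivalent refactor, shown for clarity):
def count_factor(x, p):
    count = 0
    while x:
        x //= p
        count += x
    return count
# min(count_factor(n, 2) - count_factor(m, 2) - count_factor(n - m, 2),
#     count_factor(n, 5) - count_factor(m, 5) - count_factor(n - m, 5))
# reproduces the value printed above.
| [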
"[email protected]"
] | |
52e5b6853dc97a6beb2f483898e29c76c462f3e8 | 8fd28b248511f42ad8732ca1e574aada33908376 | /tests/test_models/test_recognizers.py | 8ce86d6ec3689d2e8fbc75634871f24de57ddb72 | [
"Apache-2.0"
] | permissive | vt-vl-lab/video-data-aug | 28bd175535cab1444055502389c8f5d7d75e4bd2 | 01667cdbd1b952f2510af3422beeeb76e0d9e15a | refs/heads/main | 2023-09-01T02:36:40.034893 | 2021-07-21T01:31:42 | 2021-07-21T01:31:42 | 352,920,339 | 29 | 6 | Apache-2.0 | 2021-07-21T01:29:36 | 2021-03-30T08:06:54 | Jupyter Notebook | UTF-8 | Python | false | false | 10,211 | py | import os.path as osp
import mmcv
import numpy as np
import pytest
import torch
import torch.nn.functional as F
from mmaction.models import BaseRecognizer, build_recognizer
class ExampleRecognizer(BaseRecognizer):
def __init__(self, train_cfg, test_cfg):
super(BaseRecognizer, self).__init__()
# reconstruct `__init__()` method in BaseRecognizer to avoid building
# backbone and head which are useless to ExampleRecognizer,
# since ExampleRecognizer is only used for model-unrelated methods
# (like `average_clip`) testing.
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def forward_train(self, imgs, labels):
pass
def forward_test(self, imgs):
pass
def _get_recognizer_cfg(fname):
"""Grab configs necessary to create a recognizer.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dpath, 'configs/recognition')
config_fpath = osp.join(config_dpath, fname)
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
config = mmcv.Config.fromfile(config_fpath)
return config.model, config.train_cfg, config.test_cfg
def test_base_recognizer():
cls_score = torch.rand(5, 400)
with pytest.raises(KeyError):
# "average_clips" must defined in test_cfg keys
wrong_test_cfg = dict(clip='score')
recognizer = ExampleRecognizer(None, wrong_test_cfg)
recognizer.average_clip(cls_score)
with pytest.raises(ValueError):
# unsupported average clips type
wrong_test_cfg = dict(average_clips='softmax')
recognizer = ExampleRecognizer(None, wrong_test_cfg)
recognizer.average_clip(cls_score)
with pytest.raises(ValueError):
# Label should not be None
recognizer = ExampleRecognizer(None, None)
recognizer(torch.tensor(0))
# average_clips='score'
test_cfg = dict(average_clips='score')
recognizer = ExampleRecognizer(None, test_cfg)
score = recognizer.average_clip(cls_score)
assert torch.equal(score, cls_score.mean(dim=0, keepdim=True))
# average_clips='prob'
test_cfg = dict(average_clips='prob')
recognizer = ExampleRecognizer(None, test_cfg)
score = recognizer.average_clip(cls_score)
assert torch.equal(score,
F.softmax(cls_score, dim=1).mean(dim=0, keepdim=True))
def test_tsn():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 32, 32)
demo_inputs = generate_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
def test_i3d():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py')
model['backbone']['pretrained2d'] = False
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
def test_r2plus1d():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py')
model['backbone']['pretrained2d'] = False
model['backbone']['pretrained'] = None
model['backbone']['norm_cfg'] = dict(type='BN3d')
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
def test_slowfast():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py')
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
def test_tsm():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 8, 3, 32, 32)
demo_inputs = generate_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
def test_csn():
model, train_cfg, test_cfg = _get_recognizer_cfg(
'csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py')
model['backbone']['pretrained2d'] = False
model['backbone']['pretrained'] = None
recognizer = build_recognizer(
model, train_cfg=train_cfg, test_cfg=test_cfg)
input_shape = (1, 3, 3, 8, 32, 32)
demo_inputs = generate_demo_inputs(input_shape, '3D')
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
# parrots 3dconv is only implemented on gpu
if torch.__version__ == 'parrots':
if torch.cuda.is_available():
recognizer = recognizer.cuda()
imgs = imgs.cuda()
gt_labels = gt_labels.cuda()
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
else:
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
def generate_demo_inputs(input_shape=(1, 3, 3, 224, 224), model_type='2D'):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple): input batch dimensions.
            Default: (1, 3, 3, 224, 224).
model_type (str): Model type for data generation, from {'2D', '3D'}.
            Default: '2D'
"""
if len(input_shape) == 5:
(N, L, C, H, W) = input_shape
elif len(input_shape) == 6:
(N, M, C, L, H, W) = input_shape
imgs = np.random.random(input_shape)
if model_type == '2D':
gt_labels = torch.LongTensor([2] * N)
elif model_type == '3D':
gt_labels = torch.LongTensor([2] * M)
else:
raise ValueError(f'Data type {model_type} is not available')
inputs = {
'imgs': torch.FloatTensor(imgs),
'gt_labels': gt_labels,
}
return inputs
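# These tests are written for pytest; from the repo root something like
#   pytest tests/test_models/test_recognizers.py -k "tsn or tsm"
# should collect and run them (exact flags depend on the local setup).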
| [
"[email protected]"
] | |
7b1eb99a5365b143075c1cb52706163e83e63362 | 794e14945c0521b4eab03e8b9a3f93b8fa14e021 | /src/compas_ghpython/artists/lineartist.py | e4bc4ee098824c867f75517ab75f1e85f0d1a226 | [
"MIT"
] | permissive | KEERTHANAUDAY/compas | 5e8ada865bc87ee48ba77b3f6fd03661a9b9c17d | 4d1101cf302f95a4472a01a1265cc64eaec6aa4a | refs/heads/master | 2021-07-11T16:26:19.452926 | 2020-09-10T14:27:11 | 2020-09-10T14:27:11 | 294,453,684 | 0 | 0 | MIT | 2020-09-10T15:47:31 | 2020-09-10T15:47:30 | null | UTF-8 | Python | false | false | 1,054 | py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_ghpython
from compas_ghpython.artists._primitiveartist import PrimitiveArtist
__all__ = ['LineArtist']
class LineArtist(PrimitiveArtist):
"""Artist for drawing lines.
Parameters
----------
primitive : :class:`compas.geometry.Line`
A COMPAS line.
Other Parameters
----------------
See :class:`compas_ghpython.artists.PrimitiveArtist` for all other parameters.
"""
def draw(self):
"""Draw the line.
Returns
-------
:class:`Rhino.Geometry.Line`
"""
start = list(self.primitive.start)
end = list(self.primitive.end)
lines = [{'start': start, 'end': end}]
return compas_ghpython.draw_lines(lines)[0]
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
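    # A hypothetical usage sketch (it needs a Rhino/Grasshopper environment,
    # so it is left as comments here rather than executable code):
    #   from compas.geometry import Line
    #   artist = LineArtist(Line([0, 0, 0], [1, 0, 0]))
    #   rhino_line = artist.draw()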
| [
"[email protected]"
] | |
f0078d8d238fb95be3367e8c9a6724e692d2f892 | d806dd4a6791382813d2136283a602207fb4b43c | /migrations/versions/414eda9f70d0_.py | 2eb67987fdcb5ce4328978a6b6c70f45820f1622 | [] | no_license | MarsStirner/sirius | 5bbf2a03dafb7248db481e13aff63ff989fabbc2 | 8839460726cca080ca8549bacd3a498e519c8f96 | refs/heads/master | 2021-03-24T12:09:14.673193 | 2017-06-06T16:28:53 | 2017-06-06T16:28:53 | 96,042,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | """empty message
Revision ID: 414eda9f70d0
Revises: f088f9315be0
Create Date: 2016-10-20 20:27:23.521000
"""
# revision identifiers, used by Alembic.
revision = '414eda9f70d0'
down_revision = 'f088f9315be0'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(u'_remote_entity_id_uc', 'matching_id', type_='unique')
op.drop_index('ix_matching_id_remote_id', table_name='matching_id')
op.drop_column(u'matching_id', 'remote_id')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column(u'matching_id', sa.Column('remote_id', sa.INTEGER(), autoincrement=False, nullable=False))
op.create_index('ix_matching_id_remote_id', 'matching_id', ['remote_id'], unique=False)
op.create_unique_constraint(u'_remote_entity_id_uc', 'matching_id', ['remote_entity_id', 'remote_id'])
### end Alembic commands ###
| [
"[email protected]"
] | |
c2fbf9b3a660a865fe1e20d672d48b4f0f4211bc | 50084bf941b61791ac4def5b8cff950e7ddfdd15 | /10 Advanced Language Techniques/Functional/04_currying.py | 9bfdbe7774d011369592c47083061947c15d5c7e | [] | no_license | gjq91459/mycourse | 6904ad191dc6128fb853eb8bdb1b200a46b2059f | 211d7707e41f50495375b4a1dfc32b62b76b317b | refs/heads/master | 2021-01-21T10:34:44.841349 | 2017-03-01T11:17:52 | 2017-03-01T11:17:52 | 83,456,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # conventional function that takes 5 args
def func(a, b, c, d, e):
return a, b, c, d, e
print(func(1, 2, 3, 4, 5))
# curried version that takes 1 arg
def f(a):
def g(b):
def h(c):
def i(d):
def j(e):
return a, b, c, d, e
return j
return i
return h
return g
# f can be called in a variety of ways
a = f(1)
b = f(1)(2)
c = f(1)(2)(3)
d = f(1)(2)(3)(4)
e = f(1)(2)(3)(4)(5)
# missing arguments can be supplied later
print(a(2)(3)(4)(5))
print(b(3)(4)(5))
print(c(4)(5))
print(d(5))
print(e)
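# In everyday Python, partial application is usually spelled with
# functools.partial rather than hand-rolled currying; a quick sketch:
from functools import partial
f1 = partial(func, 1)            # fixes a=1
f123 = partial(func, 1, 2, 3)    # fixes a, b and c
print(f1(2, 3, 4, 5))
print(f123(4, 5))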
| [
"[email protected]"
] | |
bd036bff8f5d9e56e55bb6ba97338a10bbbf2499 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/82/usersdata/231/43199/submittedfiles/decimal2bin.py | cfc4f3a70bf6f9df805aa03d52a228511ebb8f8f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | # -*- coding: utf-8 -*-
# Note: despite the file name, this reads a number whose decimal digits are
# binary digits (e.g. 101) and prints the decimal value of that binary number.
n = int(input('enter n: '))
soma=0
i=0
while n>0:
resto =n%10
soma=soma+resto*(2**i)
n=n//10
i=i+1
print(soma)
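# Worked example: entering 101 yields 1*(2**0) + 0*(2**1) + 1*(2**2) = 5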
| [
"[email protected]"
] | |
9af0b1c58bb1fb1a4ecbbacf86e2e4df2322826d | e37050384528b91832860d5490c1551812f45359 | /train/train_spider_funtions/constant.py | 24228b31060266e820cb2c471204257d584cba3f | [] | no_license | gii001/train_spider_api | 72366cf4c17ac9596090bba1f9b80778e1d2d289 | 5307b6f538bad1379e6f218f824a402565c42fee | refs/heads/master | 2020-05-01T03:59:11.263971 | 2019-04-05T06:07:07 | 2019-04-05T06:07:07 | 177,261,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,619 | py | #!/usr/bin/env python
# encoding: utf-8
"""
Constant definitions
Author: Wendell
Date: 2019/03/23 11:00
"""
# Base URL for querying train numbers / remaining tickets
TRAIN_NO_BASE_URL = 'https://kyfw.12306.cn/otn/leftTicket/query?'
# Base URL for querying a train's stop (station) information
STATION_INFO_BASE_URL = 'https://kyfw.12306.cn/otn/czxx/queryByTrainNo?'
# URL suffix restricting results to adult tickets
PURPOSE_CODES = '&purpose_codes=ADULT'
# User-Agent
USER_AGENTS = [
'Mozilla/5.0(Macintosh;U;IntelMacOSX10_6_8;en-us)AppleWebKit/534.50(KHTML,likeGecko)Version/5.1Safari/534.50',
'Mozilla/5.0(Windows;U;WindowsNT6.1;en-us)AppleWebKit/534.50(KHTML,likeGecko)Version/5.1Safari/534.50',
'Mozilla/5.0(compatible;MSIE9.0;WindowsNT6.1;Trident/5.0;',
'Mozilla/5.0(Macintosh;IntelMacOSX10.6;rv:2.0.1)Gecko/20100101Firefox/4.0.1',
'Opera/9.80(WindowsNT6.1;U;en)Presto/2.8.131Version/11.11',
'Mozilla/5.0(Macintosh;IntelMacOSX10_7_0)AppleWebKit/535.11(KHTML,likeGecko)Chrome/17.0.963.56Safari/535.11',
'Mozilla/4.0(compatible;MSIE7.0;WindowsNT5.1;Maxthon2.0)',
'Mozilla/4.0(compatible;MSIE7.0;WindowsNT5.1;TencentTraveler4.0)',
'Mozilla/4.0(compatible;MSIE7.0;WindowsNT5.1;TheWorld)',
'Mozilla/5.0(Linux;U;Android2.3.7;en-us;NexusOneBuild/FRF91)AppleWebKit/533.1(KHTML,likeGecko)Version/4.0MobileSafari/533.1',
'Mozilla/5.0(iPad;U;CPUOS4_3_3likeMacOSX;en-us)AppleWebKit/533.17.9(KHTML,likeGecko)Version/5.0.2Mobile/8J2Safari/6533.18.5',
'Mozilla/4.0(compatible;MSIE7.0;WindowsNT5.1;Trident/4.0;SE2.XMetaSr1.0;SE2.XMetaSr1.0;.NETCLR2.0.50727;SE2.XMetaSr1.0)',
]
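# A rough sketch of how these constants are presumably combined elsewhere in
# the spider (the parameter names follow the public 12306 leftTicket API that
# TRAIN_NO_BASE_URL points at; the helper itself is my illustration and is not
# part of this module):
#   import random
#   headers = {'User-Agent': random.choice(USER_AGENTS)}
#   url = (TRAIN_NO_BASE_URL
#          + 'leftTicketDTO.train_date=2019-03-23'
#          + '&leftTicketDTO.from_station=' + STATION_DICT['北京']
#          + '&leftTicketDTO.to_station=' + STATION_DICT['上海']
#          + PURPOSE_CODES)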
# Mapping from station name to its 12306 telecode
STATION_DICT = {'北京北': 'VAP', '北京东': 'BOP', '北京': 'BJP', '北京南': 'VNP', '北京西': 'BXP', '广州南': 'IZQ', '重庆北': 'CUW',
'重庆': 'CQW', '重庆南': 'CRW', '重庆西': 'CXW', '广州东': 'GGQ', '上海': 'SHH', '上海南': 'SNH', '上海虹桥': 'AOH',
'上海西': 'SXH', '天津北': 'TBP', '天津': 'TJP', '天津南': 'TIP', '天津西': 'TXP', '香港西九龙': 'XJA', '长春': 'CCT',
'长春南': 'CET', '长春西': 'CRT', '成都东': 'ICW', '成都南': 'CNW', '成都': 'CDW', '长沙': 'CSQ', '长沙南': 'CWQ',
'大明湖': 'JAK', '福州': 'FZS', '福州南': 'FYS', '贵阳': 'GIW', '广州': 'GZQ', '广州西': 'GXQ', '哈尔滨': 'HBB',
'哈尔滨东': 'VBB', '哈尔滨西': 'VAB', '合肥': 'HFH', '合肥西': 'HTH', '呼和浩特东': 'NDC', '呼和浩特': 'HHC', '海 口东': 'KEQ',
'海口东': 'HMQ', '海口': 'VUQ', '杭州东': 'HGH', '杭州': 'HZH', '杭州南': 'XHH', '济南': 'JNK', '济南西': 'JGK',
'昆明': 'KMM', '昆明西': 'KXM', '拉萨': 'LSO', '兰州东': 'LVJ', '兰州': 'LZJ', '兰州西': 'LAJ', '南昌': 'NCG',
'南京': 'NJH', '南京南': 'NKH', '南宁': 'NNZ', '石家庄北': 'VVP', '石家庄': 'SJP', '沈阳': 'SYT', '沈阳北': 'SBT',
'沈阳东': 'SDT', '沈阳南': 'SOT', '太原北': 'TBV', '太原东': 'TDV', '太原': 'TYV', '武汉': 'WHN', '王家营西': 'KNM',
'乌鲁木齐': 'WAR', '西安北': 'EAY', '西安': 'XAY', '西安南': 'CAY', '西宁': 'XNO', '银川': 'YIJ', '郑州': 'ZZF',
'阿尔山': 'ART', '安康': 'AKY', '阿克苏': 'ASR', '阿里河': 'AHX', '阿拉山口': 'AKR', '安平': 'APT', '安庆': 'AQH',
'安顺': 'ASW', '鞍山': 'AST', '安阳': 'AYF', '北安': 'BAB', '蚌埠': 'BBH', '白城': 'BCT', '北海': 'BHZ', '白河': 'BEL',
'白涧': 'BAP', '宝鸡': 'BJY', '滨江': 'BJB', '博克图': 'BKX', '百色': 'BIZ', '白山市': 'HJL', '北台': 'BTT',
'包头东': 'BDC', '包头': 'BTC', '北屯市': 'BXR', '本溪': 'BXT', '白云鄂博': 'BEC', '白银西': 'BXJ', '亳州': 'BZH',
'赤壁': 'CBN', '常德': 'VGQ', '承德': 'CDP', '长甸': 'CDT', '赤峰': 'CFD', '茶陵': 'CDG', '苍南': 'CEH', '昌平': 'CPP',
'崇仁': 'CRG', '昌图': 'CTT', '长汀镇': 'CDB', '曹县': 'CXK', '楚雄南': 'COM', '陈相屯': 'CXT', '长治北': 'CBF',
'池州': 'IYH', '长征': 'CZJ', '常州': 'CZH', '郴州': 'CZQ', '长治': 'CZF', '沧州': 'COP', '崇左': 'CZZ', '大安北': 'RNT',
'大成': 'DCT', '丹东': 'DUT', '东方红': 'DFB', '东莞东': 'DMQ', '大虎山': 'DHD', '敦化': 'DHL', '敦煌': 'DHJ',
'德惠': 'DHT', '东京城': 'DJB', '大涧': 'DFP', '都江堰': 'DDW', '大连北': 'DFT', '大理': 'DKM', '大连': 'DLT',
'定南': 'DNG', '大庆': 'DZX', '东胜': 'DOC', '大石桥': 'DQT', '大同': 'DTV', '东营': 'DPK', '大杨树': 'DUX',
'都匀': 'RYW', '邓州': 'DOF', '达州': 'RXW', '德州': 'DZP', '额济纳': 'EJC', '二连': 'RLC', '恩施': 'ESN', '福鼎': 'FES',
'凤凰机场': 'FJQ', '风陵渡': 'FLV', '涪陵': 'FLW', '富拉尔基': 'FRX', '抚顺北': 'FET', '佛山': 'FSQ', '阜新南': 'FXD',
'阜阳': 'FYH', '格尔木': 'GRO', '广汉': 'GHW', '古交': 'GJV', '桂林北': 'GBZ', '古莲': 'GRX', '桂林': 'GLZ',
'固始': 'GXN', '广水': 'GSN', '干塘': 'GNJ', '广元': 'GYW', '广州北': 'GBQ', '赣州': 'GZG', '公主岭': 'GLT',
'公主岭南': 'GBT', '淮安': 'AUH', '淮北': 'HRH', '鹤北': 'HMB', '淮滨': 'HVN', '河边': 'HBV', '潢川': 'KCN',
'韩城': 'HCY', '邯郸': 'HDP', '横道河子': 'HDB', '鹤岗': 'HGB', '皇姑屯': 'HTT', '红果': 'HEM', '黑河': 'HJB',
'怀化': 'HHQ', '汉口': 'HKN', '葫芦岛': 'HLD', '海拉尔': 'HRX', '霍林郭勒': 'HWD', '海伦': 'HLB', '侯马': 'HMV',
'哈密': 'HMR', '淮南': 'HAH', '桦南': 'HNB', '海宁西': 'EUH', '鹤庆': 'HQM', '怀柔北': 'HBP', '怀柔': 'HRP',
'黄石东': 'OSN', '华山': 'HSY', '黄山': 'HKH', '黄石': 'HSN', '衡水': 'HSP', '衡阳': 'HYQ', '菏泽': 'HIK', '贺州': 'HXZ',
'汉中': 'HOY', '惠州': 'HCQ', '吉安': 'VAG', '集安': 'JAL', '江边村': 'JBG', '晋城': 'JCF', '金城江': 'JJZ',
'景德镇': 'JCG', '嘉峰': 'JFF', '加格达奇': 'JGX', '井冈山': 'JGG', '蛟河': 'JHL', '金华南': 'RNH', '金华': 'JBH',
'九江': 'JJG', '吉林': 'JLL', '荆门': 'JMN', '佳木斯': 'JMB', '济宁': 'JIK', '集宁南': 'JAC', '酒泉': 'JQJ',
'江山': 'JUH', '吉首': 'JIQ', '九台': 'JTL', '镜铁山': 'JVJ', '鸡西': 'JXB', '绩溪县': 'JRH', '嘉峪关': 'JGJ',
'江油': 'JFW', '蓟州北': 'JKP', '金州': 'JZT', '锦州': 'JZD', '库尔勒': 'KLR', '开封': 'KFF', '岢岚': 'KLV',
'凯里': 'KLW', '喀什': 'KSR', '昆山南': 'KNH', '奎屯': 'KTR', '开原': 'KYT', '六安': 'UAH', '灵宝': 'LBF',
'芦潮港': 'UCH', '隆昌': 'LCW', '陆川': 'LKZ', '利川': 'LCN', '临川': 'LCG', '潞城': 'UTP', '鹿道': 'LDL', '娄底': 'LDQ',
'临汾': 'LFV', '良各庄': 'LGP', '临河': 'LHC', '漯河': 'LON', '绿化': 'LWJ', '隆化': 'UHP', '丽江': 'LHM', '临江': 'LQL',
'龙井': 'LJL', '吕梁': 'LHV', '醴陵': 'LLG', '柳林南': 'LKV', '滦平': 'UPP', '六盘水': 'UMW', '灵丘': 'LVV',
'旅顺': 'LST', '兰溪': 'LWH', '陇西': 'LXJ', '澧县': 'LEQ', '临西': 'UEP', '龙岩': 'LYS', '耒阳': 'LYQ', '洛阳': 'LYF',
'连云港东': 'UKH', '洛阳东': 'LDF', '临沂': 'LVK', '洛阳龙门': 'LLF', '柳园': 'DHR', '凌源': 'LYD', '辽源': 'LYL',
'立志': 'LZX', '柳州': 'LZZ', '辽中': 'LZD', '麻城': 'MCN', '免渡河': 'MDX', '牡丹江': 'MDB', '莫尔道嘎': 'MRX',
'明光': 'MGH', '满归': 'MHX', '漠河': 'MVX', '茂名': 'MDQ', '茂名西': 'MMZ', '密山': 'MSB', '马三家': 'MJT',
'麻尾': 'VAW', '绵阳': 'MYW', '梅州': 'MOQ', '满洲里': 'MLX', '宁波东': 'NVH', '宁波': 'NGH', '南岔': 'NCB',
'南充': 'NCW', '南丹': 'NDZ', '南大庙': 'NMP', '南芬': 'NFT', '讷河': 'NHX', '嫩江': 'NGX', '内江': 'NJW', '南平': 'NPS',
'南通': 'NUH', '南阳': 'NFF', '碾子山': 'NZX', '平顶山': 'PEN', '盘锦': 'PVD', '平凉': 'PIJ', '平凉南': 'POJ',
'平泉': 'PQP', '坪石': 'PSQ', '萍乡': 'PXG', '凭祥': 'PXZ', '郫县西': 'PCW', '攀枝花': 'PRW', '蕲春': 'QRN',
'青城山': 'QSW', '青岛': 'QDK', '清河城': 'QYP', '曲靖': 'QJM', '黔江': 'QNW', '前进镇': 'QEB', '齐齐哈尔': 'QHX',
'七台河': 'QTB', '沁县': 'QVV', '泉州东': 'QRS', '泉州': 'QYS', '衢州': 'QEH', '融安': 'RAZ', '汝箕沟': 'RQJ',
'瑞金': 'RJG', '日照': 'RZK', '双城堡': 'SCB', '绥芬河': 'SFB', '韶关东': 'SGQ', '山海关': 'SHD', '绥化': 'SHB',
'三间房': 'SFX', '苏家屯': 'SXT', '舒兰': 'SLL', '神木南': 'OMY', '三门峡': 'SMF', '商南': 'ONY', '遂宁': 'NIW',
'四平': 'SPT', '商丘': 'SQF', '上饶': 'SRG', '韶山': 'SSQ', '宿松': 'OAH', '汕头': 'OTQ', '邵武': 'SWS', '涉县': 'OEP',
'三亚': 'SEQ', '三 亚': 'JUQ', '邵阳': 'SYQ', '十堰': 'SNN', '三元区': 'SMS', '双鸭山': 'SSB', '松原': 'VYT',
'苏州': 'SZH', '深圳': 'SZQ', '宿州': 'OXH', '随州': 'SZN', '朔州': 'SUV', '深圳西': 'OSQ', '塘豹': 'TBQ',
'塔尔气': 'TVX', '潼关': 'TGY', '塘沽': 'TGP', '塔河': 'TXX', '通化': 'THL', '泰来': 'TLX', '吐鲁番': 'TFR',
'通辽': 'TLD', '铁岭': 'TLT', '陶赖昭': 'TPT', '图们': 'TML', '铜仁': 'RDQ', '唐山北': 'FUP', '田师府': 'TFT',
'泰山': 'TAK', '唐山': 'TSP', '天水': 'TSJ', '通远堡': 'TYT', '太阳升': 'TQT', '泰州': 'UTH', '桐梓': 'TZW',
'通州西': 'TAP', '五常': 'WCB', '武昌': 'WCN', '瓦房店': 'WDT', '威海': 'WKK', '芜湖': 'WHH', '乌海西': 'WXC',
'吴家屯': 'WJT', '乌鲁木齐南': 'WMR', '武隆': 'WLW', '乌兰浩特': 'WWT', '渭南': 'WNY', '威舍': 'WSM', '歪头山': 'WIT',
'武威': 'WUJ', '武威南': 'WWJ', '无锡': 'WXH', '乌西': 'WXR', '乌伊岭': 'WPB', '武夷山': 'WAS', '万源': 'WYY',
'万州': 'WYW', '梧州': 'WZZ', '温州': 'RZH', '温州南': 'VRH', '西昌': 'ECW', '许昌': 'XCF', '西昌南': 'ENW',
'锡林浩特': 'XTC', '厦门北': 'XKS', '厦门': 'XMS', '厦门高崎': 'XBS', '宣威': 'XWM', '新乡': 'XXF', '信阳': 'XUN',
'咸阳': 'XYY', '襄阳': 'XFN', '熊岳城': 'XYT', '新余': 'XUG', '徐州': 'XCH', '延安': 'YWY', '宜宾': 'YBW',
'亚布力南': 'YWB', '叶柏寿': 'YBD', '宜昌东': 'HAN', '永川': 'YCW', '盐城': 'AFH', '宜昌': 'YCN', '运城': 'YNV',
'伊春': 'YCB', '榆次': 'YCV', '杨村': 'YBP', '宜春西': 'YCG', '伊尔施': 'YET', '燕岗': 'YGW', '永济': 'YIV',
'延吉': 'YJL', '营口': 'YKT', '牙克石': 'YKX', '玉林': 'YLZ', '阎良': 'YNY', '榆林': 'ALY', '亚龙湾': 'TWQ',
'一面坡': 'YPB', '伊宁': 'YMR', '阳平关': 'YAY', '玉屏': 'YZW', '原平': 'YPV', '延庆': 'YNP', '阳泉曲': 'YYV',
'玉泉': 'YQB', '阳泉': 'AQP', '营山': 'NUW', '玉山': 'YNG', '燕山': 'AOP', '榆树': 'YRT', '鹰潭': 'YTG', '烟台': 'YAK',
'伊图里河': 'YEX', '玉田县': 'ATP', '义乌': 'YWH', '阳新': 'YON', '义县': 'YXD', '益阳': 'AEQ', '岳阳': 'YYQ',
'崖州': 'YUQ', '永州': 'AOQ', '扬州': 'YLH', '淄博': 'ZBK', '镇城底': 'ZDV', '自贡': 'ZGW', '珠海': 'ZHQ',
'珠海北': 'ZIQ', '湛江': 'ZJZ', '镇江': 'ZJH', '张家界': 'DIQ', '张家口': 'ZKP', '张家口南': 'ZMP', '周口': 'ZKN',
'哲里木': 'ZLC', '扎兰屯': 'ZTX', '驻马店': 'ZDN', '肇庆': 'ZVQ', '周水子': 'ZIT', '昭通': 'ZDW', '中卫': 'ZWJ',
'资阳': 'ZYW', '遵义西': 'ZIW', '枣庄': 'ZEK', '资中': 'ZZW', '株洲': 'ZZQ', '枣庄西': 'ZFK', '昂昂溪': 'AAX',
'阿城': 'ACB', '安达': 'ADX', '安德': 'ARW', '安定': 'ADP', '安多': 'ADO', '安广': 'AGT', '敖汉': 'YED', '艾河': 'AHP',
'安化': 'PKQ', '艾家村': 'AJJ', '鳌江': 'ARH', '安家': 'AJB', '阿金': 'AJD', '安靖': 'PYW', '阿克陶': 'AER',
'安口窑': 'AYY', '敖力布告': 'ALD', '安龙': 'AUZ', '阿龙山': 'ASX', '安陆': 'ALN', '阿木尔': 'JTX', '阿南庄': 'AZM',
'安庆西': 'APH', '鞍山西': 'AXT', '安塘': 'ATV', '安亭北': 'ASH', '阿图什': 'ATR', '安图': 'ATL', '安溪': 'AXS',
'博鳌': 'BWQ', '北碚': 'BPW', '白壁关': 'BGV', '蚌埠南': 'BMH', '巴楚': 'BCR', '板城': 'BUP', '北戴河': 'BEP',
'保定': 'BDP', '宝坻': 'BPP', '八达岭': 'ILP', '巴东': 'BNN', '柏果': 'BGM', '布海': 'BUT', '白河东': 'BIY',
'贲红': 'BVC', '宝华山': 'BWH', '白河县': 'BEY', '白芨沟': 'BJJ', '碧鸡关': 'BJM', '北滘': 'IBQ', '碧江': 'BLQ',
'白鸡坡': 'BBM', '笔架山': 'BSB', '八角台': 'BTD', '保康': 'BKD', '白奎堡': 'BKB', '白狼': 'BAT', '百浪': 'BRZ',
'博乐': 'BOR', '宝拉格': 'BQC', '巴林': 'BLX', '宝林': 'BNB', '北流': 'BOZ', '勃利': 'BLB', '布列开': 'BLR',
'宝龙山': 'BND', '百里峡': 'AAP', '八面城': 'BMD', '班猫箐': 'BNM', '八面通': 'BMB', '北马圈子': 'BRP', '北票南': 'RPD',
'白旗': 'BQP', '宝泉岭': 'BQB', '白泉': 'BQL', '巴山': 'BAY', '白水江': 'BSY', '白沙坡': 'BPM', '白石山': 'BAL',
'白水镇': 'BUM', '包头 东': 'FDC', '坂田': 'BTQ', '泊头': 'BZP', '北屯': 'BYP', '本溪湖': 'BHT', '博兴': 'BXK',
'八仙筒': 'VXD', '白音察干': 'BYC', '背荫河': 'BYB', '北营': 'BIV', '巴彦高勒': 'BAC', '白音他拉': 'BID', '鲅鱼圈': 'BYT',
'白银市': 'BNJ', '白音胡硕': 'BCD', '巴中': 'IEW', '霸州': 'RMP', '北宅': 'BVP', '赤壁北': 'CIN', '查布嘎': 'CBC',
'长城': 'CEJ', '长冲': 'CCM', '承德东': 'CCP', '赤峰西': 'CID', '嵯岗': 'CAX', '柴岗': 'CGT', '长葛': 'CEF',
'柴沟堡': 'CGV', '城固': 'CGY', '陈官营': 'CAJ', '成高子': 'CZB', '草海': 'WBW', '柴河': 'CHB', '册亨': 'CHZ',
'草河口': 'CKT', '崔黄口': 'CHP', '巢湖': 'CIH', '蔡家沟': 'CJT', '成吉思汗': 'CJX', '岔江': 'CAM', '蔡家坡': 'CJY',
'昌乐': 'CLK', '超梁沟': 'CYP', '慈利': 'CUQ', '昌黎': 'CLP', '长岭子': 'CLT', '晨明': 'CMB', '长农': 'CNJ',
'昌平北': 'VBP', '常平': 'DAQ', '长坡岭': 'CPM', '辰清': 'CQB', '蔡山': 'CON', '楚山': 'CSB', '长寿': 'EFW',
'磁山': 'CSP', '苍石': 'CST', '草市': 'CSL', '察素齐': 'CSC', '长山屯': 'CVT', '长汀': 'CES', '朝天南': 'CTY',
'昌图西': 'CPT', '春湾': 'CQQ', '磁县': 'CIP', '岑溪': 'CNZ', '辰溪': 'CXQ', '磁西': 'CRP', '长兴南': 'CFH',
'磁窑': 'CYK', '春阳': 'CAL', '城阳': 'CEK', '创业村': 'CEX', '朝阳川': 'CYL', '朝阳地': 'CDD', '朝阳南': 'CYD',
'长垣': 'CYF', '朝阳镇': 'CZL', '滁州北': 'CUH', '常州北': 'ESH', '滁州': 'CXH', '潮州': 'CKQ', '常庄': 'CVK',
'曹子里': 'CFP', '车转湾': 'CWM', '郴州西': 'ICQ', '沧州西': 'CBP', '德安': 'DAG', '大安': 'RAT', '大坝': 'DBJ',
'大板': 'DBC', '大巴': 'DBD', '电白': 'NWQ', '到保': 'RBT', '达坂城': 'DCR', '定边': 'DYJ', '东边井': 'DBB',
'德伯斯': 'RDT', '打柴沟': 'DGJ', '德昌': 'DVW', '滴道': 'DDB', '大磴沟': 'DKJ', '刀尔登': 'DRD', '得耳布尔': 'DRX',
'杜尔伯特': 'TKX', '东方': 'UFQ', '丹凤': 'DGY', '东丰': 'DIL', '都格': 'DMM', '大官屯': 'DTT', '大关': 'RGW',
'东光': 'DGP', '东海': 'DHB', '大灰厂': 'DHP', '大红旗': 'DQD', '大禾塘': 'SOQ', '德惠西': 'DXT', '东海县': 'DQH',
'达家沟': 'DJT', '东津': 'DKB', '杜家': 'DJL', '大口屯': 'DKP', '东来': 'RVD', '德令哈': 'DHO', '大陆号': 'DLC',
'带岭': 'DLB', '大林': 'DLD', '达拉特旗': 'DIC', '独立屯': 'DTX', '豆罗': 'DLV', '达拉特西': 'DNC', '大连西': 'GZT',
'东明村': 'DMD', '洞庙河': 'DEP', '东明县': 'DNF', '大拟': 'DNZ', '大平房': 'DPD', '大盘石': 'RPP', '大埔': 'DPI',
'大堡': 'DVT', '大庆东': 'LFX', '大其拉哈': 'DQX', '道清': 'DML', '对青山': 'DQB', '德清西': 'MOH', '大庆西': 'RHX',
'东升': 'DRQ', '砀山': 'DKH', '独山': 'RWW', '登沙河': 'DWT', '读书铺': 'DPM', '大石头': 'DSL', '东胜西': 'DYC',
'大石寨': 'RZT', '东台': 'DBH', '定陶': 'DQK', '灯塔': 'DGT', '大田边': 'DBM', '东通化': 'DTL', '丹徒': 'RUH',
'大屯': 'DNT', '东湾': 'DRJ', '大武口': 'DFJ', '低窝铺': 'DWJ', '大王滩': 'DZZ', '大湾子': 'DFM', '大兴沟': 'DXL',
'大兴': 'DXX', '定西': 'DSJ', '甸心': 'DXM', '东乡': 'DXG', '代县': 'DKV', '定襄': 'DXV', '东戌': 'RXP', '东辛庄': 'DXD',
'丹阳': 'DYH', '德阳': 'DYW', '大雁': 'DYX', '当阳': 'DYN', '丹阳北': 'EXH', '大英东': 'IAW', '东淤地': 'DBV',
'大营': 'DYV', '定远': 'EWH', '岱岳': 'RYV', '大元': 'DYZ', '大营镇': 'DJP', '大营子': 'DZD', '大战场': 'DTJ',
'德州东': 'DIP', '东至': 'DCH', '低庄': 'DVQ', '东镇': 'DNV', '道州': 'DFZ', '东庄': 'DZV', '兑镇': 'DWV', '豆庄': 'ROP',
'定州': 'DXP', '大竹园': 'DZY', '大杖子': 'DAP', '豆张庄': 'RZP', '峨边': 'EBW', '二道沟门': 'RDP', '二道湾': 'RDX',
'鄂尔多斯': 'EEC', '二龙': 'RLD', '二龙山屯': 'ELA', '峨眉': 'EMW', '二密河': 'RML', '恩平': 'PXQ', '二营': 'RYJ',
'鄂州': 'ECN', '福安': 'FAS', '丰城': 'FCG', '丰城南': 'FNG', '肥东': 'FIH', '发耳': 'FEM', '富海': 'FHX', '福海': 'FHR',
'凤凰城': 'FHT', '汾河': 'FEV', '奉化': 'FHH', '富锦': 'FIB', '范家屯': 'FTT', '福利区': 'FLJ', '福利屯': 'FTB',
'丰乐镇': 'FZB', '阜南': 'FNH', '阜宁': 'AKH', '抚宁': 'FNP', '福清': 'FQS', '福泉': 'VMW', '丰水村': 'FSJ',
'丰顺': 'FUQ', '繁峙': 'FSV', '抚顺': 'FST', '福山口': 'FKP', '扶绥': 'FSZ', '冯屯': 'FTX', '浮图峪': 'FYP',
'富县东': 'FDY', '凤县': 'FXY', '富县': 'FEY', '费县': 'FXK', '凤阳': 'FUH', '汾阳': 'FAV', '扶余北': 'FBT',
'分宜': 'FYG', '富源': 'FYM', '扶余': 'FYT', '富裕': 'FYX', '抚州北': 'FBG', '凤州': 'FZY', '丰镇': 'FZC', '范镇': 'VZK',
'固安': 'GFP', '广安': 'VJW', '高碑店': 'GBP', '沟帮子': 'GBD', '甘草店': 'GDJ', '谷城': 'GCN', '藁城': 'GEP',
'高村': 'GCV', '古城镇': 'GZB', '广德': 'GRH', '贵定': 'GTW', '贵定南': 'IDW', '古东': 'GDV', '贵港': 'GGZ',
'官高': 'GVP', '葛根庙': 'GGT', '干沟': 'GGL', '甘谷': 'GGJ', '高各庄': 'GGP', '甘河': 'GAX', '根河': 'GEX',
'郭家店': 'GDT', '孤家子': 'GKT', '古浪': 'GLJ', '皋兰': 'GEJ', '高楼房': 'GFM', '归流河': 'GHT', '关林': 'GLF',
'甘洛': 'VOW', '郭磊庄': 'GLP', '高密': 'GMK', '公庙子': 'GMC', '工农湖': 'GRT', '广宁寺南': 'GNT', '广南卫': 'GNM',
'高平': 'GPF', '甘泉北': 'GEY', '共青城': 'GAG', '甘旗卡': 'GQD', '甘泉': 'GQY', '高桥镇': 'GZD', '灌水': 'GST',
'赶水': 'GSW', '孤山口': 'GSP', '果松': 'GSL', '高山子': 'GSD', '嘎什甸子': 'GXD', '高台': 'GTJ', '高滩': 'GAY',
'古田': 'GTS', '官厅': 'GTP', '官厅西': 'KEP', '贵溪': 'GXG', '涡阳': 'GYH', '巩义': 'GXF', '高邑': 'GIP',
'巩义南': 'GYF', '广元南': 'GAW', '固原': 'GUJ', '菇园': 'GYL', '公营子': 'GYD', '光泽': 'GZS', '古镇': 'GNQ',
'固镇': 'GEH', '虢镇': 'GZY', '瓜州': 'GZJ', '高州': 'GSQ', '盖州': 'GXT', '官字井': 'GOT', '冠豸山': 'GSS',
'盖州西': 'GAT', '海安': 'HIH', '淮安南': 'AMH', '红安': 'HWN', '红安西': 'VXN', '黄柏': 'HBL', '海北': 'HEB',
'鹤壁': 'HAF', '会昌北': 'XEG', '华城': 'VCQ', '河唇': 'HCZ', '汉川': 'HCN', '海城': 'HCT', '合川': 'WKW',
'黑冲滩': 'HCJ', '黄村': 'HCP', '海城西': 'HXT', '化德': 'HGC', '洪洞': 'HDV', '霍尔果斯': 'HFR', '横峰': 'HFG',
'韩府湾': 'HXJ', '汉沽': 'HGP', '黄瓜园': 'HYM', '红光镇': 'IGW', '浑河': 'HHT', '红花沟': 'VHD', '黄花筒': 'HUD',
'贺家店': 'HJJ', '和静': 'HJR', '红江': 'HFM', '黑井': 'HIM', '获嘉': 'HJF', '河津': 'HJV', '涵江': 'HJS', '华家': 'HJT',
'杭锦后旗': 'HDC', '河间西': 'HXP', '花家庄': 'HJM', '河口南': 'HKJ', '湖口': 'HKG', '黄口': 'KOH', '呼兰': 'HUB',
'葫芦岛北': 'HPD', '浩良河': 'HHB', '哈拉海': 'HIT', '鹤立': 'HOB', '桦林': 'HIB', '黄陵': 'ULY', '海林': 'HRB',
'虎林': 'VLB', '寒岭': 'HAT', '和龙': 'HLL', '海龙': 'HIL', '哈拉苏': 'HAX', '呼鲁斯太': 'VTJ', '火连寨': 'HLT',
'黄梅': 'VEH', '韩麻营': 'HYP', '黄泥河': 'HHL', '海宁': 'HNH', '惠农': 'HMJ', '和平': 'VAQ', '花棚子': 'HZM',
'花桥': 'VQH', '宏庆': 'HEY', '怀仁': 'HRV', '华容': 'HRN', '华山北': 'HDY', '黄松甸': 'HDL', '和什托洛盖': 'VSR',
'红山': 'VSB', '汉寿': 'VSQ', '衡山': 'HSQ', '黑水': 'HOT', '惠山': 'VCH', '虎什哈': 'HHP', '红寺堡': 'HSJ',
'虎石台': 'HUT', '海石湾': 'HSO', '衡山西': 'HEQ', '红砂岘': 'VSJ', '黑台': 'HQB', '桓台': 'VTK', '和田': 'VTR',
'会同': 'VTQ', '海坨子': 'HZT', '黑旺': 'HWK', '海湾': 'RWH', '红星': 'VXB', '徽县': 'HYY', '红兴隆': 'VHB',
'换新天': 'VTB', '红岘台': 'HTJ', '红彦': 'VIX', '海晏': 'HFO', '合阳': 'HAY', '衡阳东': 'HVQ', '华蓥': 'HUW',
'汉阴': 'HQY', '黄羊滩': 'HGJ', '汉源': 'WHW', '河源': 'VIQ', '花园': 'HUN', '湟源': 'HNO', '黄羊镇': 'HYJ',
'湖州': 'VZH', '化州': 'HZZ', '黄州': 'VON', '霍州': 'HZV', '惠州西': 'VXQ', '巨宝': 'JRT', '靖边': 'JIY',
'金宝屯': 'JBD', '晋城北': 'JEF', '金昌': 'JCJ', '鄄城': 'JCK', '交城': 'JNV', '建昌': 'JFD', '峻德': 'JDB',
'井店': 'JFP', '鸡东': 'JOB', '江都': 'UDH', '鸡冠山': 'JST', '金沟屯': 'VGP', '静海': 'JHP', '金河': 'JHX',
'锦河': 'JHB', '精河': 'JHR', '精河南': 'JIR', '江华': 'JHZ', '建湖': 'AJH', '纪家沟': 'VJD', '晋江': 'JJS',
'锦界': 'JEY', '姜家': 'JJB', '江津': 'JJW', '金坑': 'JKT', '芨岭': 'JLJ', '金马村': 'JMM', '江门东': 'JWQ',
'角美': 'JES', '莒南': 'JOK', '井南': 'JNP', '建瓯': 'JVS', '经棚': 'JPC', '江桥': 'JQX', '九三': 'SSX', '金山北': 'EGH',
'嘉善': 'JSH', '京山': 'JCN', '建始': 'JRN', '稷山': 'JVV', '吉舒': 'JSL', '建设': 'JET', '甲山': 'JOP', '建三江': 'JIB',
'嘉善南': 'EAH', '金山屯': 'JTB', '江所田': 'JOM', '景泰': 'JTJ', '九台南': 'JNL', '吉文': 'JWX', '进贤': 'JUG',
'莒县': 'JKK', '嘉祥': 'JUK', '介休': 'JXV', '嘉兴': 'JXH', '井陉': 'JJP', '嘉兴南': 'EPH', '夹心子': 'JXT',
'姜堰': 'UEH', '简阳': 'JYW', '揭阳': 'JRQ', '建阳': 'JYS', '巨野': 'JYK', '江永': 'JYZ', '缙云': 'JYH', '靖远': 'JYJ',
'江源': 'SZL', '济源': 'JYF', '靖远西': 'JXJ', '胶州北': 'JZK', '焦作东': 'WEF', '金寨': 'JZH', '靖州': 'JEQ',
'荆州': 'JBN', '胶州': 'JXK', '晋州': 'JXP', '蓟州': 'JIP', '锦州南': 'JOD', '焦作': 'JOF', '旧庄窝': 'JVP',
'金杖子': 'JYD', '开安': 'KAT', '库车': 'KCR', '康城': 'KCP', '库都尔': 'KDX', '宽甸': 'KDT', '克东': 'KOB',
'昆都仑召': 'KDC', '开江': 'KAW', '康金井': 'KJB', '喀喇其': 'KQX', '开鲁': 'KLC', '克拉玛依': 'KHR', '开平南': 'PVQ',
'口前': 'KQL', '昆山': 'KSH', '奎山': 'KAB', '克山': 'KSB', '康熙岭': 'KXZ', '昆阳': 'KAM', '克一河': 'KHX',
'开原西': 'KXT', '康庄': 'KZP', '来宾': 'UBZ', '老边': 'LLT', '灵宝西': 'LPF', '龙川': 'LUQ', '乐昌': 'LCQ',
'黎城': 'UCP', '聊城': 'UCK', '蓝村': 'LCK', '两当': 'LDY', '林东': 'LRC', '乐都': 'LDO', '梁底下': 'LDP',
'六道河子': 'LVP', '鲁番': 'LVM', '廊坊': 'LJP', '落垡': 'LOP', '廊坊北': 'LFP', '老府': 'UFD', '兰岗': 'LNB',
'龙骨甸': 'LGM', '芦沟': 'LOM', '龙沟': 'LGJ', '拉古': 'LGB', '临海': 'UFH', '林海': 'LXX', '拉哈': 'LHX', '凌海': 'JID',
'柳河': 'LNL', '六合': 'KLH', '龙华': 'LHP', '滦河沿': 'UNP', '六合镇': 'LEX', '亮甲店': 'LRT', '刘家店': 'UDT',
'刘家河': 'LVT', '连江': 'LKS', '庐江': 'UJH', '李家': 'LJB', '罗江': 'LJW', '廉江': 'LJZ', '两家': 'UJT', '龙江': 'LJX',
'龙嘉': 'UJL', '莲江口': 'LHB', '蔺家楼': 'ULK', '李家坪': 'LIJ', '兰考': 'LKF', '林口': 'LKB', '路口铺': 'LKQ',
'老莱': 'LAX', '拉林': 'LAB', '陆良': 'LRM', '龙里': 'LLW', '临澧': 'LWQ', '兰棱': 'LLB', '零陵': 'UWZ', '卢龙': 'UAP',
'喇嘛甸': 'LMX', '里木店': 'LMB', '洛门': 'LMJ', '龙南': 'UNG', '梁平': 'UQW', '罗平': 'LPM', '落坡岭': 'LPP',
'六盘山': 'UPJ', '乐平市': 'LPG', '临清': 'UQK', '龙泉寺': 'UQJ', '乐山北': 'UTW', '乐善村': 'LUM', '冷水江东': 'UDQ',
'连山关': 'LGT', '流水沟': 'USP', '丽水': 'USH', '陵水': 'LIQ', '罗山': 'LRN', '鲁山': 'LAF', '梁山': 'LMK',
'灵石': 'LSV', '露水河': 'LUL', '庐山': 'LSG', '林盛堡': 'LBT', '柳树屯': 'LSD', '龙山镇': 'LAS', '梨树镇': 'LSB',
'李石寨': 'LET', '黎塘': 'LTZ', '轮台': 'LAR', '芦台': 'LTP', '龙塘坝': 'LBM', '濑湍': 'LVZ', '骆驼巷': 'LTJ',
'李旺': 'VLJ', '莱芜东': 'LWK', '狼尾山': 'LRJ', '灵武': 'LNJ', '莱芜西': 'UXK', '朗乡': 'LXB', '陇县': 'LXY',
'临湘': 'LXQ', '芦溪': 'LUG', '莱西': 'LXK', '林西': 'LXC', '滦县': 'UXP', '莱阳': 'LYK', '略阳': 'LYY', '辽阳': 'LYT',
'凌源东': 'LDD', '临沂东': 'UYK', '连云港': 'UIH', '临颍': 'LNF', '老营': 'LXL', '龙游': 'LMH', '罗源': 'LVS',
'林源': 'LYX', '涟源': 'LAQ', '涞源': 'LYP', '耒阳西': 'LPQ', '临泽': 'LEJ', '龙爪沟': 'LZT', '雷州': 'UAQ',
'六枝': 'LIW', '鹿寨': 'LIZ', '来舟': 'LZS', '龙镇': 'LZA', '拉鲊': 'LEM', '兰州新区': 'LQJ', '马鞍山': 'MAH',
'毛坝': 'MBY', '毛坝关': 'MGY', '麻城北': 'MBN', '渑池': 'MCF', '明城': 'MCL', '庙城': 'MAP', '渑池南': 'MNF',
'茅草坪': 'KPM', '猛洞河': 'MUQ', '磨刀石': 'MOB', '弥渡': 'MDF', '帽儿山': 'MRB', '明港': 'MGN', '梅河口': 'MHL',
'马皇': 'MHZ', '孟家岗': 'MGB', '美兰': 'MHQ', '汨罗东': 'MQQ', '马莲河': 'MHB', '茅岭': 'MLZ', '庙岭': 'MLL',
'茂林': 'MLD', '穆棱': 'MLB', '马林': 'MID', '马龙': 'MGM', '木里图': 'MUD', '汨罗': 'MLQ', '玛纳斯湖': 'MNR',
'冕宁': 'UGW', '沐滂': 'MPQ', '马桥河': 'MQB', '闽清': 'MQS', '民权': 'MQF', '明水河': 'MUT', '麻山': 'MAB',
'眉山': 'MSW', '漫水湾': 'MKW', '茂舍祖': 'MOM', '米沙子': 'MST', '马踏': 'PWQ', '美溪': 'MEB', '勉县': 'MVY',
'麻阳': 'MVQ', '密云北': 'MUP', '米易': 'MMW', '麦园': 'MYS', '墨玉': 'MUR', '庙庄': 'MZJ', '米脂': 'MEY', '明珠': 'MFQ',
'宁安': 'NAB', '农安': 'NAT', '南博山': 'NBK', '南仇': 'NCK', '南城司': 'NSP', '宁村': 'NCZ', '宁德': 'NES',
'南观村': 'NGP', '南宫东': 'NFP', '南关岭': 'NLT', '宁国': 'NNH', '宁海': 'NHH', '南华北': 'NHS', '南河川': 'NHJ',
'泥河子': 'NHD', '宁家': 'NVT', '南靖': 'NJS', '牛家': 'NJB', '能家': 'NJD', '南口': 'NKP', '南口前': 'NKT',
'南朗': 'NNQ', '乃林': 'NLD', '尼勒克': 'NIR', '那罗': 'ULZ', '宁陵县': 'NLF', '奈曼': 'NMD', '宁明': 'NMZ',
'南木': 'NMX', '南平南': 'NNS', '那铺': 'NPZ', '南桥': 'NQD', '那曲': 'NQO', '暖泉': 'NQJ', '南台': 'NTT', '南头': 'NOQ',
'宁武': 'NWV', '南湾子': 'NWP', '南翔北': 'NEH', '宁乡': 'NXQ', '内乡': 'NXF', '牛心台': 'NXT', '南峪': 'NUP',
'娘子关': 'NIP', '南召': 'NAF', '南杂木': 'NZT', '蓬安': 'PAW', '平安': 'PAL', '平安驿': 'PNO', '磐安镇': 'PAJ',
'平安镇': 'PZT', '蒲城东': 'PEY', '蒲城': 'PCY', '裴德': 'PDB', '偏店': 'PRP', '平顶山西': 'BFF', '坡底下': 'PXJ',
'瓢儿屯': 'PRT', '平房': 'PFB', '平岗': 'PGL', '平果': 'PGZ', '平关': 'PGM', '盘关': 'PAM', '徘徊北': 'PHP',
'平河口': 'PHM', '平湖': 'PHQ', '盘锦北': 'PBD', '潘家店': 'PDP', '皮口南': 'PKT', '普兰店': 'PLT', '偏岭': 'PNT',
'平山': 'PSB', '彭山': 'PSW', '皮山': 'PSR', '磐石': 'PSL', '平社': 'PSV', '彭水': 'PHW', '平台': 'PVT', '平田': 'PTM',
'莆田': 'PTS', '葡萄菁': 'PTW', '普湾': 'PWT', '平旺': 'PWV', '平型关': 'PGV', '普雄': 'POW', '蓬溪': 'KZW',
'郫县': 'PWW', '平洋': 'PYX', '彭阳': 'PYJ', '平遥': 'PYV', '平邑': 'PIK', '平原堡': 'PPJ', '平原': 'PYK', '平峪': 'PYP',
'彭泽': 'PZG', '邳州': 'PJH', '平庄': 'PZD', '泡子': 'POD', '平庄南': 'PND', '乾安': 'QOT', '庆安': 'QAB', '迁安': 'QQP',
'祁东北': 'QRQ', '七甸': 'QDM', '曲阜东': 'QAK', '庆丰': 'QFT', '奇峰塔': 'QVP', '曲阜': 'QFK', '琼海': 'QYQ',
'秦皇岛': 'QTP', '千河': 'QUY', '清河': 'QIP', '清河门': 'QHD', '清华园': 'QHP', '全椒': 'INH', '渠旧': 'QJZ',
'潜江': 'QJN', '秦家': 'QJB', '綦江': 'QJW', '祁家堡': 'QBT', '清涧县': 'QNY', '秦家庄': 'QZV', '七里河': 'QLD',
'秦岭': 'QLY', '渠黎': 'QLZ', '青龙': 'QIB', '青龙山': 'QGH', '祁门': 'QIH', '前磨头': 'QMP', '青山': 'QSB',
'确山': 'QSN', '前山': 'QXQ', '清水': 'QUJ', '戚墅堰': 'QYH', '青田': 'QVH', '桥头': 'QAT', '青铜峡': 'QTJ',
'前卫': 'QWD', '前苇塘': 'QWP', '渠县': 'QRW', '祁县': 'QXV', '青县': 'QXP', '桥西': 'QXJ', '清徐': 'QUV',
'旗下营': 'QXC', '千阳': 'QOY', '沁阳': 'QYF', '泉阳': 'QYL', '祁阳北': 'QVQ', '七营': 'QYJ', '庆阳山': 'QSJ',
'清远': 'QBQ', '清原': 'QYT', '钦州东': 'QDZ', '钦州': 'QRZ', '青州市': 'QZK', '瑞安': 'RAH', '荣昌': 'RCW',
'瑞昌': 'RCG', '如皋': 'RBH', '容桂': 'RUQ', '任丘': 'RQP', '乳山': 'ROK', '融水': 'RSZ', '热水': 'RSD', '容县': 'RXZ',
'饶阳': 'RVP', '汝阳': 'RYF', '绕阳河': 'RHD', '汝州': 'ROF', '石坝': 'OBJ', '上板城': 'SBP', '施秉': 'AQW',
'上板城南': 'OBP', '世博园': 'ZWT', '双城北': 'SBB', '舒城': 'OCH', '商城': 'SWN', '莎车': 'SCR', '顺昌': 'SCS',
'神池': 'SMV', '沙城': 'SCP', '石城': 'SCT', '山城镇': 'SCL', '山丹': 'SDJ', '顺德': 'ORQ', '绥德': 'ODY', '水洞': 'SIL',
'商都': 'SXC', '十渡': 'SEP', '四道湾': 'OUD', '顺德学院': 'OJQ', '绅坊': 'OLH', '双丰': 'OFB', '四方台': 'STB',
'水富': 'OTW', '三关口': 'OKJ', '桑根达来': 'OGC', '韶关': 'SNQ', '上高镇': 'SVK', '上杭': 'JBS', '沙海': 'SED',
'蜀河': 'SHY', '松河': 'SBM', '沙河': 'SHP', '沙河口': 'SKT', '赛汗塔拉': 'SHC', '沙河市': 'VOP', '沙后所': 'SSD',
'山河屯': 'SHL', '三河县': 'OXP', '四合永': 'OHD', '三汇镇': 'OZW', '双河镇': 'SEL', '石河子': 'SZR', '三合庄': 'SVP',
'三家店': 'ODP', '水家湖': 'SQH', '沈家河': 'OJJ', '松江河': 'SJL', '尚家': 'SJB', '孙家': 'SUB', '沈家': 'OJB',
'双吉': 'SML', '松江': 'SAH', '三江口': 'SKD', '司家岭': 'OLK', '松江南': 'IMH', '石景山南': 'SRP', '邵家堂': 'SJJ',
'三江县': 'SOZ', '三家寨': 'SMM', '十家子': 'SJD', '松江镇': 'OZL', '施家嘴': 'SHM', '深井子': 'SWT', '什里店': 'OMP',
'疏勒': 'SUR', '疏勒河': 'SHJ', '舍力虎': 'VLD', '石磷': 'SPB', '石林': 'SLM', '双辽': 'ZJD', '绥棱': 'SIB',
'石岭': 'SOL', '石林南': 'LNM', '石龙': 'SLQ', '萨拉齐': 'SLC', '索伦': 'SNT', '商洛': 'OLY', '沙岭子': 'SLP',
'石门县北': 'VFQ', '三门峡南': 'SCF', '三门县': 'OQH', '石门县': 'OMQ', '三门峡西': 'SXF', '肃宁': 'SYP', '宋': 'SOB',
'双牌': 'SBZ', '沙坪坝': 'CYW', '四平东': 'PPT', '遂平': 'SON', '沙坡头': 'SFJ', '沙桥': 'SQM', '商丘南': 'SPF',
'水泉': 'SID', '石泉县': 'SXY', '石桥子': 'SQT', '石人城': 'SRB', '石人': 'SRL', '山市': 'SQB', '神树': 'SWB',
'鄯善': 'SSR', '三水': 'SJQ', '泗水': 'OSK', '石山': 'SAD', '松树': 'SFT', '首山': 'SAT', '三十家': 'SRD',
'三十里堡': 'SST', '双水镇': 'PQQ', '松树镇': 'SSL', '松桃': 'MZQ', '索图罕': 'SHX', '三堂集': 'SDH', '石头': 'OTB',
'神头': 'SEV', '沙沱': 'SFM', '上万': 'SWP', '孙吴': 'SKB', '沙湾县': 'SXR', '歙县': 'OVH', '遂溪': 'SXZ', '沙县': 'SAS',
'绍兴': 'SOH', '石岘': 'SXL', '上西铺': 'SXM', '石峡子': 'SXJ', '沭阳': 'FMH', '绥阳': 'SYB', '寿阳': 'SYV',
'水洋': 'OYP', '三阳川': 'SYJ', '上腰墩': 'SPJ', '三营': 'OEJ', '顺义': 'SOP', '三义井': 'OYD', '三源浦': 'SYL',
'上虞': 'BDH', '三原': 'SAY', '上园': 'SUD', '水源': 'OYJ', '桑园子': 'SAJ', '绥中北': 'SND', '苏州北': 'OHH',
'宿州东': 'SRH', '深圳东': 'BJQ', '深州': 'OZP', '孙镇': 'OZY', '绥中': 'SZD', '尚志': 'SZB', '师庄': 'SNM',
'松滋': 'SIN', '师宗': 'SEM', '苏州园区': 'KAH', '苏州新区': 'ITH', '泰安': 'TMK', '台安': 'TID', '通安驿': 'TAJ',
'桐柏': 'TBF', '通北': 'TBB', '桐城': 'TTH', '汤池': 'TCX', '郯城': 'TZK', '铁厂': 'TCL', '桃村': 'TCK', '通道': 'TRQ',
'田东': 'TDZ', '天岗': 'TGL', '土贵乌拉': 'TGC', '通沟': 'TOL', '太谷': 'TGV', '塔哈': 'THX', '棠海': 'THM',
'唐河': 'THF', '泰和': 'THG', '太湖': 'TKH', '团结': 'TIX', '谭家井': 'TNJ', '陶家屯': 'TOT', '唐家湾': 'PDQ',
'统军庄': 'TZP', '吐列毛杜': 'TMD', '图里河': 'TEX', '铜陵': 'TJH', '田林': 'TFZ', '亭亮': 'TIZ', '铁力': 'TLB',
'铁岭西': 'PXT', '图们北': 'QSL', '天门': 'TMN', '天门南': 'TNN', '太姥山': 'TLS', '土牧尔台': 'TRC', '土门子': 'TCJ',
'洮南': 'TVT', '潼南': 'TVW', '太平川': 'TIT', '太平镇': 'TEB', '图强': 'TQX', '台前': 'TTK', '天桥岭': 'TQL',
'土桥子': 'TQJ', '汤山城': 'TCT', '桃山': 'TAB', '台山': 'PUQ', '塔石嘴': 'TIM', '通途': 'TUT', '汤旺河': 'THB',
'同心': 'TXJ', '土溪': 'TSW', '桐乡': 'TCH', '田阳': 'TRZ', '天义': 'TND', '汤阴': 'TYF', '驼腰岭': 'TIL',
'太阳山': 'TYJ', '通榆': 'KTT', '汤原': 'TYB', '塔崖驿': 'TYP', '滕州东': 'TEK', '台州': 'TZH', '天祝': 'TZJ',
'滕州': 'TXK', '天镇': 'TZV', '桐子林': 'TEW', '天柱山': 'QWH', '文安': 'WBP', '武安': 'WAP', '王安镇': 'WVP',
'吴堡': 'WUY', '旺苍': 'WEW', '五叉沟': 'WCT', '文昌': 'WEQ', '温春': 'WDB', '五大连池': 'WRB', '文登': 'WBK',
'五道沟': 'WDL', '五道河': 'WHP', '文地': 'WNZ', '卫东': 'WVT', '武当山': 'WRN', '望都': 'WDP', '乌尔旗汗': 'WHX',
'潍坊': 'WFK', '万发屯': 'WFB', '王府': 'WUT', '瓦房店西': 'WXT', '王岗': 'WGB', '武功': 'WGY', '湾沟': 'WGL',
'吴官田': 'WGM', '乌海': 'WVC', '苇河': 'WHB', '卫辉': 'WHF', '吴家川': 'WCJ', '五家': 'WUB', '威箐': 'WAM',
'午汲': 'WJP', '渭津': 'WJL', '王家湾': 'WJJ', '倭肯': 'WQB', '五棵树': 'WKT', '五龙背': 'WBT', '乌兰哈达': 'WLC',
'万乐': 'WEB', '瓦拉干': 'WVX', '温岭': 'VHH', '五莲': 'WLK', '乌拉特前旗': 'WQC', '乌拉山': 'WSC', '卧里屯': 'WLX',
'渭南北': 'WBY', '乌奴耳': 'WRX', '万宁': 'WNQ', '万年': 'WWG', '渭南南': 'WVY', '渭南镇': 'WNJ', '沃皮': 'WPT',
'吴桥': 'WUP', '汪清': 'WQL', '武清': 'WWP', '武山': 'WSJ', '文水': 'WEV', '魏善庄': 'WSP', '王瞳': 'WTP',
'五台山': 'WSV', '王团庄': 'WZJ', '五五': 'WVR', '无锡东': 'WGH', '卫星': 'WVB', '闻喜': 'WXV', '武乡': 'WVV',
'无锡新区': 'IFH', '武穴': 'WXN', '吴圩': 'WYZ', '王杨': 'WYB', '武义': 'RYH', '五营': 'WWB', '瓦窑田': 'WIM',
'五原': 'WYC', '苇子沟': 'WZL', '韦庄': 'WZY', '五寨': 'WZV', '王兆屯': 'WZB', '微子镇': 'WQP', '魏杖子': 'WKD',
'新安': 'EAM', '兴安': 'XAZ', '新安县': 'XAF', '新保安': 'XAP', '下板城': 'EBP', '西八里': 'XLP', '宣城': 'ECH',
'兴城': 'XCD', '小村': 'XEM', '新绰源': 'XRX', '下城子': 'XCB', '新城子': 'XCT', '喜德': 'EDW', '小得江': 'EJM',
'西大庙': 'XMP', '小董': 'XEZ', '小东': 'XOD', '香坊': 'XFB', '信丰': 'EFG', '襄汾': 'XFV', '息烽': 'XFW', '新干': 'EGG',
'轩岗': 'XGV', '孝感': 'XGN', '西固城': 'XUJ', '兴国': 'EUG', '西固': 'XIJ', '夏官营': 'XGJ', '西岗子': 'NBB',
'宣汉': 'XHY', '襄河': 'XXB', '新和': 'XIR', '宣和': 'XWJ', '斜河涧': 'EEP', '新华屯': 'XAX', '新会': 'EFQ',
'新华': 'XHB', '新晃': 'XLQ', '新化': 'EHQ', '宣化': 'XHP', '兴和西': 'XEC', '小河沿': 'XYD', '下花园': 'XYP',
'小河镇': 'EKY', '徐家店': 'HYK', '徐家': 'XJB', '峡江': 'EJG', '新绛': 'XJV', '辛集': 'ENP', '新江': 'XJM',
'西街口': 'EKM', '许家屯': 'XJT', '许家台': 'XTJ', '谢家镇': 'XMT', '兴凯': 'EKB', '小榄': 'EAQ', '香兰': 'XNB',
'兴隆店': 'XDD', '新乐': 'ELP', '新林': 'XPX', '小岭': 'XLB', '新李': 'XLJ', '西林': 'XYB', '西柳': 'GCT', '仙林': 'XPH',
'新立屯': 'XLD', '兴隆县': 'EXP', '兴隆镇': 'XZB', '新立镇': 'XGT', '新民': 'XMD', '西麻山': 'XMB', '下马塘': 'XAT',
'孝南': 'XNV', '咸宁北': 'XRN', '兴宁': 'ENQ', '咸宁': 'XNN', '犀浦东': 'XAW', '西平': 'XPN', '兴平': 'XPY',
'新坪田': 'XPM', '霞浦': 'XOS', '溆浦': 'EPQ', '犀浦': 'XIW', '新青': 'XQB', '新邱': 'XQD', '兴泉堡': 'XQJ',
'仙人桥': 'XRL', '小寺沟': 'ESP', '杏树': 'XSB', '浠水': 'XZN', '下社': 'XSV', '小市': 'XST', '徐水': 'XSP',
'夏石': 'XIZ', '小哨': 'XAM', '秀山': 'ETW', '新松浦': 'XOB', '杏树屯': 'XDT', '许三湾': 'XSJ', '湘潭': 'XTQ',
'邢台': 'XTP', '向塘': 'XTG', '仙桃西': 'XAN', '下台子': 'EIP', '徐闻': 'XJQ', '新窝铺': 'EPD', '修武': 'XWF',
'新县': 'XSN', '息县': 'ENN', '西乡': 'XQY', '湘乡': 'XXQ', '西峡': 'XIF', '孝西': 'XOV', '小新街': 'XXM',
'新兴县': 'XGQ', '西小召': 'XZC', '小西庄': 'XXP', '向阳': 'XDB', '旬阳': 'XUY', '旬阳北': 'XBY', '襄阳东': 'XWN',
'兴业': 'SNZ', '小雨谷': 'XHM', '新沂': 'VIH', '兴义': 'XRZ', '信宜': 'EEQ', '小月旧': 'XFM', '小扬气': 'XYX',
'襄垣': 'EIF', '夏邑县': 'EJH', '祥云西': 'EXM', '新友谊': 'EYB', '新阳镇': 'XZJ', '徐州东': 'UUH', '新帐房': 'XZX',
'悬钟': 'XRP', '新肇': 'XZT', '忻州': 'XXV', '汐子': 'XZD', '西哲里木': 'XRD', '新杖子': 'ERP', '姚安': 'YAC',
'依安': 'YAX', '永安': 'YAS', '永安乡': 'YNB', '亚布力': 'YBB', '元宝山': 'YUD', '羊草': 'YAB', '秧草地': 'YKM',
'阳澄湖': 'AIH', '迎春': 'YYB', '叶城': 'YER', '盐池': 'YKJ', '砚川': 'YYY', '阳春': 'YQQ', '宜城': 'YIN', '应城': 'YHN',
'禹城': 'YCK', '晏城': 'YEK', '阳城': 'YNF', '阳岔': 'YAL', '郓城': 'YPK', '雁翅': 'YAP', '云彩岭': 'ACP',
'虞城县': 'IXH', '营城子': 'YCT', '英德': 'YDQ', '永登': 'YDJ', '尹地': 'YDM', '永定': 'YGS', '阳东': 'WLQ',
'雁荡山': 'YGH', '于都': 'YDG', '园墩': 'YAJ', '英德西': 'IIQ', '永丰营': 'YYM', '杨岗': 'YRB', '阳高': 'YOV',
'阳谷': 'YIK', '友好': 'YOB', '余杭': 'EVH', '沿河城': 'YHP', '岩会': 'AEP', '羊臼河': 'YHM', '永嘉': 'URH',
'营街': 'YAM', '盐津': 'AEW', '阳江': 'WRQ', '余江': 'YHG', '燕郊': 'AJP', '姚家': 'YAT', '岳家井': 'YGJ',
'一间堡': 'YJT', '英吉沙': 'YIR', '云居寺': 'AFP', '燕家庄': 'AZK', '永康': 'RFH', '营口东': 'YGT', '银浪': 'YJX',
'永郎': 'YLW', '宜良北': 'YSM', '永乐店': 'YDY', '伊拉哈': 'YLX', '伊林': 'YLB', '杨陵': 'YSY', '彝良': 'ALW',
'杨林': 'YLM', '余粮堡': 'YLD', '杨柳青': 'YQP', '月亮田': 'YUM', '义马': 'YMF', '阳明堡': 'YVV', '玉门': 'YXJ',
'云梦': 'YMN', '元谋': 'YMM', '一面山': 'YST', '沂南': 'YNK', '宜耐': 'YVM', '伊宁东': 'YNR', '营盘水': 'YZJ',
'羊堡': 'ABM', '阳泉北': 'YPP', '乐清': 'UPH', '焉耆': 'YSR', '源迁': 'AQK', '姚千户屯': 'YQT', '阳曲': 'YQV',
'榆树沟': 'YGP', '月山': 'YBF', '玉石': 'YSJ', '玉舍': 'AUM', '偃师': 'YSF', '沂水': 'YUK', '榆社': 'YSV', '颍上': 'YVH',
'窑上': 'ASP', '元氏': 'YSP', '杨树岭': 'YAD', '野三坡': 'AIP', '榆树屯': 'YSX', '榆树台': 'YUT', '鹰手营子': 'YIP',
'源潭': 'YTQ', '牙屯堡': 'YTZ', '烟筒山': 'YSL', '烟筒屯': 'YUX', '羊尾哨': 'YWM', '越西': 'YHW', '攸县': 'YOG',
'阳西': 'WMQ', '永修': 'ACG', '玉溪西': 'YXM', '弋阳': 'YIG', '余姚': 'YYH', '酉阳': 'AFW', '岳阳东': 'YIQ',
'阳邑': 'ARP', '鸭园': 'YYL', '鸳鸯镇': 'YYJ', '燕子砭': 'YZY', '仪征': 'UZH', '宜州': 'YSZ', '兖州': 'YZK',
'迤资': 'YQM', '羊者窝': 'AEM', '杨杖子': 'YZD', '镇安': 'ZEY', '治安': 'ZAD', '招柏': 'ZBP', '张百湾': 'ZUP',
'中川机场': 'ZJJ', '枝城': 'ZCN', '子长': 'ZHY', '诸城': 'ZQK', '邹城': 'ZIK', '赵城': 'ZCV', '章党': 'ZHT',
'正定': 'ZDP', '肇东': 'ZDB', '照福铺': 'ZFM', '章古台': 'ZGD', '赵光': 'ZGB', '中和': 'ZHX', '中华门': 'VNH',
'枝江北': 'ZIN', '钟家村': 'ZJY', '朱家沟': 'ZUB', '紫荆关': 'ZYP', '周家': 'ZOB', '诸暨': 'ZDH', '镇江南': 'ZEH',
'周家屯': 'ZOD', '褚家湾': 'CWJ', '湛江西': 'ZWQ', '朱家窑': 'ZUJ', '曾家坪子': 'ZBW', '张兰': 'ZLV', '镇赉': 'ZLT',
'枣林': 'ZIV', '扎鲁特': 'ZLD', '扎赉诺尔西': 'ZXX', '樟木头': 'ZOQ', '中牟': 'ZGF', '中宁东': 'ZDJ', '中宁': 'VNJ',
'中宁南': 'ZNJ', '镇平': 'ZPF', '漳平': 'ZPS', '泽普': 'ZPR', '枣强': 'ZVP', '张桥': 'ZQY', '章丘': 'ZTK',
'朱日和': 'ZRC', '泽润里': 'ZLM', '中山北': 'ZGQ', '樟树东': 'ZOG', '珠斯花': 'ZHD', '中山': 'ZSQ', '柞水': 'ZSY',
'钟山': 'ZSZ', '樟树': 'ZSG', '珠窝': 'ZOP', '张维屯': 'ZWB', '彰武': 'ZWD', '棕溪': 'ZOY', '钟祥': 'ZTN', '资溪': 'ZXS',
'镇西': 'ZVT', '张辛': 'ZIP', '正镶白旗': 'ZXC', '紫阳': 'ZVY', '枣阳': 'ZYN', '竹园坝': 'ZAW', '张掖': 'ZYJ',
'镇远': 'ZUW', '漳州东': 'GOS', '漳州': 'ZUS', '壮志': 'ZUX', '子洲': 'ZZY', '中寨': 'ZZM', '涿州': 'ZXP', '咋子': 'ZAL',
'卓资山': 'ZZC', '株洲西': 'ZAQ', '郑州西': 'XPF', '阿巴嘎旗': 'AQC', '阿城北': 'ABB', '阿尔山北': 'ARX', '安江东': 'ADA',
'阿勒泰': 'AUR', '安仁': 'ARG', '安顺西': 'ASE', '安图西': 'AXL', '安阳东': 'ADF', '博白': 'BBZ', '八步': 'BBE',
'栟茶': 'FWH', '保定东': 'BMP', '八方山': 'FGQ', '白沟': 'FEP', '滨海': 'YKP', '滨海北': 'FCP', '滨海港': 'BGU',
'滨海西': 'FHP', '宝鸡南': 'BBY', '北井子': 'BRT', '白马井': 'BFQ', '北票': 'BPT', '宝清': 'BUB', '璧山': 'FZW',
'白沙铺': 'BSN', '白水县': 'BGY', '板塘': 'NGQ', '白文东': 'BCV', '宾西北': 'BBB', '本溪新城': 'BVT', '宾阳': 'UKZ',
'白洋淀': 'FWP', '百宜': 'FHW', '白音华南': 'FNC', '巴中东': 'BDE', '彬州': 'BXY', '滨州': 'BIK', '宾州': 'BZB',
'霸州西': 'FOP', '澄城': 'CUY', '承德县北': 'IYP', '承德南': 'IVP', '成都西': 'CMW', '曹妃甸东': 'POP', '曹妃甸港': 'PGP',
'城固北': 'CBY', '查干湖': 'VAT', '巢湖东': 'GUH', '从江': 'KNW', '蔡家崖': 'EBV', '茶卡': 'CVO', '长临河': 'FVH',
'茶陵南': 'CNG', '常平东': 'FQQ', '常平南': 'FPQ', '长庆桥': 'CQJ', '长寿北': 'COW', '长寿湖': 'CSE', '常山': 'CSU',
'潮汕': 'CBQ', '长沙西': 'RXQ', '朝天': 'CTE', '长汀南': 'CNS', '长武': 'CWY', '长兴': 'CBH', '苍溪': 'CXE',
'楚雄': 'CUM', '朝阳': 'VBT', '长阳': 'CYN', '潮阳': 'CNQ', '朝阳湖': 'CYE', '崇州': 'CZE', '城子坦': 'CWT',
'东安东': 'DCZ', '德保': 'RBZ', '都昌': 'DCG', '东岔': 'DCJ', '东城南': 'IYQ', '东戴河': 'RDD', '丹东西': 'RWT',
'东二道河': 'DRB', '大丰': 'KRQ', '大方南': 'DNE', '东港北': 'RGT', '大孤山': 'RMT', '东莞': 'RTQ', '鼎湖东': 'UWQ',
'鼎湖山': 'NVQ', '道滘': 'RRQ', '垫江': 'DJE', '洞井': 'FWQ', '董家口': 'DTK', '大苴': 'DIM', '洞口': 'DKA',
'达连河': 'DCB', '大荔': 'DNY', '大朗镇': 'KOQ', '得莫利': 'DTB', '大青沟': 'DSD', '德清': 'DRH', '东胜东': 'RSC',
'砀山南': 'PRH', '大石头南': 'DAL', '当涂东': 'OWH', '大通西': 'DTO', '大旺': 'WWQ', '定西北': 'DNJ', '德兴东': 'DDG',
'德兴': 'DWG', '丹霞山': 'IRQ', '大冶北': 'DBN', '都匀东': 'KJW', '大邑': 'DEE', '东营南': 'DOK', '大余': 'DYG',
'定州东': 'DOP', '端州': 'WZQ', '大足南': 'FQW', '峨眉山': 'IXW', '阿房宫': 'EGY', '鄂州东': 'EFN', '防城港北': 'FBZ',
'凤城东': 'FDT', '富川': 'FDZ', '繁昌西': 'PUH', '丰都': 'FUW', '涪陵北': 'FEW', '枫林': 'FLN', '阜宁东': 'FDU',
'富宁': 'FNM', '佛坪': 'FUY', '法启': 'FQE', '芙蓉南': 'KCQ', '复盛': 'FAW', '抚松': 'FSL', '佛山西': 'FOQ',
'福山镇': 'FZQ', '福田': 'NZQ', '阜新': 'FOT', '富阳': 'FYU', '富源北': 'FBM', '抚远': 'FYB', '抚州东': 'FDG',
'抚州': 'FZG', '方正': 'FNB', '福州 南': 'FXS', '高安': 'GCG', '广安南': 'VUW', '贵安': 'GAE', '高碑店东': 'GMP',
'恭城': 'GCZ', '藁城南': 'GUP', '贵定北': 'FMW', '葛店南': 'GNN', '贵定县': 'KIW', '广汉北': 'GVW', '高花': 'HGD',
'革居': 'GEM', '高楞': 'GLB', '关岭': 'GLE', '桂林西': 'GEZ', '高密北': 'GVK', '光明城': 'IMQ', '广宁': 'FBQ',
'广宁寺': 'GQT', '广南县': 'GXM', '桂平': 'GAZ', '弓棚子': 'GPT', '赶水东': 'GDE', '光山': 'GUN', '谷山': 'FFQ',
'观沙岭': 'FKQ', '古田北': 'GBS', '广通北': 'GPM', '高台南': 'GAJ', '古田会址': 'STS', '贵阳北': 'KQW', '贵阳东': 'KEW',
'赣榆': 'GYU', '高邑西': 'GNP', '惠安': 'HNS', '淮北北': 'PLH', '鹤壁东': 'HFF', '寒葱沟': 'HKB', '霍城': 'SER',
'珲春': 'HUL', '横道河子东': 'KUX', '邯郸东': 'HPP', '惠东': 'KDQ', '哈达铺': 'HDJ', '洪洞西': 'HTV', '海东西': 'HDO',
'哈尔滨北': 'HTB', '合肥北城': 'COH', '合肥南': 'ENH', '黄冈': 'KGN', '黄冈东': 'KAN', '横沟桥东': 'HNN', '黄冈西': 'KXN',
'洪河': 'HPB', '怀化南': 'KAQ', '黄河景区': 'HCF', '惠环': 'KHQ', '花湖': 'KHN', '后湖': 'IHN', '怀集': 'FAQ',
'河口北': 'HBM', '宏克力': 'OKB', '海林北': 'KBX', '黄流': 'KLQ', '黄陵南': 'VLY', '鲘门': 'KMQ', '海门': 'HMU',
'虎门': 'IUQ', '侯马西': 'HPV', '衡南': 'HNG', '淮南东': 'HOH', '合浦': 'HVZ', '霍邱': 'FBH', '怀仁东': 'HFV',
'华容东': 'HPN', '华容南': 'KRN', '黑山北': 'HQT', '衡水北': 'IHP', '黄石北': 'KSN', '黄山北': 'NYH', '贺胜桥东': 'HLN',
'和硕': 'VUR', '花山南': 'KNN', '荷塘': 'KXQ', '黄土店': 'HKP', '海阳北': 'HEK', '合阳北': 'HTY', '槐荫': 'IYN',
'鄠邑': 'KXY', '花园口': 'HYT', '霍州东': 'HWV', '惠州南': 'KNQ', '建安': 'JUL', '泾川': 'JAJ', '景德镇北': 'JDG',
'旌德': 'NSH', '建德': 'JDU', '尖峰': 'PFQ', '近海': 'JHD', '蛟河西': 'JOL', '军粮城北': 'JMP', '将乐': 'JLS',
'贾鲁河': 'JLF', '九郎山': 'KJQ', '即墨北': 'JVK', '剑门关': 'JME', '佳木斯西': 'JUB', '建宁县北': 'JCS', '济南东': 'MDK',
'江宁': 'JJH', '江宁西': 'OKH', '建瓯西': 'JUS', '酒泉南': 'JNJ', '句容西': 'JWH', '建水': 'JSM', '尖山': 'JPQ',
'界首市': 'JUN', '绩溪北': 'NRH', '介休东': 'JDV', '泾县': 'LOH', '靖西': 'JMZ', '进贤南': 'JXG', '江油北': 'JBE',
'简阳南': 'JOW', '嘉峪关南': 'JBJ', '金银潭': 'JTN', '靖宇': 'JYL', '金月湾': 'PYQ', '缙云西': 'PYH', '景州': 'JEP',
'晋中': 'JZV', '开封北': 'KBF', '开福寺': 'FLQ', '开化': 'KHU', '凯里南': 'QKW', '库伦': 'KLD', '昆明南': 'KOM',
'葵潭': 'KTQ', '开阳': 'KVW', '喀左': 'KZT', '隆安东': 'IDZ', '来宾北': 'UCZ', '灵璧': 'GMH', '寮步': 'LTQ',
'绿博园': 'LCF', '隆昌北': 'NWW', '乐昌东': 'ILQ', '临城': 'UUP', '罗城': 'VCZ', '陵城': 'LGK', '老城镇': 'ACQ',
'龙洞堡': 'FVW', '乐都南': 'LVO', '娄底南': 'UOQ', '乐东': 'UQQ', '离堆公园': 'INW', '娄烦': 'USV', '陆丰': 'LLQ',
'龙丰': 'KFQ', '禄丰南': 'LQM', '临汾西': 'LXV', '临高南': 'KGQ', '麓谷': 'BNQ', '滦河': 'UDP', '珞璜南': 'LNE',
'隆回': 'LHA', '漯河西': 'LBN', '罗江东': 'IKW', '柳江': 'UQZ', '利津南': 'LNK', '兰考南': 'LUF', '龙口市': 'UKK',
'龙里北': 'KFW', '兰陵北': 'COK', '沥林北': 'KBQ', '醴陵东': 'UKQ', '陇南': 'INJ', '梁平南': 'LPE', '礼泉': 'LGY',
'灵石东': 'UDV', '乐山': 'IVW', '龙市': 'LAG', '溧水': 'LDH', '娄山关南': 'LSE', '岚山西': 'UWK', '洛湾三江': 'KRW',
'莱西北': 'LBK', '岚县': 'UXV', '溧阳': 'LEH', '临邑': 'LUK', '柳园南': 'LNR', '鹿寨北': 'LSZ', '临淄北': 'UEK',
'阆中': 'LZE', '临泽南': 'LDJ', '马鞍山东': 'OMH', '毛陈': 'MHN', '帽儿山西': 'MUB', '明港东': 'MDN', '民和南': 'MNO',
'闵集': 'MJN', '马兰': 'MLR', '民乐': 'MBJ', '弥勒': 'MLM', '玛纳斯': 'MSR', '牟平': 'MBK', '闽清北': 'MBS',
'民权北': 'MIF', '眉山东': 'IUW', '名山': 'MSE', '庙山': 'MSN', '岷县': 'MXJ', '门源': 'MYO', '暮云': 'KIQ',
'蒙自北': 'MBM', '孟庄': 'MZF', '蒙自': 'MZM', '南部': 'NBE', '南曹': 'NEF', '南充北': 'NCE', '南城': 'NDG',
'南 昌': 'NOG', '南昌西': 'NXG', '宁东南': 'NDJ', '宁东': 'NOJ', '南芬北': 'NUT', '南丰': 'NFG', '南湖东': 'NDN',
'牛河梁': 'LKT', '南华': 'NAM', '内江北': 'NKW', '南江': 'FIW', '南江口': 'NDQ', '奈林皋': 'NGT', '南陵': 'LLH',
'尼木': 'NMO', '南宁东': 'NFZ', '南宁西': 'NXZ', '南平北': 'NBS', '南堡北': 'TLP', '宁强南': 'NOY', '南雄': 'NCQ',
'纳雍': 'NYE', '南阳寨': 'NYF', '普安': 'PAN', '普安县': 'PUE', '屏边': 'PBM', '平坝南': 'PBE', '平昌': 'PCE',
'普定': 'PGW', '平度': 'PAK', '蒲江': 'PJE', '皮口': 'PUT', '盘龙城': 'PNN', '蓬莱市': 'POK', '普宁': 'PEQ',
'平南南': 'PAZ', '平泉北': 'PBP', '彭山北': 'PPW', '盘山': 'PUD', '坪上': 'PSK', '萍乡北': 'PBG', '鄱阳': 'PYG',
'濮阳': 'PYF', '平遥古城': 'PDV', '平原东': 'PUK', '盘州': 'PAE', '普者黑': 'PZM', '彭州': 'PMW', '秦安': 'QGJ',
'青白江东': 'QFW', '青川': 'QCE', '青岛北': 'QHK', '千岛湖': 'QDU', '祁东': 'QMQ', '启东': 'QOU', '青堆': 'QET',
'青岛西': 'QUK', '前锋': 'QFB', '清河门北': 'QBD', '齐河': 'QIK', '曲靖北': 'QBM', '綦江东': 'QDE', '曲江': 'QIM',
'邛崃': 'QLE', '青莲': 'QEW', '齐齐哈尔南': 'QNB', '清水北': 'QEJ', '青神': 'QVW', '岐山': 'QAY', '庆盛': 'QSQ',
'清水县': 'QIJ', '曲水县': 'QSO', '祁县东': 'QGV', '乾县': 'QBY', '旗下营南': 'QNC', '祁阳': 'QWQ', '青州市北': 'QOK',
'全州南': 'QNZ', '棋子湾': 'QZQ', '仁布': 'RUO', '荣昌北': 'RQW', '荣成': 'RCK', '瑞昌西': 'RXG', '如东': 'RIH',
'榕江': 'RVW', '日喀则': 'RKO', '饶平': 'RVQ', '日照西': 'KZK', '宋城路': 'SFF', '三道湖': 'SDL', '邵东': 'FIQ',
'三都县': 'KKW', '胜芳': 'SUP', '双峰北': 'NFQ', '商河': 'SOK', '泗洪': 'GQH', '四会': 'AHQ', '石家庄东': 'SXP',
'三江南': 'SWZ', '三井子': 'OJT', '双流机场': 'IPW', '双龙湖': 'OHB', '石林西': 'SYM', '沙岭子西': 'IXP', '双流西': 'IQW',
'胜利镇': 'OLB', '三明北': 'SHS', '三明': 'SVS', '嵩明': 'SVM', '树木岭': 'FMQ', '神木': 'HMY', '苏尼特左旗': 'ONC',
'山坡东': 'SBN', '石桥': 'SQE', '沈丘': 'SQN', '鄯善北': 'SMR', '狮山北': 'NSQ', '三水北': 'ARQ', '松山湖北': 'KUQ',
'狮山': 'KSQ', '三水南': 'RNQ', '韶山南': 'INQ', '三穗': 'QHW', '石梯': 'STE', '汕尾': 'OGQ', '歙县北': 'NPH',
'绍兴北': 'SLH', '绍兴东': 'SSH', '泗县': 'GPH', '始兴': 'IPQ', '双洋': 'SQS', '泗阳': 'MPH', '三阳': 'SYU',
'射阳': 'SAU', '双阳': 'OYT', '邵阳北': 'OVQ', '松原北': 'OCT', '山阴': 'SNV', '邵阳西': 'SXA', '沈阳西': 'OOT',
'深圳北': 'IOQ', '神州': 'SRQ', '尚志南': 'OZB', '深圳坪山': 'IFQ', '石嘴山': 'QQJ', '石柱县': 'OSW', '台安南': 'TAD',
'桃村北': 'TOK', '田东北': 'TBZ', '土地堂东': 'TTN', '太谷西': 'TIV', '吐哈': 'THR', '通海': 'TAM', '太和北': 'JYN',
'天河机场': 'TJN', '天河街': 'TEN', '唐海南': 'IEP', '通化县': 'TXL', '同江': 'TJB', '托克托东': 'TVC', '吐鲁番北': 'TAR',
'铜陵北': 'KXH', '桐庐': 'TLU', '泰宁': 'TNS', '铜仁南': 'TNW', '天水南': 'TIJ', '通渭': 'TWJ', '田心东': 'KQQ',
'汤逊湖': 'THN', '藤县': 'TAZ', '太原南': 'TNV', '通远堡西': 'TST', '桐梓北': 'TBE', '桐梓东': 'TDE', '通州': 'TOP',
'吴川': 'WAQ', '文登东': 'WGK', '潍坊北': 'WJK', '五府山': 'WFG', '威虎岭北': 'WBL', '威海北': 'WHK', '苇河西': 'WIB',
'温江': 'WJE', '乌兰察布': 'WPC', '五龙背东': 'WMT', '乌龙泉南': 'WFN', '乌兰木图': 'VLT', '五女山': 'WET', '武胜': 'WSE',
'五通': 'WTZ', '无为': 'IIH', '瓦屋山': 'WAH', '闻喜西': 'WOV', '武义北': 'WDH', '武夷山北': 'WBS', '武夷山东': 'WCS',
'婺源': 'WYG', '渭源': 'WEJ', '万州北': 'WZE', '武陟': 'WIF', '梧州南': 'WBZ', '兴安北': 'XDZ', '许昌东': 'XVF',
'项城': 'ERN', '新都东': 'EWW', '西渡': 'XDA', '西丰': 'XFT', '先锋': 'NQQ', '湘府路': 'FVQ', '襄汾西': 'XTV',
'孝感北': 'XJN', '孝感东': 'GDN', '西湖东': 'WDQ', '新化南': 'EJQ', '新晃西': 'EWQ', '新津': 'IRW', '小金口': 'NKQ',
'辛集南': 'IJP', '新津南': 'ITW', '西来': 'XLE', '新民北': 'XOT', '厦 门': 'EMS', '咸宁东': 'XKN', '咸宁南': 'UNN',
'溆浦南': 'EMQ', '西平西': 'EGQ', '响水县': 'XSU', '湘潭北': 'EDQ', '邢台东': 'EDP', '西乌旗': 'XWC', '修武西': 'EXF',
'修文县': 'XWE', '萧县北': 'QSH', '新香坊北': 'RHB', '新乡东': 'EGF', '新余北': 'XBG', '西阳村': 'XQF', '信阳东': 'OYN',
'咸阳秦都': 'XOY', '仙游': 'XWS', '祥云': 'XQM', '新郑机场': 'EZF', '香樟路': 'FNQ', '忻州西': 'IXV', '雅安': 'YAE',
'永安南': 'YQS', '迎宾路': 'YFW', '亚布力西': 'YSB', '永城北': 'RGH', '盐城北': 'AEH', '运城北': 'ABV', '永川东': 'WMW',
'禹城东': 'YSK', '宜春': 'YEG', '岳池': 'AWW', '云东海': 'NAQ', '姚渡': 'AOJ', '云浮东': 'IXQ', '永福南': 'YBZ',
'雨格': 'VTM', '洋河': 'GTH', '永济北': 'AJV', '弋江': 'RVH', '延吉西': 'YXL', '永康南': 'QUH', '依兰': 'YEB',
'运粮河': 'YEF', '炎陵': 'YAG', '杨陵南': 'YEY', '羊马': 'YME', '一面坡北': 'YXB', '伊敏': 'YMX', '郁南': 'YKQ',
'云南驿': 'ANM', '银瓶': 'KPQ', '延平西': 'YWS', '原平西': 'IPV', '杨桥': 'YQA', '阳曲西': 'IQV', '阳朔': 'YCZ',
'永寿': 'ASY', '云山': 'KZQ', '玉山南': 'YGG', '雁石南': 'YMS', '永泰': 'YTS', '银滩': 'CTQ', '鹰潭北': 'YKG',
'烟台南': 'YLK', '伊通': 'YTL', '烟台西': 'YTK', '尤溪': 'YXS', '云霄': 'YBS', '宜兴': 'YUH', '玉溪': 'AXM',
'阳信': 'YVK', '应县': 'YZV', '攸县南': 'YXG', '洋县西': 'YXY', '义县西': 'YSD', '余姚北': 'CTH', '榆中': 'IZJ',
'诏安': 'ZDS', '淄博北': 'ZRK', '正定机场': 'ZHP', '纸坊东': 'ZMN', '准格尔': 'ZEC', '庄河北': 'ZUT', '昭化': 'ZHW',
'织金北': 'ZJE', '张家川': 'ZIJ', '芷江': 'ZPQ', '织金': 'IZW', '仲恺': 'KKQ', '曾口': 'ZKE', '珠琳': 'ZOM',
'左岭': 'ZSN', '樟木头东': 'ZRQ', '驻马店西': 'ZLN', '邹平': 'ZLK', '漳浦': 'ZCS', '漳平西': 'ZXG', '章丘北': 'ZVK',
'肇庆东': 'FCQ', '庄桥': 'ZQH', '昭山': 'KWQ', '钟山西': 'ZAZ', '朱砂古镇': 'ZSE', '漳县': 'ZXJ', '资阳北': 'FYW',
'遵义': 'ZYE', '遵义南': 'ZNE', '张掖西': 'ZEJ', '资中北': 'WZW', '涿州东': 'ZAP', '枣庄东': 'ZNK', '卓资东': 'ZDC',
'郑州东': 'ZAF', '株洲南': 'KVQ'}
from pet.models import PetType, PetGender, EarType, TailType, ColorType, EuthanasiaCause, DeathCause, DisposeCause, \
FursType, Breed
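# The reference data below is intentionally kept in Russian: these literal
# strings are the exact values loaded into the lookup tables (ear shapes,
# tail types, death/disposal causes, fur types, colors, breeds, genders,
# pet types, and operating organizations), one value per line.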
ears = """Стоячие
Полустоячие
Висячие
Купированные"""
tails = """Обычный
Саблевидный
Крючком
Поленом
Прутом
Пером
Серпом
Кольцом
Купированный"""
death_cause = """Естественная смерть
Эвтаназия"""
dispose_cause = """Смерть
Передача в собственность (под опеку)"""
euthanasia_cause = """Заболевание, несовместимое с жизнью
Травма, несовместимая с жизнью"""
furs_types_pussy = """
Короткая
Длинная
Бесшерстная
"""
furs_types_dogs = """
Обычная
Длинная
Гладкая
Вьющаяся
Жесткая
Бесшерстная
Нетипичная"""
colors_pussy = """черный
белый
голубой
шоколадный
красный
кремовый
черепаховый
серебристый
пегий
дымчатый
золотой
голубо-кремовый черепаховый
арлекин
биколор
шиншилла
коричневый
светло-коричневый
лиловый
черный с белым
красный с белым
голубой с белым
мраморный
дымчатый золотистый
циннамон (корица)
фавн (бежевый)
бледно-желтый
тигровый
черно-красный-белый
"""
colors_dogs = """черный
белый
лиловый
рыжий
кремовый
темно коричневый
светло коричневый
молочный
тигровый
триколор (красный/черный/лиловый)
биколор (черный/красный)
лиловый с белым
черно-белый
перец с солью
голубой с подпалом
голубой с пятнами
чепрачный
мраморный
абрикосовый
палевый
волчий
соболиный
муругий
чалый
пегий"""
breeds_dog = """австралийская борзая, кенгуровая
австралийская овчарка
австралийский терьер
азавак
аиди, атласская овчарка
айну
акбаш
Акита - ину
акита ину
акита-ину
алабай
алапахский чистокровный бульдог
альпийский спаниель
альпине даксбракке
аляскинский маламут
американский бульдог
американский водяной спаниель
американский кокер спаниель
американский питбультерьер
американский стафордшир-терьер
американский той-фокстерьер
американский фоксхаунд
американский эскимоский шпиц
анатолийский карабаш
английская овчарка
английский бульдог
английский водяной спаниель
английский кокер спаниель
английский мастиф
английский пойнтер
английский сеттер
английский спрингер-спаниель
английский той-терьер
английский фоксхаунд
аппенцеллер
аргентинский дог
арденский бувье
армант, египетская овчарка
артезиано-нормандский бассет
артуазская гончая
арьежская гончая
афганская борзая
афганская гончая
аффенпинчер
баварская горная гончая
балканская гончая
банджарская борзая
барбе
бардино махеро
басенджи
баскетмейкер
бассетхаунд
беардед колли (хайланд колли)
бедлингтон терьер
белый английский терьер
бельгийская лакенуа
бельгийская малинуа
бельгийский брак
бельгийский гриффон
бельгийский грюнендаль
бельгийский ловенар
бельгийский тервюрен
бергамаско, бергамская овчарка
бернер лауфхунд
бернская горная пастушья собака
бернский зенненхунд
беспородная
бигль
бийи
бишон фризе
бладхаунд
блю лейси
бобтейл, староанглийская овчарка
бойкин спаниель
боксёр
Болонка
болонский бишон
большая англо-французская гончая
большая голубая гасконская гончая
большая древесная гончая
большая пиринейская собака
большая швейцарская горная пасту
большой вандейский гриффон
большой мюнстерлендер
большой немецкий шпиц
большой французский брак
бордер колли
бордер терьер
бордосский дог (французский мастифф)
борзой, русская псовая борзая
бородатая колли
босерон
боснийская грубошерстная гончая
бостон-терьер
брабансон
брак дюпюи
брандл-брак
бретонский эпаньоль
бриар
бруно-де-юра
брюссельский гриффон
булленбайзер
бульдог англии старого типа
бульмастиф
бультерьер
бультерьер миниатюрный
бурбонский брак
бурбуль
веймаренер
Вельш корги
вельш корги кардиган
вельш корги пемброук
вельш-спрингер-спаниель
вельш-терьер
венгерская борзая
вест хайленд уайт терьер
вестфальский таксообразный брак
ветерхаунд, голландский спаниель
водяной спаниель
водяной твид спаниель
вольпино итальано
восточно-европейская овчарка
восточносибирская лайка
выжла, венгерская легавая
гавайская собака пои
гаванский бишон
гамильтонстёваре
ганноверская гончая
германский дратхаар
германский жесткошерстный пойнте
германский короткошерстный пойнт
герта пойнтер
гладкошерстный ретривер
гладкошерстный фокстерьер
глен-оф-имаал-терьер
голая собака инков
голландская овчарка
голландский смаусхонд
голландский тульпхонд
голубой гасконский бассет
голубой овернский брак
голубой пикардийский эпаньоль
голубой хиллер
голштинская гончая
гончая плотта
гончая стефена
гончая тальбота
гордон-сеттер
гранди поденгу португезу
грейхаунд
гренландская собака
греческая заячья гончая
греческая овчарка
Гриффон
гриффон кортальса
далматин
датский брохольмер
денди-динмонт терьер
джек рассел терьер
джек-рассел-терьер
джиндо
Доберман
доберман-пинчер
дратхаар
древер
древесная тенессийская тигровая
дрентская куропаточная собака
другая
дункер
западно-сибирская лайка
зауерландский брак
зауерландский хольцбрак
золотистый ретривер
ирландский водяной спаниель
ирландский волкодав (вольфхаунд)
ирландский красно-белый сеттер
ирландский сеттер
ирландский терьер
исландская собака
испанский брак, испанская легавая
испанский гальго, испанская борзая
испанский мастиф
истрийская гончая
итальянский брак
йемтхунд, шведский элкхаунд
йоркширский терьер
ка де бо
Кавалер кинг чарльз спаниель
кавказская овчарка
кай кен
кай лео
кан да серра ди айреш
кан да серра ди эштрела
кан ди агуа
кангал
кане корсо
каракачанская овчарка
карело-финская лайка
карельская медвежья собака
карликовый пинчер
каролинская собака
карстская (красская) овчарка
каталонская овчарка
кеесхонд
келпи
кентукская шел хип
керн-терьер
керри блю терьер
керри-бигль
кисю
китайская хохлатая собака
кламбер спаниель
коикерхондье
колли, шотландская овчарка
комондор, венгерская овчарка
котон де тулеар
крапчато-голубой кунхаунд
красный кунхаунд
кромфорлендер
ксолойтцкуинтли
кувас
кунхаунд уолкера
курляндская гончая
курцхаар
курчавый ретривер
куунхаунд
лабрадор
лабрадор ретривер
лабрадор-ретривер
лаготто-романьоло
лангедокская пастушья собака
лангхаар
ландсир
ланкаширский хилер
лапинкойра, финнский лаппхунд
лапинпорокойра
лапландский шпиц
ларри
левеск
левретка, итальянская борзая
лёвхен, львиная собачка
лейкланд-терьер
леонбергер
леопардовая гончая
леопардовая собака катахулы
лёрчер
литовская гончая
лудогорская гончая
лхасский апсо
люненбургская гончая
люцернер лауфхунд
малая англо-французская гончая
малый брабансон
малый вандейский бассет-гриффон
малый мюнстерлендер
малый французский брак
мальтез, мальтийский бишон
манчестерский терьер
маратхская борзая
маремма
мастино наполетано
медвежья канадская собака талтан
медиу поденгу португезу
метис
микадо терьер
миниатюрный ксолойтцкуинтли
мино
миттельшнауцер
монтебёф
мопс
московская сторожевая
московский длинношерстный той-терьер
московский дракон
московский короткошерстный той-терьер
мууди
немецкая овчарка
немецкая оленегонная лайка
немецкий вахтельхунд
немецкий вольфшпиц
немецкий дог
немецкий карликовый шпиц
немецкий шпиц
нивернейский гриффон
новогвинейская поющая собака
новошотландский ретривер
норботтен шпиц
норвежский бухунд
норвежский дункер хаунд
норвежский лундехунд
норвежский элкхаунд
норвич-терьер
нормандская гончая
норфолк терьер
ньюфаундленд
ойразир
ойуки терьер
орхидея петербургская
оттерхаунд, выдровая гончая
палевый бретонский бассет-гриффо
папильон
паттердейл-терьер
пекинес
пекиньу поденгу португезу
пердигейру португезу
пердигеро де бургос
пердигеро наварро
перро де пастор мальоркин
перро де преса мальоркин
перро де пресса канарио
перуанская орхидея инков
пикардийская овчарка
пинчер
пинчер миниатюрный
пиринейская овчарка
пиринейский мастиф
подгальянская овчарка
поденгу ди мастра
поденгу ибисенко, ибисская собака
польская низинная собака
польский огар
помераниан
померанский шпиц
понт-одмерский эпаньоль
порселен, фарфоровая гончая
посавская гончая
пражский крысарик
пуатвен, пуатвинская гончая
пудель
пудель миниатюрный
пудель стандартный
пудель-пойнтер
пули
пуми
пшеничный мягкошерстный терьер
рампурская борзая
растреадор бразилейру
рафейру ду алентежу
ретривер
Риджбек
ризеншнауцер
родезийский риджбек
ротвейлер
румынская овчарка
русская гончая
русская пегая гончая
русский спаниель
русский той-терьер
русский черный терьер
русско-европейская лайка
рэт-терьер
саарлосвольфхунд
сабуесо эспаньоле де монте
сабуэсо эспаньоль де монте
салюки, персидская гончая
самоедская лайка
санин
сансю
сегуджио итальяно
сенбернар
сен-жерменский брак
серис
сеттер-гордон
сиба-ину
сибирская хаски
сибирский хаски
сийлихам терьер
сика ину
сикоку
силки терьер
синсю
скай терьер
скотч терьер
словацкий копов
словацкий чувач
слюги, арабская борзая
смаусхонд
смешанная
смитфильд-колли
смоландстёваре, смоландская гончая
солуки
спиноне
среднеазиатская овчарка
средний шнауцер
средняя англо-французская гончая
стабихаун
стародатская легавая
стафордширский бультерьер
стреллуфстёвер
суоменайокойра, финская гончая
суоменпастикорва, финский шпиц
суссекс-спаниель
тазы (среднеазиатская борзая)
тай риджбек
тайган, киргизская борзая
такса гладкошерстная
такса длинношерстная
такса жесткошерстная
такса кроличья
такса миниатюрная
такса стандартная
теломиан
тибетский мастиф
тибетский спаниель
тибетский терьер
тирольская гончая
той-пудель
той-терьер
тоса ину
уест-хайланд-уайт-терьер
уипет
фален, континентальный той-спаниель
фараоновая собака
фила бразилейру (бразильский мастиф)
фильд спаниель
финская лайка
финский шпиц
фландрский бувье
фокстерьер гладкошерстный
фокстерьер жесткошерстный
французская гончая
французский бульдог
французский эпаньоль
француский бульдог
фу куок
халенденстёваре, хальденская гончая
ханаанская собака
харрьер
хаски антарктическая
ховаварт
хорватская овчарка
хортая
хюгенхунд
хюнерхунд
цвергпинчер
цвергшнауцер
цвергшпиц
чау-чау
черный терьер
чесапик бей ретривер
чехословацкий влчак
чешский терьер
чешский фоусек
чинук
чихуахуа
чи-хуа-хуа
шапендус
шарпей
шарпланинская овчарка
швайсхунд
шведский вальхунд
швейцарская овчарка
швейцарский лауфхунд
шелти
шельти
шиллерстёваре
шипперке
ши-тцу
шотландский дирхаунд
шотландский терьер
шпиц
штихельхаар
Щвейцарский зенненхунд
энтлебухер зенненхунд
эпаньоль нэн
эрдели копо, трансильванская гончая
эрдельтерьер
эскимосская лайка
эстонская гончая
югославская грехцветная гончая
южная гончая
южнорусская овчарка
ягдтерьер
японский спаниель
японский стерьер
японский хин
японский шпиц
"""
breeds_cats = """абиссинская
американская жесткошерстная
американская короткошерстная
американский бобтейл
американский кёрл
ангорская
балинезийская
бенгальская
беспородная
бирма
бирманская пятнисто-краповая
бомбейская
британская голубая
британская короткошерстная
бурма
бурмилла
гаванна
герман рекс
девон рекс
домашняя
донский сфинкс
другая
европейская короткошерстная
египетская мау
канадский сфинкс
кимрик
корат
корниш-рекс
курильский бобтейл
манкс
манчкин
мейн кун
меконгский бобтейл
мексиканская
метис
мингонский бобтейл
мэйн кун
невская маскарадная
норвежская лесная дымчато-голубая
ориенталская короткошерстная
ориентальская длинношерстная
охос азулес
оцикет
персидская
пиксибоб
русская голубая
рысь домашняя
рэгдол
селкирк-рекс
сиамская
сибирская
сингапура
скоттиш страйт
скоттиш фолд
сноу-шу
сомали
сфинкс
тайская
тифани
тонкинез
турецкий ван
цейлонская
шартрез
шиншилла
шотландская вислоухая
экзотическая короткошерстная
японский бобтейл"""
genders = """Мужской
Женский"""
pet_type = """Кошка
Собака"""
operation_orgs = """ГБУ «Автомобильные дороги САО»
ГБУ «Автомобильные дороги СВАО»
ГБУ «Автомобильные дороги ВАО»
ГБУ «Автомобильные дороги ЮАО»
ГБУ «Автомобильные дороги ЮЗАО»
ГБУ «Автомобильные дороги ЮВАО»
ГБУ «Автомобильные дороги ЗАО»
ГБУ «Доринвест»"""
from manufacture.models import OperatingOrganization
def load_all():
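    """Populate all of the lookup tables with the reference data above."""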
PetType.load_initial(pet_type, None)
PetGender.load_initial(genders, None)
EarType.load_initial(ears, None)
TailType.load_initial(tails, None)
EuthanasiaCause.load_initial(euthanasia_cause, None)
DeathCause.load_initial(death_cause, None)
DisposeCause.load_initial(dispose_cause, None)
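    # NOTE: the lookups below assume load_initial() normalizes stored values
    # to lower case (the source strings above use 'Кошка'/'Собака'); adjust
    # the lookup values if it does not.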
dog = PetType.objects.get(value='собака')
cat = PetType.objects.get(value='кошка')
ColorType.load_initial(colors_dogs, dog)
FursType.load_initial(furs_types_dogs, dog)
ColorType.load_initial(colors_pussy, cat)
FursType.load_initial(furs_types_pussy, cat)
Breed.load_initial(breeds_cats, cat)
Breed.load_initial(breeds_dog, dog)
    OperatingOrganization.load_data(operation_orgs)
del a[:2]
| [
"[email protected]"
] | |
dee4c636e7942fb1628a33bd9034f00d3c0944cd | 4125b4e65bd523e2c8c44d0a269d11de3f246d9c | /jsont/transforms/xml.py | a55dbbb796cca2e236b85972001c744456e2f77e | [] | no_license | usnistgov/jsont | a3be1c2a9f3c16d670a4cb0ad12470b1b289a34c | 091f5471f2aaba71ffe7904b7ff620f09f4ebf15 | refs/heads/master | 2021-01-10T02:09:21.988933 | 2016-03-16T00:19:40 | 2016-03-16T00:19:40 | 53,922,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,959 | py | """
transforms for creating XML from JSON data
"""
import os, copy, re, textwrap, collections
import json as jsp
from ..exceptions import *
from ..base import Transform, ScopedDict, Context
from .std import JSON, Extract, StringTemplate, Function
from . import std
MODULE_NAME = __name__
TRANSFORMS_PKG = __name__.rsplit('.', 1)[0]
def _generate_name(spec, engine, tname=None, ttype=None):
return _generate_value(spec, engine, tname, ttype, True)
def _generate_value(spec, engine, tname=None, ttype=None, forname=False):
if spec is None:
return None
if not forname and (isinstance(spec, int) or isinstance(spec, float) or \
isinstance(spec, bool)):
return spec
if isinstance(spec, dict):
        if '$val' in spec:
spec = spec['$val']
if isinstance(spec, dict):
return engine.make_transform(spec)
if isinstance(spec, str) or isinstance(spec, unicode):
if not Function.matches(spec) and \
(spec == '' or ':' in spec or spec.startswith('/')):
return Extract({'select': spec}, engine, tname, ttype)
return engine.resolve_transform(spec)
else:
raise TransformConfigTypeError("spec", "string or object",
                                           type(spec), tname)
if isinstance(spec, str) or isinstance(spec, unicode):
if '{' in spec and '}' in spec:
# it's a string template
return StringTemplate({'content': spec}, engine,
"attr spec", "xml.attribute")
else:
raise TransformConfigTypeError("spec", "string or object",
                                           type(spec), tname)
return spec
def _generate_object(spec, engine, tname=None, ttype=None):
if spec is None:
return None
if isinstance(spec, dict):
# it's an object, either a transform or JSON template
        if '$val' not in spec:
# it's a JSON template
return JSON({'content': spec}, engine, tname, ttype)
else:
spec = spec["$val"]
if isinstance(spec, dict):
# it's an anonymous transform
return engine.make_transform(spec)
if isinstance(spec, str) or isinstance(spec, unicode):
if not Function.matches(spec) and \
(spec == '' or ':' in spec or spec.startswith('/')):
# it's a data pointer to select data
return Extract({'select': spec}, engine, tname, ttype)
# it's a named transform or transform function
return engine.resolve_transform(spec)
return spec
class ToAttribute(Transform):
"""
a transform type for creating XML Attribute data. This transform type
    takes the following parameters:
:argument name: the local name for the attribute. If provided as a
string (with { and } characters), it will be treated
as a StringTemplate. If provided as an object with a
$val property, the name will be generated by the transform
implied by the $val property value. Any other kind of
object will be treated as an anonymous transform that
should produce a string value to provide the value of the
name.
    :argument value:     the value for the attribute; accepts the same forms
                         as name (a literal, a template, or a $val transform).
    :argument namespace: the (optional) namespace URI for the attribute.
    :argument prefix:    the (optional) preferred prefix for that namespace.
"""
def mkfn(self, config, engine):
tname = self.name or '(xml)'
ttype = "xml.attribute"
try:
name = _generate_name(config['name'], engine, tname+" Attr name",
ttype)
except KeyError, ex:
raise MissingTransformData("name", self.name)
try:
value = _generate_value(config['value'], engine,
tname+" Attr val",ttype)
except KeyError, ex:
raise MissingTransformData("value", self.name)
ns = _generate_name(config.get('namespace'), engine,
tname+" Attr ns", ttype)
pref = _generate_name(config.get('prefix'), engine,
tname+" Attr prefix", ttype)
def impl(input, context, *args):
out = {}
out['name'] = name
if isinstance(name, Transform):
out['name'] = name(input, context)
out['value'] = value
if isinstance(value, Transform):
out['value'] = value(input, context)
if ns:
out['namespace'] = ns
if isinstance(ns, Transform):
out['namespace'] = ns(input, context)
if pref:
out['prefix'] = pref
if isinstance(pref, Transform):
out['prefix'] = pref(input, context)
return out
return impl
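# A sketch of the spec shape ToAttribute consumes (the values below are
# illustrative only, not taken from any real stylesheet): each field may be
# a literal, a {brace} template, or a {"$val": ...} sub-transform, as the
# docstring above describes.
_EXAMPLE_ATTRIBUTE_SPEC = {
    "name": "id",
    "value": "/curation/identifier",
    "prefix": "ex"
}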
class ToElementContent(Transform):
def mkfn(self, config, engine):
ttype = "xml.elementContent"
name = self.name or "(anon)"
attrs = None
if config.has_key("attrs"):
# attrs can be
# 1) a list where each element forms an attribute
# 2) a dict representing a transform ({$val}, {$type}) that
# generates a list of attributes
# 3) a string providing a named transform or datapointer that
# generates a list of attributes
if isinstance(config['attrs'], list):
attrs = []
for attr in config['attrs']:
attr = self._generate_attribute(attr, engine,
"{0} attr".format(name),
ttype)
attrs.append(attr)
else:
# some reference to a transform that should result in an array.
# the value is interpreted just like a metaproperty directive
attrs = std.resolve_meta_property(config['attrs'], engine,
name+':(attrs)')
children = None
if config.has_key("children"):
if isinstance(config['children'], list) or \
isinstance(config['children'], tuple):
children = []
for child in config['children']:
if isinstance(child, list) or isinstance(child, tuple):
raise TransformConfigTypeError("children item",
"str or dict",type(child),
name)
elif isinstance(child, collections.Mapping):
child = self._generate_element(child, engine,
"{0} child".format(name),
ttype)
children.append(child)
else:
# some reference to a transform that should result in an array.
# the value is interpreted just like a metaproperty directive
children = std.resolve_meta_property(config['children'], engine,
name+':(children)')
def impl(input, context, *args):
out = {}
if attrs is not None:
if isinstance(attrs, Transform):
out['attrs'] = attrs(input, context)
else:
ol = []
for attr in attrs:
if isinstance(attr, Transform):
attr = attr(input, context)
elif isinstance(attr, collections.Mapping):
if '$ins' in attr:
attr = attr['$ins'](input, context)
if hasattr(attr, '__iter__'):
ol.extend(attr)
continue
elif '$upd' in attr:
# shouldn't happen
attr = attr['$upd'](input, context)
ol.append(attr)
out['attrs'] = ol
if children is not None:
if isinstance(children, Transform):
out['children'] = children(input, context)
else:
ol = []
for child in children:
if isinstance(child, Transform):
child = child(input, context)
elif isinstance(child, collections.Mapping):
if '$ins' in child:
child = child['$ins'](input, context)
if hasattr(child, '__iter__'):
ol.extend(child)
continue
elif '$upd' in child:
# shouldn't happen
child = child['$upd'](input, context)
ol.append(child)
out['children'] = ol
return out
return impl
def _generate_attribute(self, spec, engine, name, type=None):
return self._gen_cont_item(spec, engine, name, ToAttribute, type)
def _gen_cont_item(self, spec, engine, name, cls, type=None):
if isinstance(spec, collections.Mapping):
if '$val' in spec:
return std.resolve_meta_directive(spec['$val'], engine,
name+":$val")
if '$ins' in spec or '$upd' in spec:
key = '$upd'
if '$ins' in spec:
key = '$ins'
spec[key] = std.resolve_meta_directive(spec[key], engine,
name+":"+key)
return spec
# assume this is a cls transform if it isn't some other
# reference to a transform
return std.resolve_meta_directive(spec, engine, name, cls)
def _generate_element(self, spec, engine, name, type=None):
return self._gen_cont_item(spec, engine, name, ToElement, type)
class ToElement(Transform):
def mkfn(self, config, engine):
tname = self.name or '(xml)'
ttype = "xml.element"
try:
name = _generate_name(config['name'], engine,
tname+" Element name", ttype)
except KeyError, ex:
raise MissingTransformData("name", self.name)
ns = _generate_value(config.get('namespace'), engine, tname+" El ns",
ttype)
pref = _generate_name(config.get('prefix'), engine, tname+" El prefix",
ttype)
content = self._generate_content(config.get('content'), engine,
tname+" content", ttype)
prefixes = _generate_object(config.get('prefixes'), engine,
tname+" prefixes", ttype)
def impl(input, context, *args):
out = {}
out['name'] = name
if isinstance(name, Transform):
out['name'] = name(input, context)
out['content'] = content
if isinstance(content, Transform):
out['content'] = content(input, context)
if ns:
out['namespace'] = ns
if isinstance(ns, Transform):
out['namespace'] = ns(input, context)
if pref:
out['prefix'] = pref
if isinstance(pref, Transform):
out['prefix'] = pref(input, context)
if prefixes:
out['prefixes'] = prefixes
if isinstance(prefixes, Transform):
out['prefixes'] = prefixes(input, context)
            if 'hints' in config:
out['hints'] = copy.deepcopy(config['hints'])
return out
return impl
def _generate_content(self, spec, engine, tname, ttype):
# a plain object (no $val or $type) is an implicit ToElementContent
# transform
if spec is None:
return None
return std.resolve_meta_directive(spec, engine, tname, ToElementContent)
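# A sketch of the spec shape ToElement consumes (values are illustrative
# only): 'content' follows the xml.elementContent form, and children may be
# text templates, data pointers, or nested element specs.
_EXAMPLE_ELEMENT_SPEC = {
    "name": "record",
    "namespace": "urn:example:ns",
    "content": {
        "attrs": [{"name": "id", "value": "/id"}],
        "children": ["{title}"]
    }
}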
class ToTextElement(Transform):
"""
formats an XML element that contains only text as its contents (attributes
are optional).
"""
def mkfn(self, config, engine):
elcfg = copy.deepcopy(config)
elcfg['$type'] = 'element'
content = { }
if "value" in elcfg:
content['children'] = [ elcfg['value'] ]
del elcfg['value']
if "attrs" in elcfg:
content['attrs'] = [ elcfg['attrs'] ]
del elcfg['attrs']
elcfg['content'] = content
transf = ToElement(elcfg, engine, self.name, 'element', True)
def impl(input, context, *args):
return transf(input, context, *args)
return impl
class ToXML(Transform):
"""
formats XML data into an output string
"""
def mkfn(self, config, engine):
try:
transf = config['element']
except KeyError, ex:
raise MissingTransformData("element", self.name)
if isinstance(transf, dict):
transf = engine.make_transform(transf)
elif isinstance(transf, str) or isinstance(transf, unicode):
transf = engine.resolve_transform(transf)
else:
raise TransformConfigTypeError('transform', 'dict or str',
type(transf))
def impl(input, context):
root = transf(input, context)
return format_element(root, context, None, self.name)
return impl
types = {
"xml.print": ToXML,
"xml.attribute": ToAttribute,
"xml.elementContent": ToElementContent,
"xml.element": ToElement,
"xml.textElement": ToTextElement
}
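# For illustration, a transform configuration exercising these types might
# look like the following sketch (field values here are hypothetical, and
# the exact "$type" resolution is up to the engine's registry):
#
#   { "$type": "xml.print",
#     "element": { "$type": "xml.element",
#                  "name": "record",
#                  "content": { "children": [ "/some/data/pointer" ] } } }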
class InvalidXMLError(TransformInputTypeError):
"""
XML cannot be properly formatted because the input XML data is invalid.
"""
def __init__(self, got=None, input=None, context=None, message=None):
if not got:
got = str(type(input))
if not message:
msg = "Invalid XML data: need an object"
if got:
msg += ", got " + got + "."
message = msg
super(InvalidXMLError, self).__init__("object", got, None,
input, context, message)
def format_element(el, context, prefixes=None, transname=None):
"""
format the data in an element data object into XML according to preferences
from the context.
"""
if not isinstance(el, dict):
raise InvalidXMLError(input=el, context=context)
if prefixes is None:
prefixes = ScopedDict()
else:
prefixes = ScopedDict(prefixes)
hints = {}
if el.get('hints'):
# The element data carries hints on rendering the data; these override
# preferences given in the context. So...
# load the hints about formatting into our context
hints = el['hints']
context = Context(context)
context.update(hints)
if context.get('xml.style','pretty') == 'compact':
context['xml.base_indent'] = 0
context['xml.indent'] = -1
context['xml.text_packing'] = 'compact'
elif context.get('xml.value_pad', 0) > 0:
context['xml.text_packing'] = 'pretty'
indent = context.get('xml.base_indent', 0)
step = context.get('xml.indent', 2)
try:
# determine if we need a prefix
prefix, pfxdefs = determine_prefix(el.get('namespace'), el.get('prefix'),
context, prefixes)
# preface opening tag with indent
indentsp = (indent * ' ')
opentag = indentsp + '<' + prefix + el['name']
# assemble the attribute data
        # copy the list so that the xmlns declarations appended below do not
        # mutate the caller's element data
        atts = list(el.get('content', {}).get('attrs', []))
if pfxdefs:
atts.extend(pfxdefs)
if el.get('prefixes'):
for p, ns in el['prefixes'].items():
if ns not in prefixes or prefixes[ns] != p:
prefixes[ns] = p
atts.append('xmlns:{0}="{1}"'.format(p, ns))
if el.get('content', {}).get('children'):
            # pre-declare any namespace prefixes the child elements will need
            for child in el['content']['children']:
                if not isinstance(child, dict):
                    continue
                p, pfxdefs = determine_prefix(child.get('namespace'),
                                              child.get('prefix'),
                                              context, prefixes)
                atts.extend(pfxdefs)
# now insert attributes into the opening tag
if atts:
atts = format_atts(atts, len(opentag), context, prefixes)
opentag += atts
# format the child nodes
if not el.get('content', {}).get('children'):
# there are none
opentag += '/>'
if step >= 0:
opentag += '\n'
return opentag
else:
opentag += '>'
closetag = '</' + prefix + el['name'] + '>'
maxlen = context.get('xml.max_line_length', 78)
minlen = context.get('xml.min_line_length', 30)
if len(el['content']['children']) == 1 and \
(isinstance(el['content']['children'][0], str) or
isinstance(el['content']['children'][0], unicode)):
# single text value
child = el['content']['children'][0]
if context.get('xml.value_pad', 0) <= 0 or \
context.get('xml.text_packing','pretty') == 'pretty' and \
len(child) < maxlen - len(opentag) - len(closetag):
#short enough to fit into one line
if context.get('xml.value_pad', 0) > 0:
pad = context['xml.value_pad'] * ' '
child = pad + child + pad
# return the single line
return opentag + child + closetag
# treat like multi-child content
parts = [ opentag ]
subcontext = Context(context)
if step < 0:
# don't insert newlines
subcontext['xml.base_indent'] = 0
subcontext['xml.indent'] = -1
else:
subcontext['xml.base_indent'] = indent + step
for child in el['content']['children']:
if isinstance(child, str) or isinstance(child, unicode):
parts.append(format_text(child, subcontext))
else:
parts.append(format_element(child, subcontext, prefixes))
if step < 0:
parts.append(closetag)
return ''.join(parts)
parts.append(indentsp + closetag)
return '\n'.join(parts)
except KeyError, ex:
raise MissingXMLData.due_to(ex, transname)
def format_text(text, context=None):
"""
format the given text for inclusion as the content for an element
"""
if not isinstance(text, str) and not isinstance(text, unicode):
raise InvalidXMLError(input=text, context=context,
message="Invalid XML: expected text, got "+str(type(text))+": "+text)
if context is None:
context = Context()
step = context.get('xml.indent', 2)
pack = context.get('xml.text_packing', 'compact')
if pack == 'compact' or step < 0:
return text
indent = context.get('xml.base_indent', 0)
if pack == 'loose':
return (indent * ' ') + text
maxlen = context.get('xml.max_line_length', 78)
minlen = context.get('xml.min_line_length', 30)
sublen = maxlen - indent
if sublen < minlen:
sublen = minlen
return "\n".join(map(lambda l: (indent * ' ') + l,
textwrap.wrap(text, sublen)))
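# For example, with xml.text_packing='pretty' a long text child is wrapped
# to the configured line lengths and each line is prefixed with
# xml.base_indent spaces; 'compact' returns the text unchanged, and 'loose'
# only indents it.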
def determine_prefix(ns, prefix, context, prefixes):
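    """
    resolve the prefix to use for the given namespace, consulting and
    updating the prefixes map.  Returns a (prefix, xmlns-attribute-defs)
    tuple in which a non-empty prefix includes the trailing colon;
    auto-generated prefixes take the form ns1, ns2, ...
    """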
pfxdefs = []
xmlns = context.get('xml.xmlns', '')
if ns and not context.get('xml.prefer_prefix', False) and ns == xmlns:
# namespace matches xmlns
return ('', pfxdefs)
if ns:
if prefix:
if prefix != prefixes.get(ns):
prefixes[ns] = prefix
pfxdefs.append('xmlns:{0}="{1}"'.format(prefix, ns))
else:
prefix = prefixes.get(ns)
if not prefix:
autopat = re.compile(r'^ns\d+')
nums = map(lambda i: int(i),
map(lambda p: p[2:],
filter(lambda x: autopat.match(x),
prefixes.values())))
if not nums:
nums = [0]
prefix = 'ns'+str(max(nums)+1)
prefixes[ns] = prefix
pfxdefs.append('xmlns:{0}="{1}"'.format(prefix, ns))
if prefix:
return (prefix+':', pfxdefs)
return ('', pfxdefs)
def format_atts(atts, indent, context, prefixes):
"""
format the attributes for insertion into an opening element tag.
When many attributes are present, these can be wrapped onto separate lines.
"""
if not isinstance(atts, list) and not isinstance(atts, tuple):
raise InvalidXMLError(input=atts, context=context,
message="Invalid XML: expected array, got "+str(type(atts))+": "+atts)
style = context.get('xml.style', 'pretty')
maxlen = context.get('xml.max_line_length', 78)
minlen = context.get('xml.min_line_length', 30)
attlen = maxlen - indent
out = ['']
atts = list(atts)
while len(atts) > 0:
att = atts.pop(0)
if isinstance(att, dict):
prefix, pfxdefs = determine_prefix(att.get('namespace'),
att.get('prefix'),
context, prefixes)
if len(pfxdefs) > 0:
atts.extend(pfxdefs)
nxt = prefix+att['name'] + '="' + att['value'] + '"'
else:
# assume it's already formatted (better be a string)
nxt = att
if style == 'pretty' and \
len(out[-1]) > minlen and len(nxt) + len(out[-1]) + 1 > attlen:
out.append('')
out[-1] += ' '+nxt
if style == 'pretty':
return ('\n'+(indent * ' ')).join(out)
return ''.join(out)
class MissingXMLData(TransformApplicationException):
def __init__(self, message, prop=None, input=None, context=None, name=None,
cause=None):
"""
construct the exception, providing an explanation.
:argument str message: an explanation of what went wrong
:argument str prop: the name of the missing XML property
:argument input: the JSON data that was being transformed
:argument context: the context at the point of the exception
:argument str name: the name of the transform being applied when
the exception occured. If None, the exception
is not known or not specific to a particular
transform.
:argument Exception cause: the exception representing the underlying
cause of the exception. If None, there
was no such underlying cause.
"""
super(MissingXMLData, self).__init__(message,input,context,name,cause)
self.prop = prop
@classmethod
def due_to(cls, cause, input=None, context=None, name=None):
prop = None
if isinstance(cause, KeyError):
prop = cause.args[0]
msg = "Missing XML data"
if name:
msg += " in '"+name+"' transform"
msg += ": "
msg += (prop or str(cause))
return cls(msg, prop, input, context, name, cause)
# load in stylesheet-based definitions
MOD_STYLESHEET_FILE = "xml_ss.json"
ssfile = os.path.join(os.path.dirname(__file__), MOD_STYLESHEET_FILE)
with open(ssfile) as fd:
MOD_STYLESHEET = jsp.load(fd)
del ssfile
# load the module's initial context data. The defaults are specified here
# for documentation purposes; however, values set within the stylesheet file
# will take precedence.
p = "xml."
def_context = {
    # The preferred maximum line length to use when wrapping element text
    # and attribute lists onto multiple lines
    #
    p+"max_line_length": 78,
    # The overall output style: 'pretty' indents and wraps the output,
    # while 'compact' suppresses indentation and newlines
    #
    p+"style": 'compact',
# The number of spaces to add to indentation with each step into the
# XML hierarchy
p+"indent": 2,
# The number of spaces to indent the current element. When printing
# an entire XML document, the current element is the root element; thus,
# the whole document will be indented by this much (plus what is added
# by xml.indent for the root's children). This parameter is updated
# automatically by xml.indent amounts as the document is printed.
p+"base_indent": 0
}
del p
# the module's default context data
MOD_CONTEXT = ScopedDict(def_context)
MOD_CONTEXT.update(MOD_STYLESHEET.get('context',{}))
MOD_STYLESHEET['context'] = MOD_CONTEXT
370b9d1e5738731995469d3572171a1eb0805860 | b20cc37e0b986a0b458f7f446d5025beee01ba7a | /326-power-of-three/326-power-of-three.py | ee9a38938a90148c4442109da4f8e747ba9f4374 | [] | no_license | Maruf-S/Competitve-programing | e5e405912a4c2d9a1fad35d66411964ecbc10d00 | 3134d9c1b7e987c6cec3c614512faab4114fe0bd | refs/heads/master | 2023-02-04T08:08:43.374565 | 2023-02-01T17:29:55 | 2023-02-01T17:29:55 | 225,399,606 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | class Solution:
def isPowerOfThree(self, n: int) -> bool:
        if n == 1:
            return True
        if n < 1 or n % 3 != 0:
            return False
        return self.isPowerOfThree(n // 3)
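# e.g. Solution().isPowerOfThree(27) -> True; Solution().isPowerOfThree(45) -> False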
"[email protected]"
] | |
54a9a094e0ee5716cca0e893e3b82f367b83dbe1 | a0dda8be5892a390836e19bf04ea1d098e92cf58 | /7章之后刷题/7章/求一元二次方程的解.py | 517c5ec018ad4fd2fec5dfce6394c34f9bf20cf7 | [] | no_license | wmm98/homework1 | d9eb67c7491affd8c7e77458ceadaf0357ea5e6b | cd1f7f78e8dbd03ad72c7a0fdc4a8dc8404f5fe2 | refs/heads/master | 2020-04-14T19:22:21.733111 | 2019-01-08T14:09:58 | 2019-01-08T14:09:58 | 164,055,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | """
【问题描述】一元二次方程:ax2+bx+c=0 (a ╪ 0)
【输入形式】输入a、b和c的值(有理数)
【输出形式】输出x的两个值,或者No(即没有有理数的解)
【样例输入】1 2.5 3
【样例输出】No
【样例输入】1 -2 1
【样例输出】1.00 1.00
【样例输出说明】输出的两个解保留两位小数,大的在前
"""
import math
a, b, c = input().split()
if a != 0:
a = float(a)
b = float(b)
c = float(c)
if b ** 2 - 4 * a * c < 0:
print("No")
else:
i = math.sqrt(b ** 2 - 4 * a * c)
x1 = (-b + i) / (2 * a)
x2 = (-b - i) / (2 * a)
if x1 > x2:
print("%.2f %.2f" % (x1, x2))
else:
print("%.2f %.2f" % (x2, x1)) | [
"[email protected]"
] | |
69219886e2fb7b8ea37b60e28d74a109c0dd00ec | 508bef828c3ce1f1c53fbe52397632ebcb392a7f | /excercise/DataOutput.py | 17f26dd4e5ecf58f8a646828a506123f3f85f981 | [] | no_license | wudangqibujie/excercise | bc19a7923836aae9d12e1147b1d282250c0037c6 | ff3e255e809e414fd43e7cf16e03466c91b613a2 | refs/heads/master | 2021-05-02T12:20:09.701592 | 2018-02-08T09:20:04 | 2018-02-08T09:20:04 | 120,738,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | import codecs
import json
class DataOutput(object):
def __init__(self):
self.datas = []
def store_data(self,data):
if data is None:
return
self.datas.append(data)
def output_txt(self):
print(self.datas)
print(len(self.datas))
def output_html(self):
fout = codecs.open("baike.html",'w',encoding="utf-8")
fout.write("<html>")
fout.write("<body>")
fout.write("<table>")
for data in self.datas:
fout.write("<tr>")
fout.write("<td>%s</td>"%data['url'])
fout.write("<td>%s</td>"%data['title'])
fout.write("<td>%s</td>"%data['summary'])
fout.write("</tr>")
self.datas.remove(data)
fout.write("</table>")
fout.write("</body>")
fout.write("</html>")
fout.close()
| [
"[email protected]"
] | |
cb404b4d172d193fdd675a194c0f74c3dc2bcbec | 7e8c799037f47345cb12a6fc7911610d7ac63640 | /blog/models/kategory.py | db1f3aed151335e537ac97042a080087a708be89 | [] | no_license | SonerArslan2019/Django_Blog_kilicarslan | bcfd953dfc0d530217c02ff9bf1428160a63e68e | 8959708689b830c387c76414545add2474beeddd | refs/heads/master | 2023-06-04T06:19:28.209246 | 2021-06-27T14:50:40 | 2021-06-27T14:50:40 | 346,781,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | from django.db import models
from autoslug import AutoSlugField
class KategoriModel(models.Model):
isim = models.CharField(max_length=30, blank=False, null=False)
slug = AutoSlugField(populate_from='isim', unique=True)
class Meta:
db_table = 'kategori'
verbose_name_plural = 'Kategoriler'
verbose_name = 'Kategori'
def __str__(self):
return self.isim
| [
"[email protected]"
] | |
314cb13e39d3721bdde09602caf9430c87651252 | e2baefd54ed6f44d351d867b8d8eb937424fae23 | /class-10/game-of-greed/tests/version_4/test_keep_scorers.py | ad3c4e7685ac39c0a0bdef3b04d08c355b08a355 | [] | no_license | codefellows/seattle-python-401n2 | 60cc100b5b6dc8bc3d72784a5ec8f9c2c9db1942 | 24148a2ee6526104566b5df64945a40222cfb3e2 | refs/heads/main | 2023-04-23T15:35:49.820538 | 2021-04-24T18:02:51 | 2021-04-24T18:02:51 | 316,109,616 | 8 | 16 | null | 2021-03-10T05:36:09 | 2020-11-26T03:06:19 | Jupyter Notebook | UTF-8 | Python | false | false | 460 | py | import pytest
from game_of_greed.game_logic import GameLogic
pytestmark = [pytest.mark.version_4]
@pytest.mark.parametrize(
"test_input,expected",
[
((1, 1, 1, 2, 3, 4), (1, 1, 1)),
((1, 1, 5, 2, 3, 5), (1, 1, 5, 5)),
((1, 6, 5, 2, 3, 4), (1, 6, 5, 2, 3, 4)),
((1, 6, 5, 2, 3), (1, 5)),
],
)
def test_get_scorers(test_input, expected):
actual = GameLogic.get_scorers(test_input)
assert actual == expected
| [
"[email protected]"
] | |
270a1aae2fd8c1049b534b01bbc44c37a2580987 | 79e19819aec49b500825f82a7de149eb6a0ba81d | /leetcode/704.py | 67ae34f57125b3c89b84138ec42378a347353c62 | [] | no_license | seoyeonhwng/algorithm | 635e5dc4a2e9e1c50dc0c75d9a2a334110bb8e26 | 90406ee75de69996e666ea505ff5d9045c2ad941 | refs/heads/master | 2023-05-03T16:51:48.454619 | 2021-05-26T00:54:40 | 2021-05-26T00:54:40 | 297,548,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | class Solution:
def search(self, nums: List[int], target: int) -> int:
try:
return nums.index(target)
except ValueError:
return -1 | [
"[email protected]"
] | |
d63a8cf77a7bbfd03771436916c9f84472b354e1 | 13efb3baccc678f9d57776244c7dc067e486df9e | /students/migrations/0021_auto_20160517_1221.py | e9210404690fc2e9213f7ae9d9febd5fc7d78325 | [] | no_license | grydinywka/studentsdb | fd109dfe60f4ffd666c12acbe645ca14e064c29b | bc2c968538f88bd539d931a7caf1b693fbb65843 | refs/heads/master | 2020-05-31T00:05:22.638475 | 2016-08-26T17:00:43 | 2016-08-26T17:00:43 | 31,331,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import students.models.Result_exam
class Migration(migrations.Migration):
dependencies = [
('students', '0020_auto_20160421_0936'),
]
operations = [
migrations.AlterField(
model_name='result_exam',
name='valuetion',
field=models.DecimalField(verbose_name='\u041e\u0446\u0456\u043d\u043a\u0430', max_digits=2, decimal_places=0, validators=[students.models.Result_exam.validate_value]),
),
]
| [
"[email protected]"
] | |
adc4c82afca2554fb969296bae54e2b26b9e6e9f | 45c170fb0673deece06f3055979ece25c3210380 | /toontown/shtiker/DisguisePage.py | 3ee2886febb3d641d32591427874690f460eb813 | [] | no_license | MTTPAM/PublicRelease | 5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f | 825f562d5021c65d40115d64523bb850feff6a98 | refs/heads/master | 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 | Python | UTF-8 | Python | false | false | 9,526 | py | #Embedded file name: toontown.shtiker.DisguisePage
from toontown.shtiker import ShtikerPage
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.suit import SuitDNA
from toontown.battle import SuitBattleGlobals
from toontown.minigame import MinigamePowerMeter
from toontown.coghq import CogDisguiseGlobals
DeptColors = (Vec4(0.647, 0.608, 0.596, 1.0),
Vec4(0.588, 0.635, 0.671, 1.0),
Vec4(0.596, 0.714, 0.659, 1.0),
Vec4(0.761, 0.678, 0.69, 1.0),
Vec4(0.5, 0.5, 0.5, 1.0))
PartNames = ('lUpleg', 'lLowleg', 'lShoe', 'rUpleg', 'rLowleg', 'rShoe', 'lShoulder', 'rShoulder', 'chest', 'waist', 'hip', 'lUparm', 'lLowarm', 'lHand', 'rUparm', 'rLowarm', 'rHand')
class DisguisePage(ShtikerPage.ShtikerPage):
meterColor = Vec4(0.87, 0.87, 0.827, 1.0)
meterActiveColor = Vec4(0.7, 0.3, 0.3, 1)
def __init__(self):
ShtikerPage.ShtikerPage.__init__(self)
self.activeTab = 0
self.progressTitle = None
def load(self):
ShtikerPage.ShtikerPage.load(self)
gui = loader.loadModel('phase_9/models/gui/cog_disguises')
icons = loader.loadModel('phase_3/models/gui/cog_icons')
self.frame = DirectFrame(parent=self, relief=None, scale=0.47, pos=(0.1, 1, 0))
self.bkgd = DirectFrame(parent=self.frame, geom=gui.find('**/base'), relief=None, scale=(0.98, 1, 1))
self.bkgd.setTextureOff(1)
self.buttons = []
self.pageFrame = DirectFrame(parent=self.frame, relief=None)
self.xOffset = 0.4
self.deptLabel = DirectLabel(parent=self.frame, text='', text_font=ToontownGlobals.getSuitFont(), text_style=3, text_fg=(1, 1, 1, 1), text_scale=TTLocalizer.DPdeptLabel, text_pos=(-0.1, 0.8))
DirectFrame(parent=self.frame, relief=None, geom=gui.find('**/pipe_frame'))
self.tube = DirectFrame(parent=self.frame, relief=None, geom=gui.find('**/tube'))
DirectFrame(parent=self.frame, relief=None, geom=gui.find('**/robot/face'))
DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_cog_disguises'), geom_pos=(0, 0.1, 0))
self.meritTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_merit_progress'), geom_pos=(0, 0.1, 0))
self.meritTitle.hide()
self.cogbuckTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_cashbuck_progress'), geom_pos=(0, 0.1, 0))
self.cogbuckTitle.hide()
self.juryNoticeTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_jury_notice_progress'), geom_pos=(0, 0.1, 0))
self.juryNoticeTitle.hide()
self.stockOptionTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_stock_option_progress'), geom_pos=(0, 0.1, 0))
self.stockOptionTitle.hide()
self.progressTitle = self.meritTitle
self.promotionTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_ready4promotion'), geom_pos=(0, 0.1, 0))
self.cogName = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=TTLocalizer.DPcogName, text_align=TextNode.ACenter, pos=(-0.948, 0, -1.15))
self.cogLevel = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=0.09, text_align=TextNode.ACenter, pos=(-0.91, 0, -1.02))
self.partFrame = DirectFrame(parent=self.frame, relief=None)
self.parts = []
for partNum in xrange(0, 17):
self.parts.append(DirectFrame(parent=self.partFrame, relief=None, geom=gui.find('**/robot/' + PartNames[partNum])))
self.holes = []
for partNum in xrange(0, 17):
self.holes.append(DirectFrame(parent=self.partFrame, relief=None, geom=gui.find('**/robot_hole/' + PartNames[partNum])))
self.cogPartRatio = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=0.08, text_align=TextNode.ACenter, pos=(-0.91, 0, -0.82))
self.cogMeritRatio = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=0.08, text_align=TextNode.ACenter, pos=(0.45, 0, -0.36))
meterFace = gui.find('**/meter_face_whole')
meterFaceHalf = gui.find('**/meter_face_half')
self.meterFace = DirectLabel(parent=self.frame, relief=None, geom=meterFace, color=self.meterColor, pos=(0.455, 0.0, 0.04))
self.meterFaceHalf1 = DirectLabel(parent=self.frame, relief=None, geom=meterFaceHalf, color=self.meterActiveColor, pos=(0.455, 0.0, 0.04))
self.meterFaceHalf2 = DirectLabel(parent=self.frame, relief=None, geom=meterFaceHalf, color=self.meterColor, pos=(0.455, 0.0, 0.04))
for dept in xrange(len(SuitDNA.suitDepts)):
button = DirectButton(parent=self.frame, relief=None, pos=(-1 + self.xOffset * dept, 0, 1.05), image=icons.find(SuitDNA.suitDeptModelPaths[dept]), image_scale=0.25, image2_color=(1, 1, 1, 0.75), command=self.doTab, extraArgs=[dept])
self.buttons.append(button)
self.frame.hide()
self.activeTab = 3
self.updatePage()
def unload(self):
ShtikerPage.ShtikerPage.unload(self)
def enter(self):
self.frame.show()
ShtikerPage.ShtikerPage.enter(self)
def exit(self):
self.frame.hide()
ShtikerPage.ShtikerPage.exit(self)
def updatePage(self):
self.doTab(self.activeTab)
def updatePartsDisplay(self, index, numParts, numPartsRequired):
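        # numParts is a bitmask of the cog-suit parts the toon owns; walk the
        # 17 body-part models, showing a part (and hiding its "hole") when
        # its bit is set, and carrying the previous state across bits outside
        # this suit's grouping mask (e.g. mirrored limb pieces).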
partBitmask = 1
groupingBitmask = CogDisguiseGlobals.PartsPerSuitBitmasks[index]
previousPart = 0
for part in self.parts:
groupingBit = groupingBitmask & partBitmask
if numParts & partBitmask & groupingBit:
part.show()
self.holes[self.parts.index(part)].hide()
if groupingBit:
previousPart = 1
elif not groupingBit and previousPart:
part.show()
self.holes[self.parts.index(part)].hide()
else:
self.holes[self.parts.index(part)].show()
part.hide()
previousPart = 0
partBitmask = partBitmask << 1
def updateMeritBar(self, dept):
merits = base.localAvatar.cogMerits[dept]
totalMerits = CogDisguiseGlobals.getTotalMerits(base.localAvatar, dept)
if totalMerits == 0:
progress = 1
else:
progress = min(merits / float(totalMerits), 1)
self.updateMeritDial(progress)
if base.localAvatar.readyForPromotion(dept):
self.cogMeritRatio['text'] = TTLocalizer.DisguisePageMeritFull
self.promotionTitle.show()
self.progressTitle.hide()
else:
self.cogMeritRatio['text'] = '%d/%d' % (merits, totalMerits)
self.promotionTitle.hide()
self.progressTitle.show()
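    # The dial is two half-disc overlays on a full face: meterFaceHalf1 (active
    # colour) stays fixed while meterFaceHalf2 rotates 0-180 degrees to sweep
    # the boundary, swapping its colour once progress passes the halfway mark.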
def updateMeritDial(self, progress):
if progress == 0:
self.meterFaceHalf1.hide()
self.meterFaceHalf2.hide()
self.meterFace.setColor(self.meterColor)
elif progress == 1:
self.meterFaceHalf1.hide()
self.meterFaceHalf2.hide()
self.meterFace.setColor(self.meterActiveColor)
else:
self.meterFaceHalf1.show()
self.meterFaceHalf2.show()
self.meterFace.setColor(self.meterColor)
if progress < 0.5:
self.meterFaceHalf2.setColor(self.meterColor)
else:
self.meterFaceHalf2.setColor(self.meterActiveColor)
progress = progress - 0.5
self.meterFaceHalf2.setR(180 * (progress / 0.5))
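    # Switch to the given cog-department tab: recolour the background, swap in
    # that department's progress title, then refresh name, level, parts and merits.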
def doTab(self, index):
self.activeTab = index
self.bkgd.setColor(DeptColors[index])
self.deptLabel['text'] = (SuitDNA.suitDeptFullnames[SuitDNA.suitDepts[index]],)
cogIndex = base.localAvatar.cogTypes[index] + SuitDNA.suitsPerDept * index
cog = SuitDNA.suitHeadTypes[cogIndex]
self.progressTitle.hide()
if SuitDNA.suitDepts[index] == 'm':
self.progressTitle = self.cogbuckTitle
elif SuitDNA.suitDepts[index] == 'l':
self.progressTitle = self.juryNoticeTitle
elif SuitDNA.suitDepts[index] == 'c':
self.progressTitle = self.stockOptionTitle
else:
self.progressTitle = self.meritTitle
self.progressTitle.show()
self.cogName['text'] = SuitBattleGlobals.SuitAttributes[cog]['name']
cogLevel = base.localAvatar.cogLevels[index]
if base.localAvatar.cogReviveLevels[self.activeTab] > -1:
cogLevel = base.localAvatar.cogReviveLevels[self.activeTab]
self.cogLevel['text_scale'] = 0.065
self.cogLevel['text'] = TTLocalizer.DisguisePageCogLevel % str(cogLevel + 1) + TTLocalizer.SkeleRevivePostFix
else:
self.cogLevel['text_scale'] = 0.09
self.cogLevel['text'] = TTLocalizer.DisguisePageCogLevel % str(cogLevel + 1)
numParts = base.localAvatar.cogParts[index]
numPartsRequired = CogDisguiseGlobals.PartsPerSuit[index]
self.updatePartsDisplay(index, numParts, numPartsRequired)
self.updateMeritBar(index)
self.cogPartRatio['text'] = '%d/%d' % (CogDisguiseGlobals.getTotalParts(numParts), numPartsRequired)
| [
"[email protected]"
] | |
0ef2bcc9c688462fe238bb21496e160b45812ca0 | 3e4b8fe54f11bf36f3615c21fdc1dca0ed00fe72 | /month04/spider/day01/text/02_tieba.py | 63d1010207234daf73d93db6df89f20b4e00ef29 | [] | no_license | leinian85/year2019 | 30d66b1b209915301273f3c367bea224b1f449a4 | 2f573fa1c410e9db692bce65d445d0543fe39503 | refs/heads/master | 2020-06-21T20:06:34.220046 | 2019-11-04T06:37:02 | 2019-11-04T06:37:02 | 197,541,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | from urllib import request
from urllib import parse
import random
import time
from fake_useragent import UserAgent
import re
class TiebaSpider:
def __init__(self):
self.url = "http://tieba.baidu.com/f?kw={}&pn={}"
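        # kw is the url-encoded forum keyword; pn is the post offset (50 per page)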
def set_headers(self):
us = UserAgent()
self.headers = {"User-Agent":us.random}
    # fetch the page at url and return the decoded response body
def get_page(self, url):
self.set_headers()
req = request.Request(url=url, headers=self.headers)
res = request.urlopen(req)
html = res.read().decode()
return html
    # url-encode the tieba name for use in the query string
    def parse_page(self, name):
        return parse.quote(name)
    # save the fetched html to disk (utf-8, since tieba pages contain Chinese)
    def write_page(self, filename, html):
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(html)
def run(self, name, start, end):
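        # fetch pages start..end inclusive, saving each as <name>_<page>.html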
for page in range(start, end + 1):
url = self.url.format(self.parse_page(name), (page - 1) * 50)
filename = name + "_" + str(page) + ".html"
html = self.get_page(url)
print(url)
# imgs = self.get_imgs(html)
self.write_page(filename, html)
print("第{}页抓取成功".format(page))
time.sleep(random.randint(1, 3))
    # extract image URLs from the page html; the pattern below is a best-guess
    # for tieba's <img ... src="..."> markup and may need tuning on live pages
    def get_imgs(self, html):
        pattern = re.compile(r'<img[^>]+src="(.*?)"', re.S)
        return pattern.findall(html)
if __name__ == "__main__":
begin = time.time()
spider = TiebaSpider()
spider.run("赵丽颖", 1, 3)
stop = time.time()
print("执行时间%.2f" % (stop - begin))
| [
"[email protected]"
] | |
8d1ae267007951a34533aae1e23eb15ed57cf4ee | 0d5e40b598ee3ad2c0575a45857df49457a99cc7 | /june/handlers/api.py | d6ab577e83fa85e62f43ad5ac12f2b2f71770f36 | [
"BSD-3-Clause"
] | permissive | mitnk/june | 7fdf928b7de452911a9d683bc50ed52a9f04085c | ab2b8e42e9b632923187333cd91af0f683c16ba6 | refs/heads/master | 2021-01-18T12:01:19.949051 | 2012-03-27T07:10:43 | 2012-03-27T07:10:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,530 | py | import math
from tornado.options import options
from june.lib.handler import BaseHandler
from june.lib.decorators import require_user
from june.models import Topic, Member, Reply
from june.models.mixin import NotifyMixin
class UpTopicHandler(BaseHandler):
"""Up a topic will increase impact of the topic,
and increase reputation of the creator
"""
@require_user
def post(self, id):
topic = self.db.query(Topic).filter_by(id=id).first()
if not topic:
self.send_error(404)
return
user_id = self.current_user.id
if topic.user_id == user_id:
# you can't vote your own topic
dct = {'status': 'fail', 'msg': 'cannot up vote your own topic'}
self.write(dct)
return
if user_id in topic.down_users:
# you can't up and down vote at the same time
dct = {'status': 'fail', 'msg': 'cannot up vote your down topic'}
self.write(dct)
return
creator = self.db.query(Member).filter_by(id=topic.user_id).first()
up_users = list(topic.up_users)
if user_id in up_users:
up_users.remove(user_id)
topic.ups = ','.join(str(i) for i in up_users)
topic.impact -= self._calc_topic_impact()
creator.reputation -= self._calc_user_impact()
self.db.add(creator)
self.db.add(topic)
self.db.commit()
dct = {'status': 'ok'}
dct['data'] = {'action': 'cancel', 'count': len(up_users)}
self.write(dct)
self.cache.delete('topic:%s' % str(id))
return
up_users.append(user_id)
topic.ups = ','.join(str(i) for i in up_users)
topic.impact += self._calc_topic_impact()
creator.reputation += self._calc_user_impact()
self.db.add(topic)
self.db.add(creator)
self.db.commit()
dct = {'status': 'ok'}
dct['data'] = {'action': 'active', 'count': len(up_users)}
self.write(dct)
self.cache.delete('topic:%s' % str(id))
return
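    # Vote weight grows with log(voter reputation) and is zero below
    # reputation 2; the creator's per-vote gain is capped by options.up_max_for_user.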
def _calc_topic_impact(self):
if self.current_user.reputation < 2:
return 0
factor = int(options.up_factor_for_topic)
return factor * int(math.log(self.current_user.reputation))
def _calc_user_impact(self):
if self.current_user.reputation < 2:
return 0
factor = int(options.up_factor_for_user)
impact = factor * int(math.log(self.current_user.reputation))
return min(impact, int(options.up_max_for_user))
class DownTopicHandler(BaseHandler):
"""Down a topic will reduce impact of the topic,
and decrease reputation of the creator
"""
@require_user
def post(self, id):
topic = self.db.query(Topic).filter_by(id=id).first()
if not topic:
self.send_error(404)
return
user_id = self.current_user.id
if topic.user_id == user_id:
# you can't vote your own topic
dct = {'status': 'fail', 'msg': "cannot down vote your own topic"}
self.write(dct)
return
if user_id in topic.up_users:
# you can't down and up vote at the same time
dct = {'status': 'fail', 'msg': "cannot down vote your up topic"}
self.write(dct)
return
creator = self.db.query(Member).filter_by(id=topic.user_id).first()
down_users = list(topic.down_users)
if user_id in down_users:
#TODO: can you cancel a down vote ?
down_users.remove(user_id)
topic.downs = ','.join(str(i) for i in down_users)
topic.impact += self._calc_topic_impact()
creator.reputation += self._calc_user_impact()
self.db.add(creator)
self.db.add(topic)
self.db.commit()
dct = {'status': 'ok'}
dct['data'] = {'action': 'cancel', 'count': len(down_users)}
self.write(dct)
self.cache.delete('topic:%s' % str(id))
return
down_users.append(user_id)
topic.downs = ','.join(str(i) for i in down_users)
topic.impact -= self._calc_topic_impact()
creator.reputation -= self._calc_user_impact()
self.db.add(creator)
self.db.add(topic)
self.db.commit()
dct = {'status': 'ok'}
dct['data'] = {'action': 'active', 'count': len(down_users)}
self.write(dct)
self.cache.delete('topic:%s' % str(id))
return
def _calc_topic_impact(self):
if self.current_user.reputation < 2:
return 0
factor = int(options.down_factor_for_topic)
return factor * int(math.log(self.current_user.reputation))
def _calc_user_impact(self):
if self.current_user.reputation < 2:
return 0
factor = int(options.down_factor_for_user)
impact = factor * int(math.log(self.current_user.reputation))
return min(impact, int(options.down_max_for_user))
class AcceptReplyHandler(BaseHandler, NotifyMixin):
"""Vote for a reply will affect the topic impact and reply user's
reputation
"""
def _is_exist(self, topic_id, reply_id):
reply = self.db.query(Reply).filter_by(id=reply_id).first()
if not reply or reply.topic_id != int(topic_id):
return False
topic = self.db.query(Topic).filter_by(id=topic_id).first()
if not topic:
return False
return reply, topic
def _calc_user_impact(self):
if self.current_user.reputation < 2:
return 0
factor = int(options.accept_reply_factor_for_user)
impact = factor * int(math.log(self.current_user.reputation))
return min(impact, int(options.vote_max_for_user))
    @require_user
    def post(self, topic_id, reply_id):
reply_topic = self._is_exist(topic_id, reply_id)
if not reply_topic:
self.send_error(404)
return
reply, topic = reply_topic
user_id = self.current_user.id
if user_id != topic.user_id:
dct = {'status': 'fail', 'msg': 'you are not topic owner'}
self.write(dct)
return
if user_id == reply.user_id:
dct = {'status': 'fail', 'msg': 'cannot accept your own reply'}
self.write(dct)
return
creator = self.db.query(Member).filter_by(id=reply.user_id).first()
if reply.accepted == 'y':
creator.reputation -= self._calc_user_impact()
reply.accepted = 'n'
self.db.add(creator)
self.db.add(reply)
self.db.commit()
self.cache.delete('ReplyListModule:%s:1' % topic.id)
dct = {'status': 'ok', 'data': 'cancel'}
self.write(dct)
return
creator.reputation += self._calc_user_impact()
reply.accepted = 'y'
self.db.add(reply)
self.db.add(creator)
link = '/topic/%s' % topic.id
self.create_notify(reply.user_id, topic.title, reply.content,
link, 'accept')
self.db.commit()
self.cache.delete('ReplyListModule:%s:1' % topic.id)
dct = {'status': 'ok', 'data': 'active'}
self.write(dct)
return
handlers = [
    (r'/api/topic/(\d+)/up', UpTopicHandler),
    (r'/api/topic/(\d+)/down', DownTopicHandler),
    (r'/api/topic/(\d+)/(\d+)/accept', AcceptReplyHandler),
]
| [
"[email protected]"
] | |
fa45fa3e4460095d824552aa35a17268abf6e8d3 | 481ce69bd3611715fef0be99c655c95d67f16d5f | /riopy/tests/test_halocell.py | bb4e9a4da694b436c60b341228f6c83519a79a89 | [
"BSD-3-Clause"
] | permissive | fsimkovic/riopy | 0ffed18c72573e824affa97d5c17ca462c5f2031 | 5dc4083d1c0919d94ceeac802d3fb40748b947f3 | refs/heads/master | 2021-03-24T10:14:25.904758 | 2018-08-30T13:16:02 | 2018-08-30T13:16:02 | 117,836,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,868 | py |
import unittest
from riopy.halocell import HaloCell
class HaloCellTest(unittest.TestCase):
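    # HaloCell.ops(n) is expected to enumerate the (2n + 1) ** 3 integer
    # translation vectors with components in [-n, n]: 1, 27, 125 and 343
    # tuples in the fixtures below.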
def test_ops_1(self):
mate_ops = HaloCell.ops(0)
self.assertListEqual([(0, 0, 0)], mate_ops)
def test_ops_2(self):
mate_ops = HaloCell.ops(1)
true_ops = [(0, 1, 1), (1, -1, -1), (1, 0, 1), (1, -1, 0), (-1, 1, -1), (1, 1, -1), (0, 0, -1), (0, -1, -1), (1,
0, 0), (-1, 0, 1), (-1, 0, 0), (-1, 0, -1), (0, 0, 1), (0, -1, 1), (0, 0, 0), (0, -1, 0), (1, 1, 1), (-1, 1,
0), (1, 1, 0), (-1, 1, 1), (-1, -1, 1), (1, 0, -1), (-1, -1, 0), (1, -1, 1), (0, 1, 0), (0, 1, -1), (-1,
-1, -1)]
self.assertListEqual(true_ops, mate_ops)
def test_ops_3(self):
mate_ops = HaloCell.ops(2)
true_ops = [(2, 1, -1), (1, -1, -2), (-1, -1, -2), (2, 2, -2), (0, 2, 1), (2, 2, 2), (-2, 0, 1), (2, -1, 2), (0,
0, -1), (2, 0, 1), (1, 2, 0), (-2, -1, 1), (-2, 1, -2), (-2, 1, 0), (1, 2, 2), (-1, 0, 0), (-1, 2, 1), (2,
-1, 1), (-1, 1, 1), (1, 2, -2), (-2, 1, 1), (2, 1, -2), (1, -1, 0), (-1, -2, -1), (2, 0, 2), (0, 1, 0),
(1, -1, 2), (-1, -1, -1), (-2, -1, -2), (-2, 2, -1), (-1, 2, 0), (0, 1, 2), (-2, -1, -1), (1, 0, 1), (1, 0,
-1), (1, -2, 0), (-1, 1, -2), (2, -1, -2), (-2, -2, 0), (-1, 0, 2), (-2, 1, -1), (-2, 0, 0), (2, 2, 1),
(1, -2, 1), (2, 2, -1), (-2, 2, 2), (2, 0, -1), (0, 0, 1), (-2, 2, -2), (-2, -2, 1), (1, 1, -2), (0, -2, 0),
(-2, 0, -2), (-2, -2, -1), (1, 1, 0), (0, -2, 2), (-1, -1, 1), (1, -2, 2), (2, 1, 1), (0, 2, -2), (1, 0,
-2), (1, 1, 2), (0, -1, 0), (-2, 2, 1), (2, -2, 2), (0, 2, 2), (0, 2, 0), (1, 1, -1), (0, 0, -2), (0,
-1, -2), (1, -2, -1), (0, -2, 1), (-2, 0, -1), (2, 0, 0), (1, 2, 1), (-1, -2, 2), (-1, 0, -2), (-1,
1, 2), (0, -1, 2), (-1, -2, 0), (-1, 2, -2), (-1, 2, 2), (-1, 1, 0), (0, -2, -2), (2, -2, 0),
(-1, -2, 1), (1, -1, 1), (1, 0, 2), (0, 1, -2), (0, 1, 1), (0, -1, -1), (-1, 1, -1), (1, -1, -1),
(-2, 2, 0), (2, -1, -1), (1, 0, 0), (-1, -1, 0), (-2, -2, -2), (-2, -2, 2), (-1, 2, -1), (-2, -1,
2), (-1, 0, 1), (0, 2, -1), (2, -2, -1), (0, -2, -1), (1, 2, -1), (-1, -2, -2), (1, -2, -2),
(-1, 0, -1), (0, 0, 2), (2, 2, 0), (0, -1, 1), (0, 0, 0), (2, 1, 2), (0, 1, -1), (1, 1, 1), (-2, 0,
2), (2, 0, -2), (2, -2, 1), (-1, -1, 2), (2, 1, 0), (2, -2, -2), (-2, 1, 2), (-2, -1, 0), (2,
-1, 0)]
self.assertListEqual(true_ops, mate_ops)
def test_ops_4(self):
mate_ops = HaloCell.ops(3)
true_ops = [(3, 0, 3), (1, -3, 1), (1, -2, -2), (0, 2, 1), (-1, 3, 2), (-2, 3, 0), (3, -2, -3), (0, 3, -1), (1,
2, 0), (2, -3, -2), (-3, -3, -1), (3, -2, -1), (1, 1, -3), (0, -3, 0), (-2, 0, 0), (0, 0, -2), (-1, 1, 1),
(-1, 2, 2), (1, -1, 0), (0, 1, -3), (0, 1, 0), (-1, -1, -1), (1, 0, 3), (2, -2, 0), (-3, -3, -2), (0, -2,
-3), (-2, 1, -1), (1, 2, -3), (1, -1, 1), (3, 1, -3), (-2, 3, 2), (-3, 2, 3), (-1, 0, 2), (-2, 1, 2),
(0, -2, 0), (1, 1, 0), (3, 0, 1), (-2, -1, 1), (0, 1, -1), (-3, 3, -2), (2, -3, -1), (3, -3, 2), (1, -3, 0),
(-2, -2, 3), (0, 2, 0), (2, 1, -3), (-2, -3, 3), (1, -2, -1), (0, -3, 1), (3, 1, 3), (1, 2, 1), (2, -3, 0),
(0, 2, -2), (-2, 1, 3), (-1, 1, -1), (1, -2, 1), (0, 1, -2), (-3, -2, 2), (0, 1, 1), (1, -1, -2), (0, -1,
-3), (3, 3, 0), (1, 0, 0), (1, -3, -2), (0, -1, 2), (1, 2, -2), (2, 2, -1), (3, -1, 2), (0, -3, -1),
(-3, -1, -3), (-3, -2, 0), (-2, -3, 2), (0, -2, 3), (2, -2, 3), (1, 1, 3), (-2, -2, 0), (-3, 3, -3), (3, 0,
2), (-2, 1, -3), (3, 0, 0), (3, -3, 1), (-1, -1, -2), (3, -1, -3), (-1, -2, -2), (1, -3, 3), (0, 2, 3),
(-1, 0, 1), (2, 0, 1), (0, -3, 2), (3, 1, 2), (1, 2, 2), (-2, 3, 3), (-2, 2, 3), (2, -1, 1), (3, -3, 0),
(-3, -2, -1), (1, 2, -1), (-2, 1, 1), (-2, -1, -2), (-1, 0, -1), (1, -1, 2), (2, 3, 3), (2, -2, -1), (0, 1,
2), (-2, -1, -3), (1, 0, 1), (1, 0, -1), (-2, 1, -2), (-2, 3, 1), (2, -3, 1), (-2, -1, 0), (1, 3, -2),
(-1, -2, -3), (0, 3, 0), (-3, 0, 3), (2, 2, 0), (0, 3, 1), (-1, -2, 1), (1, 1, -1), (-1, 0, -2), (-2, -2,
-1), (-1, -3, 3), (0, -2, 2), (0, 2, -1), (1, -2, 0), (1, 1, 2), (3, -2, 2), (-3, 0, 1), (2, 1, -2),
(-1, -3, 2), (-3, -3, 1), (1, -2, -3), (-3, 3, 0), (0, -1, -2), (2, 2, -3), (1, 1, -2), (-1, 3, -1), (0, -1,
3), (0, 2, 2), (2, 0, 0), (0, -3, 3), (3, 1, 1), (1, 2, 3), (-1, -3, -1), (-3, -3, -3), (-1, 2, -1), (2,
-1, 2), (0, -2, -2), (2, 3, 0), (-1, 0, -3), (-3, 1, -3), (-1, 1, 0), (1, -2, 3), (-1, -1, 3), (0,
1, 3), (0, 3, -3), (-1, -2, 0), (-3, 0, -1), (-1, -2, 3), (3, 2, 1), (-3, 0, 0), (3, 2, -2), (3,
2, -1), (0, -2, -1), (-1, -1, 2), (-3, -1, 1), (-1, 3, -2), (-2, 2, -2), (3, -3, -3), (-1,
1, -3), (-3, -2, -3), (3, 3, -3), (3, 3, -1), (3, -1, 3), (-1, 3, 0), (-2, -2, 2), (-3,
-1, 3), (-2, 2, 2), (0, 0, -1), (3, 2, 2), (-2, -2, 1), (-2, 1, 0), (3, -1, 1), (2,
0, 3), (3, 1, 0), (3, -3, -1), (2, -2, -3), (-3, 1, 0), (-3, -2, -2), (-3, 1,
-2), (-1, 2, 1), (-2, -1, 2), (2, 1, -1), (1, 3, -1), (1, 3, 3), (2, 2, -2),
(-1, 2, -3), (0, 3, 3), (2, 1, 3), (1, 0, -2), (2, 3, 2), (2, 0, -1), (1, 0,
-3), (-3, 2, 1), (0, 0, 3), (2, -2, -2), (-3, -1, -1), (-3, 1, -1), (-3, -1,
-2), (0, -3, -3), (2, 1, 1), (3, -1, -1), (0, -1, 0), (3, 0, -1), (2, 3,
-2), (1, -1, 3), (-1, -1, 1), (3, -2, -2), (-1, -3, -3), (0, 3, -2),
(0, 0, -3), (3, 2, 3), (-2, 0, -1), (-2, 0, -3), (2, 0, 2), (2, -3, 2),
(-1, -1, 0), (-3, 1, 3), (-1, 2, 0), (-2, -3, -2), (2, 2, 1), (1, 3,
-3), (-2, -3, -1), (-3, 2, -2), (-3, -2, 3), (0, -1, -1), (1, 3, 2),
(2, -1, 0), (-3, -3, 0), (2, 0, -2), (1, -3, -1), (2, -1, -1), (2, -3,
-3), (3, -1, -2), (2, 3, 1), (-2, 0, 1), (-1, 2, -2), (3, -3, -2),
(2, -1, -3), (-3, 2, 2), (0, 0, 2), (2, 1, 0), (3, 0, -3), (-3, -1, 0),
(0, -3, -2), (3, 3, 2), (-2, 3, -3), (-3, -2, 1), (-1, 2, 3), (-3, 3,
2), (3, -2, 1), (2, 0, -3), (-1, -3, -2), (2, -3, 3), (3, -1, 0),
(2, 1, 2), (2, -1, 3), (-1, -3, 0), (3, 2, 0), (-3, 2, 0), (-3, 0, -3),
(3, 1, -1), (-1, 1, 3), (-3, 1, 2), (-3, 0, -2), (-2, -1, -1), (-3, 1,
1), (-2, 0, 3), (3, -2, 0), (3, 3, -2), (-1, -2, -1), (-2, 2, -1),
(-3, -3, 3), (-2, -3, 0), (3, 0, -2), (-1, 0, 0), (-2, -3, -3), (-1, 3,
-3), (-2, -2, -2), (2, -2, 2), (3, 2, -3), (0, 0, 1), (0, 2, -3),
(2, 2, 3), (1, -2, 2), (-1, 3, 3), (3, 3, 1), (-3, 2, -1), (-3, -1, 2),
(2, -2, 1), (-1, -1, -3), (-2, -1, 3), (-1, 3, 1), (1, 1, 1), (-2, -2,
-3), (1, -1, -3), (-3, 0, 2), (-3, 3, -1), (-1, -2, 2), (1, 3, 1),
(2, 3, -3), (-3, 2, -3), (-2, 3, -2), (-2, 3, -1), (2, 3, -1), (-2, 2,
1), (-2, -3, 1), (1, -3, 2), (1, 0, 2), (-1, 1, -2), (1, 3, 0), (2,
-1, -2), (-2, 2, -3), (-3, -3, 2), (3, 1, -2), (0, 3, 2), (-1,
0, 3), (3, 3, 3), (1, -1, -1), (-2, 0, -2), (-3, 3, 1), (0,
-2, 1), (0, 0, 0), (1, -3, -3), (-3, 3, 3), (-2, 0, 2),
(2, 2, 2), (0, -1, 1), (-1, 1, 2), (3, -2, 3), (-1, -3, 1),
(3, -3, 3), (-2, 2, 0)]
self.assertListEqual(true_ops, mate_ops)
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"[email protected]"
] | |
c44a12d77ff12294af2fe0d956eadc83432d93a5 | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project/.history/main_20211116163259.py | e8cab25e7e1da7157cd5e01b04d2ba8c87d33c20 | [] | no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 2,680 | py | import product as p
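# Console menu for a small product-management app. The CRUD, sorting and Excel
# helpers called below come from the author's local `product` module; their
# exact names (including the `ImportExecel` spelling) are kept as-is.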
def choose():
    action = 0
    while action >= 0:
        if action == 1:
            p.AddProduct()
            print("--------------------------------")
        elif action == 2:
            p.DeleteProduct()
        elif action == 3:
            p.UpdateProduct()
        elif action == 4:
            p.ShowAllProduct()
        elif action == 5:
            p.FindProductByName()
        elif action == 6:
            p.SortProductNameA_Z()
            print("Sorted successfully! Please choose 4 to view the result".upper())
            print("********************************")
        elif action == 7:
            p.SortProductNameZ_A()
            print("Sorted successfully! Please choose 4 to view the result".upper())
            print("********************************")
        elif action == 8:
            p.SortPriceAsc()
            print("Sorted successfully! Please choose 4 to view the result".upper())
            print("********************************")
        elif action == 9:
            p.SortPriceDesc()
            print("Sorted successfully! Please choose 4 to view the result".upper())
            print("********************************")
        elif action == 10:
            p.ImportExecel()
        elif action == 11:
            p.ExportExecl()
print("Vui lòng chọn chức năng bạn muốn: ")
print("0. Thoát khỏi chương trình. ")
print("1. Thêm mới sản phẩm. ")
print("2. Xóa sản phẩm. ")
print("3. Cập nhật thông tin sản phẩm. ")
print("4. Xem danh sách tất cả sản phẩm. ")
print("5. Tìm kiếm sản phẩm theo tên hoặc theo thương hiệu. ")
print("6. Sắp xếp tên sản phẩm A-Z. ")
print("7. Sắp xếp tên sản phẩm Z-A. ")
print("8. Sắp xếp giá sản phẩm tăng dần. ")
print("9. Sắp xếp tên sản phẩm giảm dần. ")
print("10. Import file excel. ")
print("11. Export file excel. ")
try:
action = int(input("Bạn chọn chức năng? "))
except ValueError:
if action == 12:
print("Không có chức năng bạn chọn, mời chọn lại!".upper())
else:
print("Không có chức năng bạn chọn, mời chọn lại!".upper())
try:
choose()
except:
print("Dừng chương trình!")
if action == 0:
print("Đã thoát chương trình")
break
choose()
| [
"[email protected]"
] | |
a6793fef67f7d299ac2575aa4f3fdafd5fc99bae | 89b0920101eaf09b0afb9a5449f3fabd68ac21c1 | /analysis-blocks/scripts/SlowAdder.py | fd81750d4252600627d344e3bba39799ecfc459f | [] | no_license | metamorph-inc/openmeta-examples-and-templates | cffdcecf8b4fca1ea8ae4f8880a5f2720ec05f4b | 4f6cc54510c742b9a3bf39338a5a01df510c1243 | refs/heads/master | 2023-04-10T06:25:28.691426 | 2020-03-12T23:20:36 | 2020-03-12T23:20:36 | 91,151,238 | 7 | 3 | null | 2018-12-03T23:01:19 | 2017-05-13T05:17:18 | Python | UTF-8 | Python | false | false | 825 | py | from __future__ import print_function
from openmdao.api import IndepVarComp, Component, Problem, Group
import time
class SlowAdder(Component):
def __init__(self):
super(SlowAdder, self).__init__()
self.add_param("x", val=0.0)
self.add_param("y", val=0.0)
self.add_output("z", val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
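        # artificial delay so the component behaves like an expensive analysis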
time.sleep(10)
unknowns["z"] = params["x"] + params["y"]
def main():
top = Problem()
root = top.root = Group()
root.add('Input', IndepVarComp([('x', 1.0), ('y', 2.0)]))
root.add('p', SlowAdder())
root.connect('Input.x', 'p.x')
root.connect('Input.y', 'p.y')
top.setup()
top.run()
print('z =', root.p.unknowns['z'])
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
892d1f7a730ca334b28fb0d84012c7de626b560d | e2ef58aa444e9e97ed26ef52bc69ac8bd79bc93e | /podoc/tests/test_utils.py | 4e8a2b8169b447a65c22f3dda2df52035c514a17 | [
"BSD-3-Clause"
] | permissive | willingc/podoc | 1dc1c18f40e06b47760f0f6227ec70bd2dd1d19e | 7021aec70d47a8a3a934c5799828f412e38b7c23 | refs/heads/master | 2021-01-17T10:04:47.414887 | 2016-04-02T09:56:55 | 2016-04-02T09:56:55 | 53,595,629 | 0 | 0 | null | 2016-03-10T15:32:25 | 2016-03-10T15:32:23 | Python | UTF-8 | Python | false | false | 2,014 | py | # -*- coding: utf-8 -*-
"""Test utility functions."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import json
import logging
import os.path as op
from pytest import mark, raises
from ..utils import (Bunch, Path, load_text, dump_text, _get_file,
assert_equal,
pandoc, has_pandoc, get_pandoc_formats)
logger = logging.getLogger(__name__)
require_pandoc = mark.skipif(not(has_pandoc()),
reason='pypandoc is not available')
#------------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------------
def test_bunch():
obj = Bunch()
obj['a'] = 1
assert obj.a == 1
obj.b = 2
assert obj['b'] == 2
assert obj.copy().a == 1
def test_path():
print(Path(__file__))
assert Path(__file__).exists()
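# assert_equal ignores dict keys that start with an underscore ('_c' below)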
def test_assert_equal():
assert_equal([0], [0])
assert_equal({'a': 1, 'b': [2, 3], '_c': 0},
{'a': 1, 'b': [2, 3], '_c': 1})
with raises(AssertionError):
assert_equal({'a': 1, 'b': [2, 3], '_c': 0},
{'a': 1, 'b': [2, 4], '_c': 0})
#------------------------------------------------------------------------------
# Test file I/O
#------------------------------------------------------------------------------
def test_open_dump_text(tempdir):
path = op.join(tempdir, 'test.txt')
dump_text('hello *world*', path)
assert load_text(path) == 'hello *world*'
assert _get_file(path, 'r').read() == 'hello *world*'
with open(path, 'r') as f:
assert _get_file(f, 'r').read() == 'hello *world*'
@require_pandoc
def test_pandoc():
out = pandoc('hello *world*', 'json', format='markdown')
assert isinstance(json.loads(out), list)
sl, tl = get_pandoc_formats()
assert 'markdown' in sl
assert 'markdown' in tl
| [
"[email protected]"
] |