Dataset columns (name: type, observed range):
- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 616)
- content_id: string (length 40)
- detected_licenses: list (0 to 112 items)
- license_type: string (2 classes)
- repo_name: string (length 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (777 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
- gha_language: string (149 classes)
- src_encoding: string (26 classes)
- language: string (1 class)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (3 to 10.2M)
- extension: string (188 classes)
- content: string (length 3 to 10.2M)
- authors: list (1 item)
- author_id: string (length 1 to 132)

The rows below follow this column order, with fields separated by " | ".
abd7adc1822c7a3ded2bfbb351e303bc38039614 | 99a310f6bb6c7a6c728f1b3ae78054487372042d | /aoc2019/intcode/state_machine.py | b68372737c28f105cbb818391176e19138743da5 | []
| no_license | jepebe/aoc2018 | 46ce6b46479a0faf2c2970413af14a071dcfdb79 | 4bf91b99bec4b59529533ef70f24bf6496bada99 | refs/heads/master | 2023-01-11T16:44:42.125394 | 2023-01-06T06:27:14 | 2023-01-06T06:27:14 | 159,912,721 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,926 | py | from collections import defaultdict
def get_address(state_machine, parameter, write_mode=False):
mode = state_machine['parameter_modes'][parameter]
pos = state_machine['pos']
if mode == 0:
addr = state_machine['instructions'][pos]
elif mode == 1:
if write_mode:
print('Writing in immediate mode?')
addr = pos
elif mode == 2:
addr = state_machine['instructions'][pos]
relative_pos = state_machine['relative_pos']
addr = addr + relative_pos
else:
raise ValueError('Unknown addressing mode %i for read' % mode)
return addr
def read(state_machine, parameter):
addr = get_address(state_machine, parameter)
state_machine['pos'] += 1
if addr >= len(state_machine['instructions']):
return state_machine['memory'][addr]
else:
return state_machine['instructions'][addr]
def write(state_machine, parameter, value):
addr = get_address(state_machine, parameter, write_mode=True)
state_machine['pos'] += 1
if addr >= len(state_machine['instructions']):
state_machine['memory'][addr] = value
else:
state_machine['instructions'][addr] = value
def add(state_machine):
a = read(state_machine, 0)
b = read(state_machine, 1)
write(state_machine, 2, a + b)
def multiply(state_machine):
a = read(state_machine, 0)
b = read(state_machine, 1)
write(state_machine, 2, a * b)
def get_input(state_machine):
if len(state_machine['input']) == 0:
state_machine['wait'] = True
state_machine['pos'] -= 1
state_machine['instruction_count'] -= 1
else:
data = state_machine['input'].pop(0)
write(state_machine, 0, data)
def output(state_machine):
value = read(state_machine, 0)
state_machine['output'].append(value)
if state_machine['output_enabled']:
print('Output from state machine %s' % value)
def jump_if_true(state_machine):
a = read(state_machine, 0)
b = read(state_machine, 1)
if a != 0:
state_machine['pos'] = b
def jump_if_false(state_machine):
a = read(state_machine, 0)
b = read(state_machine, 1)
if a == 0:
state_machine['pos'] = b
def less_than(state_machine):
a = read(state_machine, 0)
b = read(state_machine, 1)
write(state_machine, 2, 1 if a < b else 0)
def equals(state_machine):
a = read(state_machine, 0)
b = read(state_machine, 1)
write(state_machine, 2, 1 if a == b else 0)
def adjust_relative(state_machine):
a = read(state_machine, 0)
state_machine['relative_pos'] += a
def halt(state_machine):
state_machine['halt'] = True
# print('Instruction count: %i' % state_machine['instruction_count'])
def create_state_machine(instructions):
return {
'instructions': list(instructions),
'backup_instructions': list(instructions),
'memory': defaultdict(int),
'operation': 0,
'parameter_modes': [0],
'pos': 0,
'relative_pos': 0,
'instruction_count': 0,
'input': [],
'output': [],
'last_output': None,
'output_enabled': False,
'opcodes': {
1: add,
2: multiply,
3: get_input,
4: output,
5: jump_if_true,
6: jump_if_false,
7: less_than,
8: equals,
9: adjust_relative,
99: halt
},
'halt': False,
'wait': False
}
def reset_state_machine(state_machine):
state_machine['instructions'] = list(state_machine['backup_instructions'])
state_machine['memory'] = defaultdict(int)
state_machine['operation'] = 0
state_machine['parameter_modes'] = [0]
state_machine['pos'] = 0
state_machine['relative_pos'] = 0
state_machine['instruction_count'] = 0
state_machine['input'] = []
state_machine['output'] = []
state_machine['last_output'] = None
state_machine['output_enabled'] = False
state_machine['halt'] = False
state_machine['wait'] = False
def parse(state_machine):
pos = state_machine['pos']
opcode = state_machine['instructions'][pos]
op = opcode % 100
p1 = ((opcode - op) // 100) % 10
p2 = ((opcode - op) // 1000) % 10
p3 = ((opcode - op) // 10000) % 10
state_machine['operation'] = state_machine['opcodes'][op]
state_machine['parameter_modes'] = [p1, p2, p3]
state_machine['pos'] += 1
def run_state_machine(state_machine):
while not state_machine['halt'] and not state_machine['wait']:
parse(state_machine)
operation = state_machine['operation']
operation(state_machine)
state_machine['instruction_count'] += 1
def add_input(state_machine, data):
state_machine['input'].append(data)
if state_machine['wait']:
state_machine['wait'] = False
def get_output(state_machine):
if not has_output(state_machine):
raise UserWarning('No output available!')
state_machine['last_output'] = state_machine['output'][0]
return state_machine['output'].pop(0)
def has_output(state_machine):
return len(state_machine['output']) > 0
def get_last_output(state_machine):
return state_machine['last_output']
def flush_output(state_machine):
while has_output(state_machine):
get_output(state_machine)
def load_instructions(filename):
with open(filename) as f:
instructions = f.readline().split(',')
instructions = [int(x) for x in instructions]
return instructions
def load_state_machine(filename):
instructions = load_instructions(filename)
return create_state_machine(instructions)
def is_running(state_machine):
return not state_machine['halt']
def print_output(state_machine):
import sys
while has_output(state_machine):
v = get_output(state_machine)
sys.stdout.write(str(v) if v > 255 else chr(v))
| [
"[email protected]"
]
| |
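For orientation, here is a minimal sketch of how the state-machine API in the file above might be driven. The program file name and the input value are hypothetical placeholders, not taken from the repository.

```python
# Hypothetical driver for the Intcode state machine defined above.
# 'program.txt' (a comma-separated Intcode program) and the input value 5 are placeholders.
sm = load_state_machine('program.txt')
add_input(sm, 5)                 # queue an input value before running
run_state_machine(sm)            # runs until HALT, or until opcode 3 needs more input
while has_output(sm):
    print(get_output(sm))
if is_running(sm):
    print('state machine is waiting for more input')
```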
64cddf5250ac60f94ef5c62aedfa3eb120d3e5f8 | 8ca70628ca811e08fb77b8e251fc8e5049486a65 | /airbyte-integrations/bases/base-python/base_python/cdk/streams/exceptions.py | 6727216dd5dd50496241a0890070cb87439e8f82 | [
"MIT"
]
| permissive | Tana8M/airbyte | a19544d2f7997ec7551793f7077d3e02bfe6ac84 | 49296ef657be272684c7259ed0d6be06e574dbe1 | refs/heads/master | 2023-04-15T15:04:22.849307 | 2021-04-23T23:12:55 | 2021-04-23T23:12:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | """
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Union
import requests
class BaseBackoffException(requests.exceptions.HTTPError):
pass
class UserDefinedBackoffException(BaseBackoffException):
"""
An exception that exposes how long it attempted to backoff
"""
def __init__(self, backoff: Union[int, float], request: requests.PreparedRequest, response: requests.Response):
"""
:param backoff: how long to backoff in seconds
:param request: the request that triggered this backoff exception
:param response: the response that triggered the backoff exception
"""
self.backoff = backoff
super().__init__(request=request, response=response)
class DefaultBackoffException(BaseBackoffException):
pass
| [
"[email protected]"
]
| |
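As a usage sketch, calling code might catch these exceptions around whatever function performs the HTTP call; the `request_fn` callable below is a hypothetical stand-in and is not part of the file above.

```python
import time

def call_with_backoff(request_fn, max_tries=3):
    # request_fn is a hypothetical callable that performs the HTTP call and may raise
    # the backoff exceptions defined above.
    for attempt in range(1, max_tries + 1):
        try:
            return request_fn()
        except UserDefinedBackoffException as exc:
            time.sleep(exc.backoff)     # wait exactly as long as the stream requested
        except DefaultBackoffException:
            time.sleep(2 ** attempt)    # simple exponential fallback
    return request_fn()                 # final attempt; any exception now propagates
```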
e9a4c0ee8774a16092863b3972e7e903593cac32 | 492cb86b533bc74962a0e25ad190dab131f7cb09 | /humanScape/urls.py | d66fdef307d8290976f7ee67668986092280f3c9 | []
| no_license | acdacd66/humanscape | 75f27815f6c1ac5975b3822e5abc5738aa9b3118 | 6fbeeca3346569c7f861bbffcbec731a6a9d6e51 | refs/heads/main | 2023-09-02T01:55:49.806746 | 2021-11-16T17:29:36 | 2021-11-16T17:29:36 | 428,570,173 | 0 | 1 | null | 2021-11-16T11:22:32 | 2021-11-16T08:10:30 | Python | UTF-8 | Python | false | false | 820 | py | """humanScape URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path("clinical/", include("clinicalInformation.urls")),
]
| [
"[email protected]"
]
| |
3daab6c956e8d126316ecdb6ef6e71d8af6a258d | 1c8a1b7cfb5c78fe94c4cc62a78dbfff96161924 | /day05/test04.py | 7715b05a49b005d9cad71dc19124fa6797945c72 | []
| no_license | WHUTyuen/PIL_opencv | d264858f0eaa4ecc555747efd5f277f48a432b91 | 3ae6e7d878215866c304e64eac05bf1011ecb428 | refs/heads/main | 2023-01-01T14:00:33.331676 | 2020-11-01T11:35:18 | 2020-11-01T11:35:18 | 309,072,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | import cv2
import numpy as np
A = cv2.imread('3.jpg')
B = cv2.imread('4.jpg')
G = A.copy()
gpA = [G]
for i in range(6):
G = cv2.pyrDown(G)
gpA.append(G)
G = B.copy()
gpB = [G]
for i in range(6):
G = cv2.pyrDown(G)
gpB.append(G)
# generate Laplacian Pyramid for A
lpA = [gpA[5]]
for i in range(5, 0, -1):
GE = cv2.pyrUp(gpA[i])
L = cv2.subtract(gpA[i - 1], GE)
lpA.append(L)
# generate Laplacian Pyramid for B
lpB = [gpB[5]]
for i in range(5, 0, -1):
GE = cv2.pyrUp(gpB[i])
L = cv2.subtract(gpB[i - 1], GE)
lpB.append(L)
# Now add left and right halves of images in each level
LS = []
for la, lb in zip(lpA, lpB):
rows, cols, dpt = la.shape
ls = np.hstack((la[:, 0:cols // 2], lb[:, cols // 2:]))
LS.append(ls)
# now reconstruct
ls_ = LS[0]
for i in range(1, 6):
ls_ = cv2.pyrUp(ls_)
ls_ = cv2.add(ls_, LS[i])
# image with direct connecting each half
real = np.hstack((A[:, :cols // 2], B[:, cols // 2:]))
cv2.imshow('Pyramid_blending.jpg', ls_)
cv2.imshow('Direct_blending.jpg', real)
cv2.waitKey(0)
| [
"[email protected]"
]
| |
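One practical caveat for the pyramid-blending script above: `cv2.pyrDown` and `cv2.pyrUp` halve and double image dimensions exactly, and `cv2.subtract`/`cv2.add` require operands of identical shape, so the six-level pyramids only line up when both inputs share the same dimensions and those dimensions survive six rounds of halving. A minimal pre-step, as a sketch (the 512x512 size is illustrative, not from the repository):

```python
import cv2

# Force both inputs to a common size divisible by 2**6 so the six-level
# Gaussian/Laplacian pyramids reconstruct to matching shapes.
A = cv2.resize(cv2.imread('3.jpg'), (512, 512))
B = cv2.resize(cv2.imread('4.jpg'), (512, 512))
```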
27c38c01ec059532373e8cd03289ccde4ded2e1d | f0f3f8731145e236e8e08dafb4201108d35af488 | /wish_list_items/migrations/0007_auto_20160414_1317.py | 8478f0d9cfbb5235617279dac1587637337832db | []
| no_license | AaronScruggs/wish_list_project | 49fdfc9c3a9e72470084bbf283085c15aa659a3e | a2a741823e0a570390ce344f3407f6f3b57f2590 | refs/heads/master | 2021-01-01T05:18:10.817456 | 2016-04-19T00:36:24 | 2016-04-19T00:36:24 | 56,259,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 20:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wish_list_items', '0006_auto_20160414_1312'),
]
operations = [
migrations.AlterField(
model_name='wishitem',
name='item_url',
field=models.URLField(default=True, null=True),
),
]
| [
"[email protected]"
]
| |
4816b6ce56b6ba10760fc6ec50b511666a0ef942 | c0f5d309576f791f8cc062e2d0cad340eec41d7d | /3.py | 846552142673f67774ae9cc5803b41248ec09248 | []
| no_license | mjjin1214/algorithm | fa91455ab792c38d01fd210c12e53e50f516eb55 | 423119406061443939b4b966c7d9f1513544dd03 | refs/heads/master | 2020-04-22T19:31:23.981387 | 2019-04-05T07:58:10 | 2019-04-05T07:58:10 | 170,610,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | import sys
sys.stdin = open('input2.txt')
def subset(n, su):
global visit, count
if n == len(score):
if not visit & (1<<su):
visit ^= (1<<su)
count += 1
return
subset(n+1, su+score[n])
subset(n+1, su)
T = int(input())
for t in range(T):
N = int(input())
score = list(set(map(int, input().split())))
visit = count = 0
subset(0, 0)
print('#{} {}'.format(t+1, count+N-len(score))) | [
"[email protected]"
]
| |
cb1e1c4fd0adabebcd87bc33eefe453ec2df48fa | 942ee5e8d54e8ebe9c5c841fbfdd1da652946944 | /1001-1500/1443.Minimum Time to Collect All Apples in a Tree.py | e8ae7ff0deadce1de133f2d3d5feb31d43fde59a | []
| no_license | kaiwensun/leetcode | 0129c174457f32887fbca078fb448adce46dd89d | 6b607f4aae3a4603e61f2e2b7480fdfba1d9b947 | refs/heads/master | 2023-08-31T07:30:50.459062 | 2023-08-27T07:59:16 | 2023-08-27T07:59:16 | 57,526,914 | 69 | 9 | null | 2023-08-20T06:34:41 | 2016-05-01T05:37:29 | Python | UTF-8 | Python | false | false | 728 | py | from collections import defaultdict
class Solution(object):
def minTime(self, n, edges, hasApple):
"""
:type n: int
:type edges: List[List[int]]
:type hasApple: List[bool]
:rtype: int
"""
graph = defaultdict(list)
for edge in edges:
graph[edge[0]].append(edge[1])
graph[edge[1]].append(edge[0])
visited = set()
def dfs(root):
res = 0
if root not in visited:
visited.add(root)
for nbr in graph[root]:
res += dfs(nbr)
if res or hasApple[root]:
res += 2
return res
return max(0, dfs(0) - 2)
| [
"[email protected]"
]
| |
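A quick usage sketch for the solution above; the tree below is intended to mirror the problem's first sample (quoted from memory, so treat it as illustrative rather than authoritative):

```python
# Collect apples on nodes 2, 4 and 5 of a 7-node tree rooted at 0.
edges = [[0, 1], [0, 2], [1, 4], [1, 5], [2, 3], [2, 6]]
has_apple = [False, False, True, False, True, True, False]
print(Solution().minTime(7, edges, has_apple))  # prints 8 with the implementation above
```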
c2f2d9873572b84a36f2345329ebd77f92a88cbe | 98e1716c1c3d071b2fedef0ac029eb410f55762c | /part15-statistical-thinking-1/No04-Bee-swarm-plot.py | 0b503f7631dcaaedd5a7afe2edbda8d651de8a7c | []
| no_license | iamashu/Data-Camp-exercise-PythonTrack | 564531bcf1dff119949cbb75e1fd63d89cb2779f | c72a4e806494f0e263ced9594597dc8882c2131c | refs/heads/master | 2020-07-22T00:23:12.024386 | 2019-04-12T09:24:42 | 2019-04-12T09:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | #Bee swarm plot
'''
Make a bee swarm plot of the iris petal lengths. Your x-axis should contain each of the three species, and the y-axis the petal lengths. A data frame containing the data is in your namespace as df.
For your reference, the code Justin used to create the bee swarm plot in the video is provided below:
_ = sns.swarmplot(x='state', y='dem_share', data=df_swing)
_ = plt.xlabel('state')
_ = plt.ylabel('percent of vote for Obama')
plt.show()
In the IPython Shell, you can use sns.swarmplot? or help(sns.swarmplot) for more details on how to make bee swarm plots using seaborn.
Instructions
In the IPython Shell, inspect the DataFrame df using df.head(). This will let you identify which column names you need to pass as the x and y keyword arguments in your call to sns.swarmplot().
Use sns.swarmplot() to make a bee swarm plot from the DataFrame containing the Fisher iris data set, df. The x-axis should contain each of the three species, and the y-axis should contain the petal lengths.
Label the axes.
Show your plot.
'''
# code
sns.swarmplot(x='species', y='petal length (cm)', data=df)
# Label the axes
plt.xlabel('species')
plt.ylabel('petal length (cm)')
# Show the plot
plt.show() | [
"[email protected]"
]
| |
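For readers running this outside the DataCamp environment (where `sns`, `plt` and `df` are pre-loaded), a self-contained variant is sketched below; note that seaborn's bundled iris dataset names the column `petal_length`, unlike the exercise's `petal length (cm)`.

```python
import seaborn as sns
import matplotlib.pyplot as plt

df = sns.load_dataset('iris')   # columns: sepal_length, sepal_width, petal_length, petal_width, species
sns.swarmplot(x='species', y='petal_length', data=df)
plt.xlabel('species')
plt.ylabel('petal length (cm)')
plt.show()
```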
f7bb5b008461cd4f51770163a3cf7e600d784405 | 81c5c07e1144747dc0e98f8dffb287a69be1eba7 | /score_mcc_bin.py | 686c4e86fcab42e4f12a69f6f893e59e1cfe31ee | []
| no_license | twistedmove/e2e_antispoofing | acbb9ec5bc4454c1698fc355d0c0fee3bf70006e | 686dfb515b2c568a1006136f56bbaad0419f0787 | refs/heads/master | 2020-09-07T10:41:12.024794 | 2019-10-06T19:28:19 | 2019-10-06T19:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,454 | py | import argparse
import numpy as np
import glob
import torch
import torch.nn.functional as F
import os
from kaldi_io import read_mat_scp
import model as model_
import scipy.io as sio
from utils import compute_eer_labels, set_device, read_trials, get_freer_gpu
def prep_feats(data_):
#data_ = ( data_ - data_.mean(0) ) / data_.std(0)
features = data_.T
if features.shape[1]<50:
mul = int(np.ceil(50/features.shape[1]))
features = np.tile(features, (1, mul))
features = features[:, :50]
return torch.from_numpy(features[np.newaxis, np.newaxis, :, :]).float()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute scores for mcc model')
parser.add_argument('--path-to-data', type=str, default='./data/feats.scp', metavar='Path', help='Path to input data')
parser.add_argument('--trials-path', type=str, default='./data/trials', metavar='Path', help='Path to trials file')
parser.add_argument('--cp-path', type=str, default=None, metavar='Path', help='Path for file containing model')
parser.add_argument('--out-path', type=str, default='./out.txt', metavar='Path', help='Path to output hdf file')
parser.add_argument('--model', choices=['lstm', 'resnet', 'resnet_pca', 'lcnn_9', 'lcnn_29', 'lcnn_9_pca', 'lcnn_29_pca', 'lcnn_9_prodspec', 'lcnn_9_icqspec', 'lcnn_9_CC', 'lcnn_29_CC', 'resnet_CC'], default='lcnn_9', help='Model arch')
parser.add_argument('--n-classes', type=int, default=-1, metavar='N', help='Number of classes for the mcc case (default: binary classification)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
parser.add_argument('--no-output-file', action='store_true', default=False, help='Disables writing scores into out file')
parser.add_argument('--no-eer', action='store_true', default=False, help='Disables computation of EER')
parser.add_argument('--eval', action='store_true', default=False, help='Enables eval trials reading')
parser.add_argument('--ncoef', type=int, default=90, metavar='N', help='Number of cepstral coefs (default: 90)')
parser.add_argument('--init-coef', type=int, default=0, metavar='N', help='First cepstral coefs (default: 0)')
args = parser.parse_args()
args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False
if args.cp_path is None:
raise ValueError('There is no checkpoint/model path. Use arg --cp-path to indicate the path!')
if os.path.isfile(args.out_path):
os.remove(args.out_path)
print(args.out_path + ' Removed')
print('Cuda Mode is: {}'.format(args.cuda))
print('Selected model is: {}'.format(args.model))
if args.cuda:
device = get_freer_gpu()
if args.model == 'lstm':
model = model_.cnn_lstm(nclasses=args.n_classes)
elif args.model == 'resnet':
model = model_.ResNet(nclasses=args.n_classes)
elif args.model == 'resnet_pca':
model = model_.ResNet_pca(nclasses=args.n_classes)
elif args.model == 'lcnn_9':
model = model_.lcnn_9layers(nclasses=args.n_classes)
elif args.model == 'lcnn_29':
model = model_.lcnn_29layers_v2(nclasses=args.n_classes)
elif args.model == 'lcnn_9_pca':
model = model_.lcnn_9layers_pca(nclasses=args.n_classes)
elif args.model == 'lcnn_29_pca':
model = model_.lcnn_29layers_v2_pca(nclasses=args.n_classes)
elif args.model == 'lcnn_9_icqspec':
model = model_.lcnn_9layers_icqspec(nclasses=args.n_classes)
elif args.model == 'lcnn_9_prodspec':
model = model_.lcnn_9layers_prodspec(nclasses=args.n_classes)
elif args.model == 'lcnn_9_CC':
model = model_.lcnn_9layers_CC(nclasses=args.n_classes, ncoef=args.ncoef, init_coef=args.init_coef)
elif args.model == 'lcnn_29_CC':
model = model_.lcnn_29layers_CC(nclasses=args.n_classes, ncoef=args.ncoef, init_coef=args.init_coef)
elif args.model == 'resnet_CC':
model = model_.ResNet_CC(nclasses=args.n_classes, ncoef=args.ncoef, init_coef=args.init_coef)
print('Loading model')
ckpt = torch.load(args.cp_path, map_location = lambda storage, loc: storage)
model.load_state_dict(ckpt['model_state'], strict=False)
model.eval()
print('Model loaded')
print('Loading data')
if args.eval:
test_utts = read_trials(args.trials_path, eval_=args.eval)
else:
test_utts, attack_type_list, label_list = read_trials(args.trials_path, eval_=args.eval)
data = { k:m for k,m in read_mat_scp(args.path_to_data) }
print('Data loaded')
print('Start of scores computation')
score_list = []
with torch.no_grad():
for i, utt in enumerate(test_utts):
print('Computing score for utterance '+ utt)
feats = prep_feats(data[utt])
try:
if args.cuda:
feats = feats.to(device)
model = model.to(device)
score = 1.-F.softmax(model.forward(feats), dim=1)[:,1:].sum().item()
except:
feats = feats.cpu()
model = model.cpu()
score = 1.-F.softmax(model.forward(feats), dim=1)[:,1:].sum().item()
score_list.append(score)
print('Score: {}'.format(score_list[-1]))
if not args.no_output_file:
print('Storing scores in output file:')
print(args.out_path)
with open(args.out_path, 'w') as f:
if args.eval:
for i, utt in enumerate(test_utts):
f.write("%s" % ' '.join([utt, str(score_list[i])+'\n']))
else:
for i, utt in enumerate(test_utts):
f.write("%s" % ' '.join([utt, attack_type_list[i], label_list[i], str(score_list[i])+'\n']))
if not args.no_eer and not args.eval:
print('EER: {}'.format(compute_eer_labels(label_list, score_list)))
print('All done!!')
| [
"[email protected]"
]
| |
158c8395e7b37a739bbe7438d2a3fb3853747fb2 | 0b20f4ce14b9ff77c84cedbecbaa29831335920d | /tests/cloudformation/file_formats/test_yaml.py | 76149f86216a57acc3de965d65a22daae34bad5a | [
"Apache-2.0"
]
| permissive | sergesec488/checkov | 219c1b3864ab4f70b39a4cd79b041e98f3145364 | 56008e1c531b3626f14716067731be6e673040bc | refs/heads/master | 2023-04-10T12:26:49.749864 | 2021-02-26T18:36:52 | 2021-02-26T18:40:58 | 342,883,133 | 0 | 1 | Apache-2.0 | 2023-03-30T13:31:25 | 2021-02-27T15:01:08 | null | UTF-8 | Python | false | false | 681 | py | import os
import unittest
from checkov.cloudformation.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestYamlFileFormat(unittest.TestCase):
def test_summary(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
test_files_dir = current_dir + "/yaml"
report = runner.run(root_folder=test_files_dir)
summary = report.get_summary()
self.assertEqual(summary['passed'], 1)
self.assertEqual(summary['failed'], 0)
self.assertEqual(summary['skipped'], 0)
self.assertEqual(summary['parsing_errors'], 0)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
1161c6ec01e8bf8124199a123fc850feb16f7924 | 27c94d7e040902d3cdadd5862b15e67ec2ee4b6e | /exps/NAS-Bench-201-algos/DARTS-V1.py | 67441af82a7bc2f760fa028163eb4ca9c8887773 | [
"MIT"
]
| permissive | D-X-Y/AutoDL-Projects | 8a0779a7710d809af2b052787928d8d34c14d0d9 | f46486e21b71ae6459a700be720d7648b5429569 | refs/heads/main | 2023-08-13T10:53:49.550889 | 2022-04-24T22:18:16 | 2022-04-24T22:18:16 | 168,538,768 | 989 | 197 | MIT | 2022-04-24T22:16:21 | 2019-01-31T14:30:50 | Python | UTF-8 | Python | false | false | 15,785 | py | ##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #
########################################################
# DARTS: Differentiable Architecture Search, ICLR 2019 #
########################################################
import sys, time, random, argparse
from copy import deepcopy
import torch
from pathlib import Path
from xautodl.config_utils import load_config, dict2config, configure2str
from xautodl.datasets import get_datasets, get_nas_search_loaders
from xautodl.procedures import (
prepare_seed,
prepare_logger,
save_checkpoint,
copy_checkpoint,
get_optim_scheduler,
)
from xautodl.utils import get_model_infos, obtain_accuracy
from xautodl.log_utils import AverageMeter, time_string, convert_secs2time
from xautodl.models import get_cell_based_tiny_net, get_search_spaces
from nas_201_api import NASBench201API as API
def search_func(
xloader,
network,
criterion,
scheduler,
w_optimizer,
a_optimizer,
epoch_str,
print_freq,
logger,
gradient_clip,
):
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
network.train()
end = time.time()
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
xloader
):
scheduler.update(None, 1.0 * step / len(xloader))
base_targets = base_targets.cuda(non_blocking=True)
arch_targets = arch_targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
# update the weights
w_optimizer.zero_grad()
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets)
base_loss.backward()
if gradient_clip > 0:
torch.nn.utils.clip_grad_norm_(network.parameters(), gradient_clip)
w_optimizer.step()
# record
base_prec1, base_prec5 = obtain_accuracy(
logits.data, base_targets.data, topk=(1, 5)
)
base_losses.update(base_loss.item(), base_inputs.size(0))
base_top1.update(base_prec1.item(), base_inputs.size(0))
base_top5.update(base_prec5.item(), base_inputs.size(0))
# update the architecture-weight
a_optimizer.zero_grad()
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
arch_loss.backward()
a_optimizer.step()
# record
arch_prec1, arch_prec5 = obtain_accuracy(
logits.data, arch_targets.data, topk=(1, 5)
)
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % print_freq == 0 or step + 1 == len(xloader):
Sstr = (
"*SEARCH* "
+ time_string()
+ " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
)
Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
batch_time=batch_time, data_time=data_time
)
Wstr = "Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
loss=base_losses, top1=base_top1, top5=base_top5
)
Astr = "Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
loss=arch_losses, top1=arch_top1, top5=arch_top5
)
logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Astr)
return base_losses.avg, base_top1.avg, base_top5.avg
def valid_func(xloader, network, criterion):
data_time, batch_time = AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
network.eval()
end = time.time()
with torch.no_grad():
for step, (arch_inputs, arch_targets) in enumerate(xloader):
arch_targets = arch_targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
# prediction
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
# record
arch_prec1, arch_prec5 = obtain_accuracy(
logits.data, arch_targets.data, topk=(1, 5)
)
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
return arch_losses.avg, arch_top1.avg, arch_top5.avg
def main(xargs):
assert torch.cuda.is_available(), "CUDA is not available."
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads(xargs.workers)
prepare_seed(xargs.rand_seed)
logger = prepare_logger(args)
train_data, valid_data, xshape, class_num = get_datasets(
xargs.dataset, xargs.data_path, -1
)
# config_path = 'configs/nas-benchmark/algos/DARTS.config'
config = load_config(
xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
)
search_loader, _, valid_loader = get_nas_search_loaders(
train_data,
valid_data,
xargs.dataset,
"configs/nas-benchmark/",
config.batch_size,
xargs.workers,
)
logger.log(
"||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
xargs.dataset, len(search_loader), len(valid_loader), config.batch_size
)
)
logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))
search_space = get_search_spaces("cell", xargs.search_space_name)
if xargs.model_config is None:
model_config = dict2config(
{
"name": "DARTS-V1",
"C": xargs.channel,
"N": xargs.num_cells,
"max_nodes": xargs.max_nodes,
"num_classes": class_num,
"space": search_space,
"affine": False,
"track_running_stats": bool(xargs.track_running_stats),
},
None,
)
else:
model_config = load_config(
xargs.model_config,
{
"num_classes": class_num,
"space": search_space,
"affine": False,
"track_running_stats": bool(xargs.track_running_stats),
},
None,
)
search_model = get_cell_based_tiny_net(model_config)
logger.log("search-model :\n{:}".format(search_model))
w_optimizer, w_scheduler, criterion = get_optim_scheduler(
search_model.get_weights(), config
)
a_optimizer = torch.optim.Adam(
search_model.get_alphas(),
lr=xargs.arch_learning_rate,
betas=(0.5, 0.999),
weight_decay=xargs.arch_weight_decay,
)
logger.log("w-optimizer : {:}".format(w_optimizer))
logger.log("a-optimizer : {:}".format(a_optimizer))
logger.log("w-scheduler : {:}".format(w_scheduler))
logger.log("criterion : {:}".format(criterion))
flop, param = get_model_infos(search_model, xshape)
# logger.log('{:}'.format(search_model))
logger.log("FLOP = {:.2f} M, Params = {:.2f} MB".format(flop, param))
if xargs.arch_nas_dataset is None:
api = None
else:
api = API(xargs.arch_nas_dataset)
logger.log("{:} create API = {:} done".format(time_string(), api))
last_info, model_base_path, model_best_path = (
logger.path("info"),
logger.path("model"),
logger.path("best"),
)
network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
if last_info.exists(): # automatically resume from previous checkpoint
logger.log(
"=> loading checkpoint of the last-info '{:}' start".format(last_info)
)
last_info = torch.load(last_info)
start_epoch = last_info["epoch"]
checkpoint = torch.load(last_info["last_checkpoint"])
genotypes = checkpoint["genotypes"]
valid_accuracies = checkpoint["valid_accuracies"]
search_model.load_state_dict(checkpoint["search_model"])
w_scheduler.load_state_dict(checkpoint["w_scheduler"])
w_optimizer.load_state_dict(checkpoint["w_optimizer"])
a_optimizer.load_state_dict(checkpoint["a_optimizer"])
logger.log(
"=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
last_info, start_epoch
)
)
else:
logger.log("=> do not find the last-info file : {:}".format(last_info))
start_epoch, valid_accuracies, genotypes = (
0,
{"best": -1},
{-1: search_model.genotype()},
)
# start training
start_time, search_time, epoch_time, total_epoch = (
time.time(),
AverageMeter(),
AverageMeter(),
config.epochs + config.warmup,
)
for epoch in range(start_epoch, total_epoch):
w_scheduler.update(epoch, 0.0)
need_time = "Time Left: {:}".format(
convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
)
epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
logger.log(
"\n[Search the {:}-th epoch] {:}, LR={:}".format(
epoch_str, need_time, min(w_scheduler.get_lr())
)
)
search_w_loss, search_w_top1, search_w_top5 = search_func(
search_loader,
network,
criterion,
w_scheduler,
w_optimizer,
a_optimizer,
epoch_str,
xargs.print_freq,
logger,
xargs.gradient_clip,
)
search_time.update(time.time() - start_time)
logger.log(
"[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
)
)
valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
valid_loader, network, criterion
)
logger.log(
"[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
)
)
# check the best accuracy
valid_accuracies[epoch] = valid_a_top1
if valid_a_top1 > valid_accuracies["best"]:
valid_accuracies["best"] = valid_a_top1
genotypes["best"] = search_model.genotype()
find_best = True
else:
find_best = False
genotypes[epoch] = search_model.genotype()
logger.log(
"<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
)
# save checkpoint
save_path = save_checkpoint(
{
"epoch": epoch + 1,
"args": deepcopy(xargs),
"search_model": search_model.state_dict(),
"w_optimizer": w_optimizer.state_dict(),
"a_optimizer": a_optimizer.state_dict(),
"w_scheduler": w_scheduler.state_dict(),
"genotypes": genotypes,
"valid_accuracies": valid_accuracies,
},
model_base_path,
logger,
)
last_info = save_checkpoint(
{
"epoch": epoch + 1,
"args": deepcopy(args),
"last_checkpoint": save_path,
},
logger.path("info"),
logger,
)
if find_best:
logger.log(
"<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.".format(
epoch_str, valid_a_top1
)
)
copy_checkpoint(model_base_path, model_best_path, logger)
with torch.no_grad():
# logger.log('arch-parameters :\n{:}'.format( nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu() ))
logger.log("{:}".format(search_model.show_alphas()))
if api is not None:
logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
# measure elapsed time
epoch_time.update(time.time() - start_time)
start_time = time.time()
logger.log("\n" + "-" * 100)
logger.log(
"DARTS-V1 : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
total_epoch, search_time.sum, genotypes[total_epoch - 1]
)
)
if api is not None:
logger.log("{:}".format(api.query_by_arch(genotypes[total_epoch - 1], "200")))
logger.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser("DARTS first order")
parser.add_argument("--data_path", type=str, help="Path to dataset")
parser.add_argument(
"--dataset",
type=str,
choices=["cifar10", "cifar100", "ImageNet16-120"],
help="Choose between Cifar10/100 and ImageNet-16.",
)
# channels and number-of-cells
parser.add_argument("--search_space_name", type=str, help="The search space name.")
parser.add_argument("--max_nodes", type=int, help="The maximum number of nodes.")
parser.add_argument("--channel", type=int, help="The number of channels.")
parser.add_argument(
"--num_cells", type=int, help="The number of cells in one stage."
)
parser.add_argument(
"--track_running_stats",
type=int,
choices=[0, 1],
help="Whether use track_running_stats or not in the BN layer.",
)
parser.add_argument("--config_path", type=str, help="The config path.")
parser.add_argument(
"--model_config",
type=str,
help="The path of the model configuration. When this arg is set, it will cover max_nodes / channels / num_cells.",
)
parser.add_argument("--gradient_clip", type=float, default=5, help="")
# architecture leraning rate
parser.add_argument(
"--arch_learning_rate",
type=float,
default=3e-4,
help="learning rate for arch encoding",
)
parser.add_argument(
"--arch_weight_decay",
type=float,
default=1e-3,
help="weight decay for arch encoding",
)
# log
parser.add_argument(
"--workers",
type=int,
default=2,
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--save_dir", type=str, help="Folder to save checkpoints and log."
)
parser.add_argument(
"--arch_nas_dataset",
type=str,
help="The path to load the architecture dataset (nas-benchmark).",
)
parser.add_argument("--print_freq", type=int, help="print frequency (default: 200)")
parser.add_argument("--rand_seed", type=int, help="manual seed")
args = parser.parse_args()
if args.rand_seed is None or args.rand_seed < 0:
args.rand_seed = random.randint(1, 100000)
main(args)
| [
"[email protected]"
]
| |
c03967857b3abb3a4db4df537c2c4342ac393b68 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/scatter/marker/line/_width.py | 108770c589b4e4605b6ff605e20647ef337325b7 | [
"MIT"
]
| permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 465 | py | import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name='width', parent_name='scatter.marker.line', **kwargs
):
super(WidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=True,
edit_type='style',
min=0,
role='style',
**kwargs
)
| [
"[email protected]"
]
| |
f60987e55994a05e1fbf45fa4d8ded677baca05b | 732374714ffe0e0f2c07a493a2ee71c9271fdce0 | /mysite/settings.py | bcd771fb691401a56d55a3106a4ee650b115e261 | []
| no_license | aaronahmid/mosunhomesrealtors | 721fb20d671f1a58c64abc8bdf1209a5ab3236f1 | 561b56fd90179e163f0c861dae1d451cc1cfc662 | refs/heads/main | 2023-08-13T02:22:46.005517 | 2021-10-09T05:15:59 | 2021-10-09T05:15:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,269 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
import dj_database_url
import django_heroku
import cloudinary
import cloudinary.uploader
import cloudinary.api
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure--+3$m3fs+h3qdye&74^k@qadoro606d*%%qacpzw=&7g!ruu@l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', '.herokuapp.com', 'www.mosunhomes-realtors.com', 'mosunhomes-realtors.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
'cloudinary',
'cloudinary_storage',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Lagos'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_URL = '/static/'
STATICFILES_DIRS = os.path.join(BASE_DIR, "blog/static"),
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
cloudinary.config(
cloud_name = "thormiwa",
api_key = "584634363435482",
api_secret = "XGzynridSBzxfDGpkyOMnHAHGrA"
)
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
# Activate Django-Heroku.
django_heroku.settings(locals()) | [
"[email protected]"
]
| |
93b57b5d8ab7beae315d919322890e775a1998e9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_188/ch78_2019_04_04_19_41_08_100209.py | 61335cb39d3ea9f3ad223e32472973ee949e080e | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | from math import sqrt
def calcula_tempo(atletas):
tempo_de_conclusao = {}
for nome in atletas:
tempo_atleta = sqrt(200 / atletas[nome])
tempo_de_conclusao[nome] = tempo_atleta
return tempo_de_conclusao
def atleta_mais_rapido(dicionario):
    menor_tempo = float('inf')
    melhor_atleta = ""
    for nome in dicionario:
        if dicionario[nome] < menor_tempo:
            menor_tempo = dicionario[nome]
            melhor_atleta = nome
    return melhor_atleta
def tempo_mais_curto(dicionario):
    menor_tempo = float('inf')
    for nome in dicionario:
        if dicionario[nome] < menor_tempo:
            menor_tempo = dicionario[nome]
    return menor_tempo
nomes_aceleracoes_atletas = {}
sair = False
while not sair:
    nome = input("Digite o nome do atleta: ")
    if nome == "sair":
        sair = True
    else:
        aceleracao = int(input("Digite a aceleracao do atleta: "))
        nomes_aceleracoes_atletas[nome] = aceleracao
nomes_tempos_atletas = calcula_tempo(nomes_aceleracoes_atletas)
nome = atleta_mais_rapido(nomes_tempos_atletas)
tempo = tempo_mais_curto(nomes_tempos_atletas)
print('O vencedor é {0} com tempo de conclusão de {1} s'.format(nome, tempo)) | [
"[email protected]"
]
| |
b4cebd6904d477cd8224278ad3c87bbe2000ae9e | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-vpc/aliyunsdkvpc/request/v20160428/CreateRouterInterfaceRequest.py | f3794b0030c799277bdbb14c640f9f31c41bee1c | [
"Apache-2.0"
]
| permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,414 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateRouterInterfaceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateRouterInterface','vpc')
def get_AccessPointId(self):
return self.get_query_params().get('AccessPointId')
def set_AccessPointId(self,AccessPointId):
self.add_query_param('AccessPointId',AccessPointId)
def get_OppositeRouterId(self):
return self.get_query_params().get('OppositeRouterId')
def set_OppositeRouterId(self,OppositeRouterId):
self.add_query_param('OppositeRouterId',OppositeRouterId)
def get_OppositeAccessPointId(self):
return self.get_query_params().get('OppositeAccessPointId')
def set_OppositeAccessPointId(self,OppositeAccessPointId):
self.add_query_param('OppositeAccessPointId',OppositeAccessPointId)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Role(self):
return self.get_query_params().get('Role')
def set_Role(self,Role):
self.add_query_param('Role',Role)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_HealthCheckTargetIp(self):
return self.get_query_params().get('HealthCheckTargetIp')
def set_HealthCheckTargetIp(self,HealthCheckTargetIp):
self.add_query_param('HealthCheckTargetIp',HealthCheckTargetIp)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Spec(self):
return self.get_query_params().get('Spec')
def set_Spec(self,Spec):
self.add_query_param('Spec',Spec)
def get_OppositeInterfaceId(self):
return self.get_query_params().get('OppositeInterfaceId')
def set_OppositeInterfaceId(self,OppositeInterfaceId):
self.add_query_param('OppositeInterfaceId',OppositeInterfaceId)
def get_InstanceChargeType(self):
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self,InstanceChargeType):
self.add_query_param('InstanceChargeType',InstanceChargeType)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_AutoPay(self):
return self.get_query_params().get('AutoPay')
def set_AutoPay(self,AutoPay):
self.add_query_param('AutoPay',AutoPay)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OppositeRegionId(self):
return self.get_query_params().get('OppositeRegionId')
def set_OppositeRegionId(self,OppositeRegionId):
self.add_query_param('OppositeRegionId',OppositeRegionId)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_OppositeInterfaceOwnerId(self):
return self.get_query_params().get('OppositeInterfaceOwnerId')
def set_OppositeInterfaceOwnerId(self,OppositeInterfaceOwnerId):
self.add_query_param('OppositeInterfaceOwnerId',OppositeInterfaceOwnerId)
def get_RouterType(self):
return self.get_query_params().get('RouterType')
def set_RouterType(self,RouterType):
self.add_query_param('RouterType',RouterType)
def get_HealthCheckSourceIp(self):
return self.get_query_params().get('HealthCheckSourceIp')
def set_HealthCheckSourceIp(self,HealthCheckSourceIp):
self.add_query_param('HealthCheckSourceIp',HealthCheckSourceIp)
def get_RouterId(self):
return self.get_query_params().get('RouterId')
def set_RouterId(self,RouterId):
self.add_query_param('RouterId',RouterId)
def get_OppositeRouterType(self):
return self.get_query_params().get('OppositeRouterType')
def set_OppositeRouterType(self,OppositeRouterType):
self.add_query_param('OppositeRouterType',OppositeRouterType)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_PricingCycle(self):
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self,PricingCycle):
self.add_query_param('PricingCycle',PricingCycle) | [
"[email protected]"
]
| |
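As a usage sketch, this generated request class is normally driven through the core SDK's `AcsClient`; the credentials, region and parameter values below are placeholders, and this file does not show which parameters the API actually requires.

```python
from aliyunsdkcore.client import AcsClient

# Placeholder credentials and region; real values come from your Alibaba Cloud account.
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = CreateRouterInterfaceRequest()
request.set_Role('InitiatingSide')          # illustrative values only
request.set_Spec('Small.1')
request.set_OppositeRegionId('cn-beijing')
request.set_RouterId('<router-id>')

response = client.do_action_with_exception(request)
print(response)
```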
620b6dda3cf88205a7c9f1e46efff99abe37eb7d | 256728286889a60e5d8896efc6869483daba3280 | /cinemanio/sites/imdb/migrations/0001_initial.py | 1f14d2c4b9e565d481ff8bf0acc5215f8e05d89a | [
"MIT"
]
| permissive | cinemanio/backend | 5236be94d08ec79b9fc8d8973aee93ec8fad9b1b | c393dc8c2d59dc99aa2c3314d3372b6e2bf5497f | refs/heads/master | 2021-05-01T13:02:08.102705 | 2019-11-10T14:33:37 | 2019-11-10T14:33:37 | 121,069,149 | 4 | 0 | MIT | 2020-02-12T00:09:03 | 2018-02-11T01:00:31 | Python | UTF-8 | Python | false | false | 1,273 | py | # Generated by Django 2.0.1 on 2018-01-26 01:06
import cinemanio.sites.imdb.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ImdbMovie',
fields=[
('id', models.PositiveIntegerField(primary_key=True, serialize=False, verbose_name='IMDb ID')),
('rating', models.FloatField(blank=True, db_index=True, null=True, verbose_name='IMDb rating')),
('movie', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='imdb', to='core.Movie')),
],
bases=(models.Model, cinemanio.sites.imdb.models.UrlMixin),
),
migrations.CreateModel(
name='ImdbPerson',
fields=[
('id', models.PositiveIntegerField(primary_key=True, serialize=False, verbose_name='IMDb ID')),
('person', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='imdb', to='core.Person')),
],
bases=(models.Model, cinemanio.sites.imdb.models.UrlMixin),
),
]
| [
"[email protected]"
]
| |
edf977c8ee2771f059d611fdf4b49337c5b6119e | a4174a9d51577d9b72b4e5dcf1be56bc9b0d242b | /retinanet/model/head/builder.py | b4153ffafb41099f951afdc540259b1454c0ab31 | [
"Apache-2.0"
]
| permissive | lchen-wyze/retinanet-tensorflow2.x | 996396724c858fdc954880f3c20db7865d930a87 | 86404a2da6ec636d4b1aef768ac52f018c127798 | refs/heads/master | 2023-08-23T06:12:39.629288 | 2021-10-18T15:52:23 | 2021-10-18T15:52:23 | 418,040,957 | 0 | 0 | Apache-2.0 | 2021-10-17T06:26:21 | 2021-10-17T06:26:21 | null | UTF-8 | Python | false | false | 2,157 | py | import numpy as np
import tensorflow as tf
from retinanet.model.head.detection_head import DetectionHead
def build_detection_heads(
params,
min_level,
max_level,
conv_2d_op_params=None,
normalization_op_params=None,
activation_fn=None):
if activation_fn is None:
raise ValueError('`activation_fn` cannot be None')
box_head = DetectionHead(
num_convs=params.num_convs,
filters=params.filters,
output_filters=params.num_anchors * 4,
min_level=min_level,
max_level=max_level,
prediction_bias_initializer='zeros',
conv_2d_op_params=conv_2d_op_params,
normalization_op_params=normalization_op_params,
activation_fn=activation_fn,
name='box-head')
prior_prob_init = tf.constant_initializer(-np.log((1 - 0.01) / 0.01))
class_head = DetectionHead(
num_convs=params.num_convs,
filters=params.filters,
output_filters=params.num_anchors*params.num_classes,
min_level=min_level,
max_level=max_level,
prediction_bias_initializer=prior_prob_init,
conv_2d_op_params=conv_2d_op_params,
normalization_op_params=normalization_op_params,
activation_fn=activation_fn,
name='class-head')
return box_head, class_head
def build_auxillary_head(
num_convs,
filters,
num_anchors,
min_level,
max_level,
conv_2d_op_params=None,
normalization_op_params=None,
activation_fn=None):
if activation_fn is None:
raise ValueError('`activation_fn` cannot be None')
prior_prob_init = tf.constant_initializer(-np.log((1 - 0.5) / 0.5))
auxillary_head = DetectionHead(
num_convs=num_convs,
filters=filters,
output_filters=num_anchors,
min_level=min_level,
max_level=max_level,
prediction_bias_initializer=prior_prob_init,
conv_2d_op_params=conv_2d_op_params,
normalization_op_params=normalization_op_params,
activation_fn=activation_fn,
name='auxillary-head')
return auxillary_head
| [
"[email protected]"
]
| |
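A rough sketch of how `build_detection_heads` might be called; the `params` namespace and the `None` op-parameter values are assumptions for illustration, since `DetectionHead` (which consumes them) is not shown in this file.

```python
import tensorflow as tf
from types import SimpleNamespace

# Hypothetical head configuration; the field values are illustrative only.
head_params = SimpleNamespace(num_convs=4, filters=256, num_anchors=9, num_classes=80)

box_head, class_head = build_detection_heads(
    head_params,
    min_level=3,
    max_level=7,
    conv_2d_op_params=None,            # accepted form depends on DetectionHead (not shown)
    normalization_op_params=None,
    activation_fn=tf.nn.relu)
```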
1f0aab49aa5a6590e8778e8b8366970e2e0a08f6 | 62babb33b9bede95aac217db04636956279bb2e2 | /bit operation/1395C Boboniu and Bit Operations.py | 90ae03a3fd60423b3df792021485ced2af7a8c6a | []
| no_license | tycyd/codeforces | 0322e31daf18544944c769fd2a50c6d006015e34 | e0773f069c6c5793f9d9a07b61878a589e375a5f | refs/heads/master | 2023-08-12T05:00:39.467404 | 2021-09-30T16:39:21 | 2021-09-30T16:39:21 | 266,847,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | from sys import stdin, stdout
# 1 1 1 => 1 0 0, 0 1 1
# 1 1 0 0 => 1 0 0
#
def boboniu_and_bit_operations(n, m, a_a, b_a):
for k in range(513):
cnt = 0
for a in a_a:
for b in b_a:
if ((a & b) | k) == k:
cnt += 1
break
if cnt == n:
return k
return -1
n, m = map(int, stdin.readline().split())
a_a = list(map(int, stdin.readline().split()))
b_a = list(map(int, stdin.readline().split()))
stdout.write(str(boboniu_and_bit_operations(n, m, a_a, b_a)) + '\n')
| [
"[email protected]"
]
| |
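The solver above can also be exercised directly, bypassing stdin; the values below are meant to match the problem's first sample as I recall it (answer 2), so treat them as illustrative:

```python
# Direct call with what should be the first sample: n=4, m=2, a=[2, 6, 4, 0], b=[2, 4].
print(boboniu_and_bit_operations(4, 2, [2, 6, 4, 0], [2, 4]))  # expected output: 2
```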
bfa7845a3715e92b22b02ae33fc01bfb05d211e5 | 29e82a7b9412b10600fb5c7638c0918e08af67d7 | /exps/algos/R_EA.py | bc3345bcc4569b24352b35198ea4b2200718e996 | [
"MIT"
]
| permissive | chenmingTHU/NAS-Projects | faa2edccd821b0ae0876179a1b02e7872d4bd91e | f8f44bfb31ed50c7156f9125ba34e49159848fb7 | refs/heads/master | 2020-12-02T14:48:17.363203 | 2019-12-29T09:17:26 | 2019-12-29T09:17:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,177 | py | ##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################################
# Regularized Evolution for Image Classifier Architecture Search #
##################################################################
import os, sys, time, glob, random, argparse
import numpy as np, collections
from copy import deepcopy
import torch
import torch.nn as nn
from pathlib import Path
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, SearchDataset
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from utils import get_model_infos, obtain_accuracy
from log_utils import AverageMeter, time_string, convert_secs2time
from nas_102_api import NASBench102API as API
from models import CellStructure, get_search_spaces
class Model(object):
def __init__(self):
self.arch = None
self.accuracy = None
def __str__(self):
"""Prints a readable version of this bitstring."""
return '{:}'.format(self.arch)
def valid_func(xloader, network, criterion):
data_time, batch_time = AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
network.train()
end = time.time()
with torch.no_grad():
for step, (arch_inputs, arch_targets) in enumerate(xloader):
arch_targets = arch_targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
# prediction
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
# record
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update (arch_prec1.item(), arch_inputs.size(0))
arch_top5.update (arch_prec5.item(), arch_inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
return arch_losses.avg, arch_top1.avg, arch_top5.avg
def train_and_eval(arch, nas_bench, extra_info):
if nas_bench is not None:
arch_index = nas_bench.query_index_by_arch( arch )
assert arch_index >= 0, 'can not find this arch : {:}'.format(arch)
info = nas_bench.get_more_info(arch_index, 'cifar10-valid', None, True)
valid_acc, time_cost = info['valid-accuracy'], info['train-all-time'] + info['valid-per-time']
#_, valid_acc = info.get_metrics('cifar10-valid', 'x-valid' , 25, True) # use the validation accuracy after 25 training epochs
else:
# train a model from scratch.
raise ValueError('NOT IMPLEMENT YET')
return valid_acc, time_cost
def random_architecture_func(max_nodes, op_names):
# return a random architecture
def random_architecture():
genotypes = []
for i in range(1, max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
op_name = random.choice( op_names )
xlist.append((op_name, j))
genotypes.append( tuple(xlist) )
return CellStructure( genotypes )
return random_architecture
def mutate_arch_func(op_names):
"""Computes the architecture for a child of the given parent architecture.
  The parent architecture is cloned and mutated to produce the child architecture. The child architecture is mutated by randomly switching one operation to another.
"""
def mutate_arch_func(parent_arch):
child_arch = deepcopy( parent_arch )
node_id = random.randint(0, len(child_arch.nodes)-1)
node_info = list( child_arch.nodes[node_id] )
snode_id = random.randint(0, len(node_info)-1)
xop = random.choice( op_names )
while xop == node_info[snode_id][0]:
xop = random.choice( op_names )
node_info[snode_id] = (xop, node_info[snode_id][1])
child_arch.nodes[node_id] = tuple( node_info )
return child_arch
return mutate_arch_func
def regularized_evolution(cycles, population_size, sample_size, time_budget, random_arch, mutate_arch, nas_bench, extra_info):
"""Algorithm for regularized evolution (i.e. aging evolution).
Follows "Algorithm 1" in Real et al. "Regularized Evolution for Image
Classifier Architecture Search".
Args:
cycles: the number of cycles the algorithm should run for.
population_size: the number of individuals to keep in the population.
sample_size: the number of individuals that should participate in each tournament.
time_budget: the upper bound of searching cost
Returns:
history: a list of `Model` instances, representing all the models computed
during the evolution experiment.
"""
population = collections.deque()
history, total_time_cost = [], 0 # Not used by the algorithm, only used to report results.
# Initialize the population with random models.
while len(population) < population_size:
model = Model()
model.arch = random_arch()
model.accuracy, time_cost = train_and_eval(model.arch, nas_bench, extra_info)
population.append(model)
history.append(model)
total_time_cost += time_cost
# Carry out evolution in cycles. Each cycle produces a model and removes
# another.
#while len(history) < cycles:
while total_time_cost < time_budget:
# Sample randomly chosen models from the current population.
start_time, sample = time.time(), []
while len(sample) < sample_size:
# Inefficient, but written this way for clarity. In the case of neural
# nets, the efficiency of this line is irrelevant because training neural
# nets is the rate-determining step.
candidate = random.choice(list(population))
sample.append(candidate)
# The parent is the best model in the sample.
parent = max(sample, key=lambda i: i.accuracy)
# Create the child model and store it.
child = Model()
child.arch = mutate_arch(parent.arch)
total_time_cost += time.time() - start_time
child.accuracy, time_cost = train_and_eval(child.arch, nas_bench, extra_info)
if total_time_cost + time_cost > time_budget: # return
return history, total_time_cost
else:
total_time_cost += time_cost
population.append(child)
history.append(child)
# Remove the oldest model.
population.popleft()
return history, total_time_cost
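# A minimal usage sketch of the loop above (illustrative only; the op list, the budget
# numbers and the `api` benchmark handle are assumptions, not values used by this script):
#   ops = ['none', 'skip_connect', 'nor_conv_3x3']
#   rand_arch, mut_arch = random_architecture_func(4, ops), mutate_arch_func(ops)
#   history, cost = regularized_evolution(100, 10, 3, 12000, rand_arch, mut_arch, api, {})
#   best = max(history, key=lambda m: m.accuracy)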
def main(xargs, nas_bench):
assert torch.cuda.is_available(), 'CUDA is not available.'
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads( xargs.workers )
prepare_seed(xargs.rand_seed)
logger = prepare_logger(args)
assert xargs.dataset == 'cifar10', 'currently only support CIFAR-10'
train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
split_Fpath = 'configs/nas-benchmark/cifar-split.txt'
cifar_split = load_config(split_Fpath, None, None)
train_split, valid_split = cifar_split.train, cifar_split.valid
logger.log('Load split file from {:}'.format(split_Fpath))
config_path = 'configs/nas-benchmark/algos/R-EA.config'
config = load_config(config_path, {'class_num': class_num, 'xshape': xshape}, logger)
# To split data
train_data_v2 = deepcopy(train_data)
train_data_v2.transform = valid_data.transform
valid_data = train_data_v2
search_data = SearchDataset(xargs.dataset, train_data, train_split, valid_split)
# data loader
train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split) , num_workers=xargs.workers, pin_memory=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split), num_workers=xargs.workers, pin_memory=True)
logger.log('||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(train_loader), len(valid_loader), config.batch_size))
logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
extra_info = {'config': config, 'train_loader': train_loader, 'valid_loader': valid_loader}
search_space = get_search_spaces('cell', xargs.search_space_name)
random_arch = random_architecture_func(xargs.max_nodes, search_space)
mutate_arch = mutate_arch_func(search_space)
#x =random_arch() ; y = mutate_arch(x)
logger.log('{:} use nas_bench : {:}'.format(time_string(), nas_bench))
logger.log('-'*30 + ' start searching with the time budget of {:} s'.format(xargs.time_budget))
history, total_cost = regularized_evolution(xargs.ea_cycles, xargs.ea_population, xargs.ea_sample_size, xargs.time_budget, random_arch, mutate_arch, nas_bench if args.ea_fast_by_api else None, extra_info)
logger.log('{:} regularized_evolution finish with history of {:} arch with {:.1f} s.'.format(time_string(), len(history), total_cost))
best_arch = max(history, key=lambda i: i.accuracy)
best_arch = best_arch.arch
logger.log('{:} best arch is {:}'.format(time_string(), best_arch))
info = nas_bench.query_by_arch( best_arch )
if info is None: logger.log('Did not find this architecture : {:}.'.format(best_arch))
else : logger.log('{:}'.format(info))
logger.log('-'*100)
logger.close()
return logger.log_dir, nas_bench.query_index_by_arch( best_arch )
if __name__ == '__main__':
parser = argparse.ArgumentParser("Regularized Evolution Algorithm")
parser.add_argument('--data_path', type=str, help='Path to dataset')
parser.add_argument('--dataset', type=str, choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')
# channels and number-of-cells
parser.add_argument('--search_space_name', type=str, help='The search space name.')
parser.add_argument('--max_nodes', type=int, help='The maximum number of nodes.')
parser.add_argument('--channel', type=int, help='The number of channels.')
parser.add_argument('--num_cells', type=int, help='The number of cells in one stage.')
parser.add_argument('--ea_cycles', type=int, help='The number of cycles in EA.')
parser.add_argument('--ea_population', type=int, help='The population size in EA.')
parser.add_argument('--ea_sample_size', type=int, help='The sample size in EA.')
parser.add_argument('--ea_fast_by_api', type=int, help='Use our API to speed up the experiments or not.')
parser.add_argument('--time_budget', type=int, help='The total time cost budge for searching (in seconds).')
# log
parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')
parser.add_argument('--save_dir', type=str, help='Folder to save checkpoints and log.')
parser.add_argument('--arch_nas_dataset', type=str, help='The path to load the architecture dataset (tiny-nas-benchmark).')
parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')
parser.add_argument('--rand_seed', type=int, default=-1, help='manual seed')
args = parser.parse_args()
#if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)
args.ea_fast_by_api = args.ea_fast_by_api > 0
if args.arch_nas_dataset is None or not os.path.isfile(args.arch_nas_dataset):
nas_bench = None
else:
print ('{:} build NAS-Benchmark-API from {:}'.format(time_string(), args.arch_nas_dataset))
nas_bench = API(args.arch_nas_dataset)
if args.rand_seed < 0:
save_dir, all_indexes, num = None, [], 500
for i in range(num):
print ('{:} : {:03d}/{:03d}'.format(time_string(), i, num))
args.rand_seed = random.randint(1, 100000)
save_dir, index = main(args, nas_bench)
all_indexes.append( index )
torch.save(all_indexes, save_dir / 'results.pth')
else:
main(args, nas_bench)
| [
"[email protected]"
]
| |
4286d6e8f7466f4a7c7b415049764bd995510e58 | 272cf6bd5f56812e14c2ed0df60d626859ec2c96 | /imdb_scrapy/spiders/script.py | e4449b1818474a1e4a37f9c3fa7e6064e5dd476e | []
| no_license | abhinavjha98/scrapy_simple_hired | a1b5933be5a401585f6cdfef48299b765cf25303 | a0dbf812d1d4a5e16d8bf46633bdc95b747f2fd3 | refs/heads/master | 2023-01-24T05:46:24.639774 | 2020-11-30T17:17:09 | 2020-11-30T17:17:09 | 298,634,627 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | # -*- coding: utf-8 -*-
import scrapy
import urllib
import requests
# item class included here
class DmozItem(scrapy.Item):
# define the fields for your item here like:
ApplyLink = scrapy.Field()
Title = scrapy.Field()
Company = scrapy.Field()
Location = scrapy.Field()
salary = scrapy.Field()
Logo = scrapy.Field()
Description = scrapy.Field()
class DmozSpider(scrapy.Spider):
name = "dmoz"
page_number = 2
start_urls = [
'https://www.simplyhired.com/search?q=java&l=Philadelphia%2C+PA&job=fYxbZPaOvxUi_StIPQGdAhmm__9ReBI5jbVy7amchpkhgoG5xdkwUA'
]
BASE_URL = 'https://www.simplyhired.com'
def parse(self, response):
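        # Collect job-card links from this results page, follow each detail page,
        # then queue the next results page (pagination runs up to page 91).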
links = response.css('a.card-link').xpath("@href").extract()
for link in links:
absolute_url = self.BASE_URL + link
yield scrapy.Request(absolute_url, callback=self.parse_attr)
next_page = "https://www.simplyhired.com/search?q=java&l=Philadelphia%2C+PA&pn="+str(DmozSpider.page_number)+"&job=fYxbZPaOvxUi_StIPQGdAhmm__9ReBI5jbVy7amchpkhgoG5xdkwUA"
if DmozSpider.page_number<=91:
DmozSpider.page_number +=1
yield response.follow(next_page,callback=self.parse)
def parse_attr(self, response):
item = DmozItem()
logo = response.css('img.viewjob-company-logoImg').xpath("@src").extract()
try:
item["Logo"] = DmozSpider.BASE_URL+""+logo[0]
except:
item["Logo"] = 'none'
item["Title"] = response.css("div.viewjob-jobTitle::text").extract()
item["Location"] = response.css("div.viewjob-labelWithIcon::text")[1].extract()
item["Company"] = response.css("div.viewjob-labelWithIcon::text")[0].extract()
aa=response.css("div.p::text").extract()
text_list=""
for text in aa:
text = text.rstrip("\n")
text_list=text_list+text
item["Description"] = text_list
links = response.css('a.btn-apply').xpath("@href").extract()
# final_url = urllib.request.urlopen("https://www.simplyhired.com"+links[0],None,1).geturl()
final_url = requests.get("https://www.simplyhired.com"+links[0])
item["ApplyLink"] = final_url.url
item["salary"]=response.css("span.viewjob-labelWithIcon::text").extract()
return item | [
"[email protected]"
]
| |
eb2bae37e9c648b4f4f8701e435601f4d4be96e9 | 0f556b9d4e250df73bf1e0929dbd4afad51e82fe | /smaller_than/smaller_than.py | cb6b4e049621e62ab38344e518e8ebe479383f31 | []
| no_license | unabl4/PythonCodeClub | 0ef1cb4d145860a4fda528c2eea513d0ba6b8327 | 72d5887342c1e0b304307a0e0ac9eb78f0202c35 | refs/heads/master | 2021-04-30T04:42:03.266029 | 2019-02-18T22:09:12 | 2019-02-18T22:09:12 | 121,541,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | def smaller_than(number_1, number_2):
return None if number_1 == number_2 else min(number_1, number_2)
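# Examples: smaller_than(3, 5) -> 3, smaller_than(7, 2) -> 2, smaller_than(4, 4) -> None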
| [
"[email protected]"
]
| |
8f3b6dd785a104a1985f13ba77bbd4751286ee03 | 7fd8a09fd94d09d568d67afcb4ecf3b60a936fe2 | /Tests/TestEnvironment/test_config.py | ad9fcccfe8d638613e2087450489742dbd85bc2a | [
"MIT"
]
| permissive | dev-11/eigen-technical-task | 4c2ac82c02f2cbd6b7020d2cbfc33beca20db37f | c0b041fc2bd27d2706ccdab94f6eb618f17098bd | refs/heads/master | 2021-05-20T22:14:32.015768 | 2021-03-28T12:02:50 | 2021-03-28T12:02:50 | 252,434,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | DIRECTORIES_TO_SCAN = ['test_docs/']
TXT_FILE_EXTENSION = 'txt'
DEFAULT_INTERESTING_WEIGHT = 1
INTERESTING_RATING_THRESHOLD = 5
| [
"[email protected]"
]
| |
19838a190c48902a9799ae5a54116786d9d5576b | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2901/58744/247697.py | 300e82a6c695d61e1fd561bfba7acad1b071cf0a | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | num = int(input())
def isAlternatingBits(num):
former_bit = 0 if num & 1 else 1
while num > 0:
if num & 1 == former_bit:
return False
num >>= 1
former_bit = 0 if former_bit else 1
return True
print(str(isAlternatingBits(num)).lower())
| [
"[email protected]"
]
| |
79a8e4c562139987c47fe34f81f4bc9c48703f36 | 3db7b5409f2f9c57ab3f98bda50f8b548d98063d | /samples/tests/test_model_samples.py | ed82dd678c2f104779586f523aeefb3e7b00a9f1 | [
"Apache-2.0"
]
| permissive | googleapis/python-bigquery | 66db156b52e97565f6211b2fab5aac4e519fa798 | 3645e32aeebefe9d5a4bc71a6513942741f0f196 | refs/heads/main | 2023-09-01T07:41:24.893598 | 2023-08-23T19:04:13 | 2023-08-23T19:04:13 | 226,992,475 | 622 | 287 | Apache-2.0 | 2023-09-12T04:31:26 | 2019-12-10T00:09:04 | Python | UTF-8 | Python | false | false | 1,507 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from .. import delete_model
from .. import get_model
from .. import list_models
from .. import update_model
if typing.TYPE_CHECKING:
import pytest
def test_model_samples(
capsys: "pytest.CaptureFixture[str]", dataset_id: str, model_id: str
) -> None:
"""Since creating a model is a long operation, test all model samples in
the same test, following a typical end-to-end flow.
"""
get_model.get_model(model_id)
out, err = capsys.readouterr()
assert model_id in out
list_models.list_models(dataset_id)
out, err = capsys.readouterr()
assert "Models contained in '{}':".format(dataset_id) in out
update_model.update_model(model_id)
out, err = capsys.readouterr()
assert "This model was modified from a Python program." in out
delete_model.delete_model(model_id)
out, err = capsys.readouterr()
assert "Deleted model '{}'.".format(model_id) in out
| [
"[email protected]"
]
| |
f183c720412c131b71409791d712d87142101b8b | 6e8d58340f2be5f00d55e2629052c0bbc9dcf390 | /eggs/SQLAlchemy-0.5.6_dev_r6498-py2.6.egg/sqlalchemy/databases/mysql.py | ba6b026ea29aac857be41bbe8563e904dfc2ff43 | [
"CC-BY-2.5",
"MIT"
]
| permissive | JCVI-Cloud/galaxy-tools-prok | e57389750d33ac766e1658838cdb0aaf9a59c106 | 3c44ecaf4b2e1f2d7269eabef19cbd2e88b3a99c | refs/heads/master | 2021-05-02T06:23:05.414371 | 2014-03-21T18:12:43 | 2014-03-21T18:12:43 | 6,092,693 | 0 | 2 | NOASSERTION | 2020-07-25T20:38:17 | 2012-10-05T15:57:38 | Python | UTF-8 | Python | false | false | 97,080 | py | # -*- fill-column: 78 -*-
# mysql.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009 Michael Bayer [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MySQL database.
Overview
--------
For normal SQLAlchemy usage, importing this module is unnecessary. It will be
loaded on-demand when a MySQL connection is needed. The generic column types
like :class:`~sqlalchemy.String` and :class:`~sqlalchemy.Integer` will
automatically be adapted to the optimal matching MySQL column type.
But if you would like to use one of the MySQL-specific or enhanced column
types when creating tables with your :class:`~sqlalchemy.Table` definitions,
then you will need to import them from this module::
from sqlalchemy.databases import mysql
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('ittybittyblob', mysql.MSTinyBlob),
Column('biggy', mysql.MSBigInteger(unsigned=True)))
All standard MySQL column types are supported. The OpenGIS types are
available for use via table reflection but have no special support or mapping
to Python classes. If you're using these types and have opinions about how
OpenGIS can be smartly integrated into SQLAlchemy please join the mailing
list!
Supported Versions and Features
-------------------------------
SQLAlchemy supports 6 major MySQL versions: 3.23, 4.0, 4.1, 5.0, 5.1 and 6.0,
with capabilities increasing with more modern servers.
Versions 4.1 and higher support the basic SQL functionality that SQLAlchemy
uses in the ORM and SQL expressions. These versions pass the applicable tests
in the suite 100%. No heroic measures are taken to work around major missing
SQL features- if your server version does not support sub-selects, for
example, they won't work in SQLAlchemy either.
Currently, the only DB-API driver supported is `MySQL-Python` (also referred to
as `MySQLdb`). Either 1.2.1 or 1.2.2 is recommended. The alpha, beta and
gamma releases of 1.2.1 and 1.2.2 should be avoided. Support for Jython and
IronPython is planned.
===================================== ===============
Feature Minimum Version
===================================== ===============
sqlalchemy.orm 4.1.1
Table Reflection 3.23.x
DDL Generation 4.1.1
utf8/Full Unicode Connections 4.1.1
Transactions 3.23.15
Two-Phase Transactions 5.0.3
Nested Transactions 5.0.3
===================================== ===============
See the official MySQL documentation for detailed information about features
supported in any given server release.
Character Sets
--------------
Many MySQL server installations default to a ``latin1`` encoding for client
connections. All data sent through the connection will be converted into
``latin1``, even if you have ``utf8`` or another character set on your tables
and columns. With versions 4.1 and higher, you can change the connection
character set either through server configuration or by including the
``charset`` parameter in the URL used for ``create_engine``. The ``charset``
option is passed through to MySQL-Python and has the side-effect of also
enabling ``use_unicode`` in the driver by default. For regular encoded
strings, also pass ``use_unicode=0`` in the connection arguments::
# set client encoding to utf8; all strings come back as unicode
create_engine('mysql:///mydb?charset=utf8')
# set client encoding to utf8; all strings come back as utf8 str
create_engine('mysql:///mydb?charset=utf8&use_unicode=0')
Storage Engines
---------------
Most MySQL server installations have a default table type of ``MyISAM``, a
non-transactional table type. During a transaction, non-transactional storage
engines do not participate and continue to store table changes in autocommit
mode. For fully atomic transactions, all participating tables must use a
transactional engine such as ``InnoDB``, ``Falcon``, ``SolidDB``, ``PBXT``, etc.
Storage engines can be elected when creating tables in SQLAlchemy by supplying
a ``mysql_engine='whatever'`` to the ``Table`` constructor. Any MySQL table
creation option can be specified in this syntax::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
Keys
----
Not all MySQL storage engines support foreign keys. For ``MyISAM`` and
similar engines, the information loaded by table reflection will not include
foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload=True
)
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
an integer primary key column::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
        mytable_id INTEGER NOT NULL AUTO_INCREMENT,
        PRIMARY KEY (mytable_id)
)
You can disable this behavior by supplying ``autoincrement=False`` to the
:class:`~sqlalchemy.Column`. This flag can also be used to enable
auto-increment on a secondary column in a multi-column key for some storage
engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
SQL Mode
--------
MySQL SQL modes are supported. Modes that enable ``ANSI_QUOTES`` (such as
``ANSI``) require an engine option to modify SQLAlchemy's quoting style.
When using an ANSI-quoting mode, supply ``use_ansiquotes=True`` when
creating your ``Engine``::
create_engine('mysql://localhost/test', use_ansiquotes=True)
This is an engine-wide option and is not toggleable on a per-connection basis.
SQLAlchemy does not presume to ``SET sql_mode`` for you with this option. For
the best performance, set the quoting style server-wide in ``my.cnf`` or by
supplying ``--sql-mode`` to ``mysqld``. You can also use a
:class:`sqlalchemy.pool.Pool` listener hook to issue a ``SET SESSION
sql_mode='...'`` on connect to configure each connection.
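One way to do that (a sketch only; it assumes the ``listeners`` engine option and
the ``sqlalchemy.interfaces.PoolListener`` hook available in this release)::

  from sqlalchemy import interfaces

  class SetANSIMode(interfaces.PoolListener):
      def connect(self, dbapi_con, con_record):
          cursor = dbapi_con.cursor()
          cursor.execute("SET SESSION sql_mode='ANSI'")

  create_engine('mysql://localhost/test',
                listeners=[SetANSIMode()], use_ansiquotes=True)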
If you do not specify ``use_ansiquotes``, the regular MySQL quoting style is
used by default.
If you do issue a ``SET sql_mode`` through SQLAlchemy, the dialect must be
updated if the quoting style is changed. Again, this change will affect all
connections::
connection.execute('SET sql_mode="ansi"')
connection.dialect.use_ansiquotes = True
MySQL SQL Extensions
--------------------
Many of the MySQL SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid MySQL statement can be executed as a string as well.
Some limited direct support for MySQL extensions to SQL is currently
available.
* SELECT pragma::
select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., mysql_limit=10)
Troubleshooting
---------------
If you have problems that seem server related, first check that you are
using the most recent stable MySQL-Python package available. The Database
Notes page on the wiki at http://www.sqlalchemy.org is a good resource for
timely information affecting MySQL in SQLAlchemy.
"""
import datetime, decimal, inspect, re, sys
from array import array as _array
from sqlalchemy import exc, log, schema, sql, util
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy.sql import functions as sql_functions
from sqlalchemy.sql import compiler
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy import types as sqltypes
__all__ = (
'MSBigInteger', 'MSMediumInteger', 'MSBinary', 'MSBit', 'MSBlob', 'MSBoolean',
'MSChar', 'MSDate', 'MSDateTime', 'MSDecimal', 'MSDouble',
'MSEnum', 'MSFloat', 'MSInteger', 'MSLongBlob', 'MSLongText',
'MSMediumBlob', 'MSMediumText', 'MSNChar', 'MSNVarChar',
'MSNumeric', 'MSSet', 'MSSmallInteger', 'MSString', 'MSText',
'MSTime', 'MSTimeStamp', 'MSTinyBlob', 'MSTinyInteger',
'MSTinyText', 'MSVarBinary', 'MSYear' )
RESERVED_WORDS = set(
['accessible', 'add', 'all', 'alter', 'analyze','and', 'as', 'asc',
'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both',
'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check',
'collate', 'column', 'condition', 'constraint', 'continue', 'convert',
'create', 'cross', 'current_date', 'current_time', 'current_timestamp',
'current_user', 'cursor', 'database', 'databases', 'day_hour',
'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal',
'declare', 'default', 'delayed', 'delete', 'desc', 'describe',
'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop',
'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists',
'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8',
'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group', 'having',
'high_priority', 'hour_microsecond', 'hour_minute', 'hour_second', 'if',
'ignore', 'in', 'index', 'infile', 'inner', 'inout', 'insensitive',
'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', 'integer',
'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys', 'kill',
'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines', 'load',
'localtime', 'localtimestamp', 'lock', 'long', 'longblob', 'longtext',
'loop', 'low_priority', 'master_ssl_verify_server_cert', 'match',
'mediumblob', 'mediumint', 'mediumtext', 'middleint',
'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural',
'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize',
'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile',
'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads',
'read_only', 'read_write', 'real', 'references', 'regexp', 'release',
'rename', 'repeat', 'replace', 'require', 'restrict', 'return',
'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond',
'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial',
'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning',
'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl',
'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob',
'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo',
'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use',
'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary',
'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with',
'write', 'x509', 'xor', 'year_month', 'zerofill', # 5.0
'columns', 'fields', 'privileges', 'soname', 'tables', # 4.1
'accessible', 'linear', 'master_ssl_verify_server_cert', 'range',
'read_only', 'read_write', # 5.1
])
AUTOCOMMIT_RE = re.compile(
r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)',
re.I | re.UNICODE)
SET_RE = re.compile(
r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w',
re.I | re.UNICODE)
class _NumericType(object):
"""Base for MySQL numeric types."""
def __init__(self, kw):
self.unsigned = kw.pop('unsigned', False)
self.zerofill = kw.pop('zerofill', False)
def _extend(self, spec):
"Extend a numeric-type declaration with MySQL specific extensions."
if self.unsigned:
spec += ' UNSIGNED'
if self.zerofill:
spec += ' ZEROFILL'
return spec
class _StringType(object):
"""Base for MySQL string types."""
def __init__(self, charset=None, collation=None,
ascii=False, unicode=False, binary=False,
national=False, **kwargs):
self.charset = charset
# allow collate= or collation=
self.collation = kwargs.get('collate', collation)
self.ascii = ascii
self.unicode = unicode
self.binary = binary
self.national = national
def _extend(self, spec):
"""Extend a string-type declaration with standard SQL CHARACTER SET /
COLLATE annotations and MySQL specific extensions.
"""
if self.charset:
charset = 'CHARACTER SET %s' % self.charset
elif self.ascii:
charset = 'ASCII'
elif self.unicode:
charset = 'UNICODE'
else:
charset = None
if self.collation:
collation = 'COLLATE %s' % self.collation
elif self.binary:
collation = 'BINARY'
else:
collation = None
if self.national:
# NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
return ' '.join([c for c in ('NATIONAL', spec, collation)
if c is not None])
return ' '.join([c for c in (spec, charset, collation)
if c is not None])
def __repr__(self):
attributes = inspect.getargspec(self.__init__)[0][1:]
attributes.extend(inspect.getargspec(_StringType.__init__)[0][1:])
params = {}
for attr in attributes:
val = getattr(self, attr)
if val is not None and val is not False:
params[attr] = val
return "%s(%s)" % (self.__class__.__name__,
', '.join(['%s=%r' % (k, params[k]) for k in params]))
class MSNumeric(sqltypes.Numeric, _NumericType):
"""MySQL NUMERIC type."""
def __init__(self, precision=10, scale=2, asdecimal=True, **kw):
"""Construct a NUMERIC.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
_NumericType.__init__(self, kw)
sqltypes.Numeric.__init__(self, precision, scale, asdecimal=asdecimal, **kw)
def get_col_spec(self):
if self.precision is None:
return self._extend("NUMERIC")
else:
return self._extend("NUMERIC(%(precision)s, %(scale)s)" % {'precision': self.precision, 'scale' : self.scale})
def bind_processor(self, dialect):
return None
def result_processor(self, dialect):
if not self.asdecimal:
def process(value):
if isinstance(value, decimal.Decimal):
return float(value)
else:
return value
return process
else:
return None
class MSDecimal(MSNumeric):
"""MySQL DECIMAL type."""
def __init__(self, precision=10, scale=2, asdecimal=True, **kw):
"""Construct a DECIMAL.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(MSDecimal, self).__init__(precision, scale, asdecimal=asdecimal, **kw)
def get_col_spec(self):
if self.precision is None:
return self._extend("DECIMAL")
elif self.scale is None:
return self._extend("DECIMAL(%(precision)s)" % {'precision': self.precision})
else:
return self._extend("DECIMAL(%(precision)s, %(scale)s)" % {'precision': self.precision, 'scale' : self.scale})
class MSDouble(sqltypes.Float, _NumericType):
"""MySQL DOUBLE type."""
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DOUBLE.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
if ((precision is None and scale is not None) or
(precision is not None and scale is None)):
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether.")
_NumericType.__init__(self, kw)
sqltypes.Float.__init__(self, asdecimal=asdecimal, **kw)
self.scale = scale
self.precision = precision
def get_col_spec(self):
if self.precision is not None and self.scale is not None:
return self._extend("DOUBLE(%(precision)s, %(scale)s)" %
{'precision': self.precision,
'scale' : self.scale})
else:
return self._extend('DOUBLE')
class MSReal(MSDouble):
"""MySQL REAL type."""
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a REAL.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
MSDouble.__init__(self, precision, scale, asdecimal, **kw)
def get_col_spec(self):
if self.precision is not None and self.scale is not None:
return self._extend("REAL(%(precision)s, %(scale)s)" %
{'precision': self.precision,
'scale' : self.scale})
else:
return self._extend('REAL')
class MSFloat(sqltypes.Float, _NumericType):
"""MySQL FLOAT type."""
def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
"""Construct a FLOAT.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
_NumericType.__init__(self, kw)
sqltypes.Float.__init__(self, asdecimal=asdecimal, **kw)
self.scale = scale
self.precision = precision
def get_col_spec(self):
if self.scale is not None and self.precision is not None:
return self._extend("FLOAT(%s, %s)" % (self.precision, self.scale))
elif self.precision is not None:
return self._extend("FLOAT(%s)" % (self.precision,))
else:
return self._extend("FLOAT")
def bind_processor(self, dialect):
return None
class MSInteger(sqltypes.Integer, _NumericType):
"""MySQL INTEGER type."""
def __init__(self, display_width=None, **kw):
"""Construct an INTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
if 'length' in kw:
util.warn_deprecated("'length' is deprecated for MSInteger and subclasses. Use 'display_width'.")
self.display_width = kw.pop('length')
else:
self.display_width = display_width
_NumericType.__init__(self, kw)
sqltypes.Integer.__init__(self, **kw)
def get_col_spec(self):
if self.display_width is not None:
return self._extend("INTEGER(%(display_width)s)" % {'display_width': self.display_width})
else:
return self._extend("INTEGER")
class MSBigInteger(MSInteger):
"""MySQL BIGINTEGER type."""
def __init__(self, display_width=None, **kw):
"""Construct a BIGINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(MSBigInteger, self).__init__(display_width, **kw)
def get_col_spec(self):
if self.display_width is not None:
return self._extend("BIGINT(%(display_width)s)" % {'display_width': self.display_width})
else:
return self._extend("BIGINT")
class MSMediumInteger(MSInteger):
"""MySQL MEDIUMINTEGER type."""
def __init__(self, display_width=None, **kw):
"""Construct a MEDIUMINTEGER
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(MSMediumInteger, self).__init__(display_width, **kw)
def get_col_spec(self):
if self.display_width is not None:
return self._extend("MEDIUMINT(%(display_width)s)" % {'display_width': self.display_width})
else:
return self._extend("MEDIUMINT")
class MSTinyInteger(MSInteger):
"""MySQL TINYINT type."""
def __init__(self, display_width=None, **kw):
"""Construct a TINYINT.
Note: following the usual MySQL conventions, TINYINT(1) columns
reflected during Table(..., autoload=True) are treated as
Boolean columns.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(MSTinyInteger, self).__init__(display_width, **kw)
def get_col_spec(self):
if self.display_width is not None:
return self._extend("TINYINT(%s)" % self.display_width)
else:
return self._extend("TINYINT")
class MSSmallInteger(sqltypes.Smallinteger, MSInteger):
"""MySQL SMALLINTEGER type."""
def __init__(self, display_width=None, **kw):
"""Construct a SMALLINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
self.display_width = display_width
_NumericType.__init__(self, kw)
sqltypes.SmallInteger.__init__(self, **kw)
def get_col_spec(self):
if self.display_width is not None:
return self._extend("SMALLINT(%(display_width)s)" % {'display_width': self.display_width})
else:
return self._extend("SMALLINT")
class MSBit(sqltypes.TypeEngine):
"""MySQL BIT type.
    This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater for
    MEMORY, InnoDB and BDB. For older versions, use an MSTinyInteger() type.
"""
def __init__(self, length=None):
"""Construct a BIT.
:param length: Optional, number of bits.
"""
self.length = length
def result_processor(self, dialect):
"""Convert a MySQL's 64 bit, variable length binary string to a long."""
def process(value):
if value is not None:
v = 0L
for i in map(ord, value):
v = v << 8 | i
value = v
return value
return process
def get_col_spec(self):
if self.length is not None:
return "BIT(%s)" % self.length
else:
return "BIT"
class MSDateTime(sqltypes.DateTime):
"""MySQL DATETIME type."""
def get_col_spec(self):
return "DATETIME"
class MSDate(sqltypes.Date):
"""MySQL DATE type."""
def get_col_spec(self):
return "DATE"
class MSTime(sqltypes.Time):
"""MySQL TIME type."""
def get_col_spec(self):
return "TIME"
def result_processor(self, dialect):
def process(value):
# convert from a timedelta value
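            # e.g. timedelta(seconds=3661) becomes datetime.time(1, 1, 1)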
if value is not None:
return datetime.time(value.seconds/60/60, value.seconds/60%60, value.seconds - (value.seconds/60*60))
else:
return None
return process
class MSTimeStamp(sqltypes.TIMESTAMP):
"""MySQL TIMESTAMP type.
To signal the orm to automatically re-select modified rows to retrieve the
updated timestamp, add a ``server_default`` to your
:class:`~sqlalchemy.Column` specification::
from sqlalchemy.databases import mysql
Column('updated', mysql.MSTimeStamp,
server_default=sql.text('CURRENT_TIMESTAMP')
)
The full range of MySQL 4.1+ TIMESTAMP defaults can be specified in
    the default::

      server_default=sql.text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP')
"""
def get_col_spec(self):
return "TIMESTAMP"
class MSYear(sqltypes.TypeEngine):
"""MySQL YEAR type, for single byte storage of years 1901-2155."""
def __init__(self, display_width=None):
self.display_width = display_width
def get_col_spec(self):
if self.display_width is None:
return "YEAR"
else:
return "YEAR(%s)" % self.display_width
class MSText(_StringType, sqltypes.Text):
"""MySQL TEXT type, for text up to 2^16 characters."""
def __init__(self, length=None, **kwargs):
"""Construct a TEXT.
:param length: Optional, if provided the server may optimize storage
by substituting the smallest TEXT type sufficient to store
``length`` characters.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
_StringType.__init__(self, **kwargs)
sqltypes.Text.__init__(self, length,
kwargs.get('convert_unicode', False), kwargs.get('assert_unicode', None))
def get_col_spec(self):
if self.length:
return self._extend("TEXT(%d)" % self.length)
else:
return self._extend("TEXT")
class MSTinyText(MSText):
"""MySQL TINYTEXT type, for text up to 2^8 characters."""
def __init__(self, **kwargs):
"""Construct a TINYTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(MSTinyText, self).__init__(**kwargs)
def get_col_spec(self):
return self._extend("TINYTEXT")
class MSMediumText(MSText):
"""MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
def __init__(self, **kwargs):
"""Construct a MEDIUMTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(MSMediumText, self).__init__(**kwargs)
def get_col_spec(self):
return self._extend("MEDIUMTEXT")
class MSLongText(MSText):
"""MySQL LONGTEXT type, for text up to 2^32 characters."""
def __init__(self, **kwargs):
"""Construct a LONGTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(MSLongText, self).__init__(**kwargs)
def get_col_spec(self):
return self._extend("LONGTEXT")
class MSString(_StringType, sqltypes.String):
"""MySQL VARCHAR type, for variable-length character data."""
def __init__(self, length=None, **kwargs):
"""Construct a VARCHAR.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
_StringType.__init__(self, **kwargs)
sqltypes.String.__init__(self, length,
kwargs.get('convert_unicode', False), kwargs.get('assert_unicode', None))
def get_col_spec(self):
if self.length:
return self._extend("VARCHAR(%d)" % self.length)
else:
return self._extend("VARCHAR")
class MSChar(_StringType, sqltypes.CHAR):
"""MySQL CHAR type, for fixed-length character data."""
def __init__(self, length, **kwargs):
"""Construct an NCHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
_StringType.__init__(self, **kwargs)
sqltypes.CHAR.__init__(self, length,
kwargs.get('convert_unicode', False))
def get_col_spec(self):
return self._extend("CHAR(%(length)s)" % {'length' : self.length})
class MSNVarChar(_StringType, sqltypes.String):
"""MySQL NVARCHAR type.
For variable-length character data in the server's configured national
character set.
"""
def __init__(self, length=None, **kwargs):
"""Construct an NVARCHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
kwargs['national'] = True
_StringType.__init__(self, **kwargs)
sqltypes.String.__init__(self, length,
kwargs.get('convert_unicode', False))
def get_col_spec(self):
# We'll actually generate the equiv. "NATIONAL VARCHAR" instead
# of "NVARCHAR".
return self._extend("VARCHAR(%(length)s)" % {'length': self.length})
class MSNChar(_StringType, sqltypes.CHAR):
"""MySQL NCHAR type.
For fixed-length character data in the server's configured national
character set.
"""
def __init__(self, length=None, **kwargs):
"""Construct an NCHAR. Arguments are:
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
kwargs['national'] = True
_StringType.__init__(self, **kwargs)
sqltypes.CHAR.__init__(self, length,
kwargs.get('convert_unicode', False))
def get_col_spec(self):
# We'll actually generate the equiv. "NATIONAL CHAR" instead of "NCHAR".
return self._extend("CHAR(%(length)s)" % {'length': self.length})
class _BinaryType(sqltypes.Binary):
"""Base for MySQL binary types."""
def get_col_spec(self):
if self.length:
return "BLOB(%d)" % self.length
else:
return "BLOB"
def result_processor(self, dialect):
def process(value):
if value is None:
return None
else:
return util.buffer(value)
return process
class MSVarBinary(_BinaryType):
"""MySQL VARBINARY type, for variable length binary data."""
def __init__(self, length=None, **kw):
"""Construct a VARBINARY. Arguments are:
        :param length: Maximum data length, in bytes.
"""
super(MSVarBinary, self).__init__(length, **kw)
def get_col_spec(self):
if self.length:
return "VARBINARY(%d)" % self.length
else:
return "BLOB"
class MSBinary(_BinaryType):
"""MySQL BINARY type, for fixed length binary data"""
def __init__(self, length=None, **kw):
"""Construct a BINARY.
This is a fixed length type, and short values will be right-padded
with a server-version-specific pad value.
:param length: Maximum data length, in bytes. If length is not
specified, this will generate a BLOB. This usage is deprecated.
"""
super(MSBinary, self).__init__(length, **kw)
def get_col_spec(self):
if self.length:
return "BINARY(%d)" % self.length
else:
return "BLOB"
def result_processor(self, dialect):
def process(value):
if value is None:
return None
else:
return util.buffer(value)
return process
class MSBlob(_BinaryType):
"""MySQL BLOB type, for binary data up to 2^16 bytes"""
def __init__(self, length=None, **kw):
"""Construct a BLOB. Arguments are:
:param length: Optional, if provided the server may optimize storage
          by substituting the smallest BLOB type sufficient to store
          ``length`` bytes.
"""
super(MSBlob, self).__init__(length, **kw)
def get_col_spec(self):
if self.length:
return "BLOB(%d)" % self.length
else:
return "BLOB"
def result_processor(self, dialect):
def process(value):
if value is None:
return None
else:
return util.buffer(value)
return process
def __repr__(self):
return "%s()" % self.__class__.__name__
class MSTinyBlob(MSBlob):
"""MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
def get_col_spec(self):
return "TINYBLOB"
class MSMediumBlob(MSBlob):
"""MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
def get_col_spec(self):
return "MEDIUMBLOB"
class MSLongBlob(MSBlob):
"""MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
def get_col_spec(self):
return "LONGBLOB"
class MSEnum(MSString):
"""MySQL ENUM type."""
def __init__(self, *enums, **kw):
"""Construct an ENUM.
Example:
Column('myenum', MSEnum("foo", "bar", "baz"))
Arguments are:
:param enums: The range of valid values for this ENUM. Values will be
quoted when generating the schema according to the quoting flag (see
below).
:param strict: Defaults to False: ensure that a given value is in this
ENUM's range of permissible values when inserting or updating rows.
Note that MySQL will not raise a fatal error if you attempt to store
          an out-of-range value; an alternate value will be stored instead.
(See MySQL ENUM documentation.)
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
:param quoting: Defaults to 'auto': automatically determine enum value
quoting. If all enum values are surrounded by the same quoting
character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
'quoted': values in enums are already quoted, they will be used
directly when generating the schema.
'unquoted': values in enums are not quoted, they will be escaped and
surrounded by single quotes when generating the schema.
Previous versions of this type always required manually quoted
values to be supplied; future versions will always quote the string
literals for you. This is a transitional option.
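
          A short sketch of both modes (the values shown are illustrative
          only)::

            Column('myenum', MSEnum("foo", "bar", "baz", quoting='unquoted'))
            Column('myenum', MSEnum("'foo'", "'bar'", "'baz'", quoting='quoted'))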
"""
self.quoting = kw.pop('quoting', 'auto')
if self.quoting == 'auto':
# What quoting character are we using?
q = None
for e in enums:
if len(e) == 0:
self.quoting = 'unquoted'
break
elif q is None:
q = e[0]
if e[0] != q or e[-1] != q:
self.quoting = 'unquoted'
break
else:
self.quoting = 'quoted'
if self.quoting == 'quoted':
util.warn_pending_deprecation(
'Manually quoting ENUM value literals is deprecated. Supply '
'unquoted values and use the quoting= option in cases of '
'ambiguity.')
strip_enums = []
for a in enums:
if a[0:1] == '"' or a[0:1] == "'":
# strip enclosing quotes and unquote interior
a = a[1:-1].replace(a[0] * 2, a[0])
strip_enums.append(a)
self.enums = strip_enums
else:
self.enums = list(enums)
self.strict = kw.pop('strict', False)
length = max([len(v) for v in self.enums] + [0])
super(MSEnum, self).__init__(length, **kw)
def bind_processor(self, dialect):
super_convert = super(MSEnum, self).bind_processor(dialect)
def process(value):
if self.strict and value is not None and value not in self.enums:
raise exc.InvalidRequestError('"%s" not a valid value for '
'this enum' % value)
if super_convert:
return super_convert(value)
else:
return value
return process
def get_col_spec(self):
quoted_enums = []
for e in self.enums:
quoted_enums.append("'%s'" % e.replace("'", "''"))
return self._extend("ENUM(%s)" % ",".join(quoted_enums))
class MSSet(MSString):
"""MySQL SET type."""
def __init__(self, *values, **kw):
"""Construct a SET.
Example::
Column('myset', MSSet("'foo'", "'bar'", "'baz'"))
Arguments are:
:param values: The range of valid values for this SET. Values will be
used exactly as they appear when generating schemas. Strings must
be quoted, as in the example above. Single-quotes are suggested for
ANSI compatibility and are required for portability to servers with
ANSI_QUOTES enabled.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
self.__ddl_values = values
strip_values = []
for a in values:
if a[0:1] == '"' or a[0:1] == "'":
# strip enclosing quotes and unquote interior
a = a[1:-1].replace(a[0] * 2, a[0])
strip_values.append(a)
self.values = strip_values
length = max([len(v) for v in strip_values] + [0])
super(MSSet, self).__init__(length, **kw)
def result_processor(self, dialect):
def process(value):
# The good news:
# No ',' quoting issues- commas aren't allowed in SET values
# The bad news:
# Plenty of driver inconsistencies here.
if isinstance(value, util.set_types):
# ..some versions convert '' to an empty set
if not value:
value.add('')
# ..some return sets.Set, even for pythons that have __builtin__.set
if not isinstance(value, set):
value = set(value)
return value
# ...and some versions return strings
if value is not None:
return set(value.split(','))
else:
return value
return process
def bind_processor(self, dialect):
super_convert = super(MSSet, self).bind_processor(dialect)
def process(value):
if value is None or isinstance(value, (int, long, basestring)):
pass
else:
if None in value:
value = set(value)
value.remove(None)
value.add('')
value = ','.join(value)
if super_convert:
return super_convert(value)
else:
return value
return process
def get_col_spec(self):
return self._extend("SET(%s)" % ",".join(self.__ddl_values))
class MSBoolean(sqltypes.Boolean):
"""MySQL BOOLEAN type."""
def get_col_spec(self):
return "BOOL"
def result_processor(self, dialect):
def process(value):
if value is None:
return None
return value and True or False
return process
def bind_processor(self, dialect):
def process(value):
if value is True:
return 1
elif value is False:
return 0
elif value is None:
return None
else:
return value and True or False
return process
colspecs = {
sqltypes.Integer: MSInteger,
sqltypes.Smallinteger: MSSmallInteger,
sqltypes.Numeric: MSNumeric,
sqltypes.Float: MSFloat,
sqltypes.DateTime: MSDateTime,
sqltypes.Date: MSDate,
sqltypes.Time: MSTime,
sqltypes.String: MSString,
sqltypes.Binary: MSBlob,
sqltypes.Boolean: MSBoolean,
sqltypes.Text: MSText,
sqltypes.CHAR: MSChar,
sqltypes.NCHAR: MSNChar,
sqltypes.TIMESTAMP: MSTimeStamp,
sqltypes.BLOB: MSBlob,
MSDouble: MSDouble,
MSReal: MSReal,
_BinaryType: _BinaryType,
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
ischema_names = {
'bigint': MSBigInteger,
'binary': MSBinary,
'bit': MSBit,
'blob': MSBlob,
'boolean':MSBoolean,
'char': MSChar,
'date': MSDate,
'datetime': MSDateTime,
'decimal': MSDecimal,
'double': MSDouble,
'enum': MSEnum,
'fixed': MSDecimal,
'float': MSFloat,
'int': MSInteger,
'integer': MSInteger,
'longblob': MSLongBlob,
'longtext': MSLongText,
'mediumblob': MSMediumBlob,
'mediumint': MSMediumInteger,
'mediumtext': MSMediumText,
'nchar': MSNChar,
'nvarchar': MSNVarChar,
'numeric': MSNumeric,
'set': MSSet,
'smallint': MSSmallInteger,
'text': MSText,
'time': MSTime,
'timestamp': MSTimeStamp,
'tinyblob': MSTinyBlob,
'tinyint': MSTinyInteger,
'tinytext': MSTinyText,
'varbinary': MSVarBinary,
'varchar': MSString,
'year': MSYear,
}
class MySQLExecutionContext(default.DefaultExecutionContext):
def post_exec(self):
if self.compiled.isinsert and not self.executemany:
if (not len(self._last_inserted_ids) or
self._last_inserted_ids[0] is None):
self._last_inserted_ids = ([self.cursor.lastrowid] +
self._last_inserted_ids[1:])
elif (not self.isupdate and not self.should_autocommit and
self.statement and SET_RE.match(self.statement)):
# This misses if a user forces autocommit on text('SET NAMES'),
# which is probably a programming error anyhow.
self.connection.info.pop(('mysql', 'charset'), None)
def should_autocommit_text(self, statement):
return AUTOCOMMIT_RE.match(statement)
class MySQLDialect(default.DefaultDialect):
"""Details of the MySQL dialect. Not used directly in application code."""
name = 'mysql'
supports_alter = True
supports_unicode_statements = False
# identifiers are 64, however aliases can be 255...
max_identifier_length = 255
supports_sane_rowcount = True
default_paramstyle = 'format'
def __init__(self, use_ansiquotes=None, **kwargs):
self.use_ansiquotes = use_ansiquotes
default.DefaultDialect.__init__(self, **kwargs)
def dbapi(cls):
import MySQLdb as mysql
return mysql
dbapi = classmethod(dbapi)
def create_connect_args(self, url):
opts = url.translate_connect_args(database='db', username='user',
password='passwd')
opts.update(url.query)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'connect_timeout', int)
util.coerce_kw_type(opts, 'client_flag', int)
util.coerce_kw_type(opts, 'local_infile', int)
# Note: using either of the below will cause all strings to be returned
# as Unicode, both in raw SQL operations and with column types like
# String and MSString.
util.coerce_kw_type(opts, 'use_unicode', bool)
util.coerce_kw_type(opts, 'charset', str)
# Rich values 'cursorclass' and 'conv' are not supported via
# query string.
ssl = {}
for key in ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts['ssl'] = ssl
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
client_flag = opts.get('client_flag', 0)
if self.dbapi is not None:
try:
import MySQLdb.constants.CLIENT as CLIENT_FLAGS
client_flag |= CLIENT_FLAGS.FOUND_ROWS
except:
pass
opts['client_flag'] = client_flag
return [[], opts]
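# Rough sketch of the translation above for a hypothetical URL (keys and
# values are illustrative, not exhaustive):
#
#   mysql://scott:tiger@localhost/test?charset=utf8&compress=1
#   -> [[], {'host': 'localhost', 'user': 'scott', 'passwd': 'tiger',
#            'db': 'test', 'charset': 'utf8', 'compress': True,
#            'client_flag': ...}]  # FOUND_ROWS OR'd in when MySQLdb is present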
def type_descriptor(self, typeobj):
return sqltypes.adapt_type(typeobj, colspecs)
def do_executemany(self, cursor, statement, parameters, context=None):
rowcount = cursor.executemany(statement, parameters)
if context is not None:
context._rowcount = rowcount
def supports_unicode_statements(self):
return True
def do_commit(self, connection):
"""Execute a COMMIT."""
# COMMIT/ROLLBACK were introduced in 3.23.15.
# Yes, we have at least one user who has to talk to these old versions!
#
# Ignore commit/rollback if support isn't present, otherwise even basic
# operations via autocommit fail.
try:
connection.commit()
except:
if self._server_version_info(connection) < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_rollback(self, connection):
"""Execute a ROLLBACK."""
try:
connection.rollback()
except:
if self._server_version_info(connection) < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_begin_twophase(self, connection, xid):
connection.execute("XA BEGIN %s", xid)
def do_prepare_twophase(self, connection, xid):
connection.execute("XA END %s", xid)
connection.execute("XA PREPARE %s", xid)
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
connection.execute("XA END %s", xid)
connection.execute("XA ROLLBACK %s", xid)
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
connection.execute("XA COMMIT %s", xid)
def do_recover_twophase(self, connection):
resultset = connection.execute("XA RECOVER")
return [row['data'][0:row['gtrid_length']] for row in resultset]
def do_ping(self, connection):
connection.ping()
def is_disconnect(self, e):
if isinstance(e, self.dbapi.OperationalError):
return e.args[0] in (2006, 2013, 2014, 2045, 2055)
elif isinstance(e, self.dbapi.InterfaceError): # if underlying connection is closed, this is the error you get
return "(0, '')" in str(e)
else:
return False
def get_default_schema_name(self, connection):
return connection.execute('SELECT DATABASE()').scalar()
get_default_schema_name = engine_base.connection_memoize(
('dialect', 'default_schema_name'))(get_default_schema_name)
def table_names(self, connection, schema):
"""Return a Unicode SHOW TABLES from a given schema."""
charset = self._detect_charset(connection)
self._autoset_identifier_style(connection)
rp = connection.execute("SHOW TABLES FROM %s" %
self.identifier_preparer.quote_identifier(schema))
return [row[0] for row in _compat_fetchall(rp, charset=charset)]
def has_table(self, connection, table_name, schema=None):
# SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly
# on macosx (and maybe win?) with multibyte table names.
#
# TODO: if this is not a problem on win, make the strategy swappable
# based on platform. DESCRIBE is slower.
# [ticket:726]
# full_name = self.identifier_preparer.format_table(table,
# use_schema=True)
self._autoset_identifier_style(connection)
full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
schema, table_name))
st = "DESCRIBE %s" % full_name
rs = None
try:
try:
rs = connection.execute(st)
have = rs.rowcount > 0
rs.close()
return have
except exc.SQLError, e:
if e.orig.args[0] == 1146:
return False
raise
finally:
if rs:
rs.close()
def server_version_info(self, connection):
"""A tuple of the database server version.
Formats the remote server version as a tuple of version values,
e.g. ``(5, 0, 44)``. If there are strings in the version number
they will be in the tuple too, so don't count on these all being
``int`` values.
This is a fast check that does not require a round trip. It is also
cached per-Connection.
"""
return self._server_version_info(connection.connection.connection)
server_version_info = engine_base.connection_memoize(
('mysql', 'server_version_info'))(server_version_info)
def _server_version_info(self, dbapi_con):
"""Convert a MySQL-python server_info string into a tuple."""
version = []
r = re.compile('[.\-]')
for n in r.split(dbapi_con.get_server_info()):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
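# e.g. a driver string such as "5.0.45-log" (illustrative) becomes
# (5, 0, 45, 'log'), so tuple comparisons like < (4, 1, 0) elsewhere in the
# dialect keep working even when a non-numeric suffix is present.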
def reflecttable(self, connection, table, include_columns):
"""Load column definitions from the server."""
charset = self._detect_charset(connection)
self._autoset_identifier_style(connection)
try:
reflector = self.reflector
except AttributeError:
preparer = self.identifier_preparer
if (self.server_version_info(connection) < (4, 1) and
self.use_ansiquotes):
# ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
preparer = MySQLIdentifierPreparer(self)
self.reflector = reflector = MySQLSchemaReflector(preparer)
sql = self._show_create_table(connection, table, charset)
if sql.startswith('CREATE ALGORITHM'):
# Adapt views to something table-like.
columns = self._describe_table(connection, table, charset)
sql = reflector._describe_to_create(table, columns)
self._adjust_casing(connection, table)
return reflector.reflect(connection, table, sql, charset,
only=include_columns)
def _adjust_casing(self, connection, table, charset=None):
"""Adjust Table name to the server case sensitivity, if needed."""
casing = self._detect_casing(connection)
# For winxx database hosts. TODO: is this really needed?
if casing == 1 and table.name != table.name.lower():
table.name = table.name.lower()
lc_alias = schema._get_table_key(table.name, table.schema)
table.metadata.tables[lc_alias] = table
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Allow user override, won't sniff if force_charset is set.
if ('mysql', 'force_charset') in connection.info:
return connection.info[('mysql', 'force_charset')]
# Note: MySQL-python 1.2.1c7 seems to ignore changes made
# on a connection via set_character_set()
if self.server_version_info(connection) < (4, 1, 0):
try:
return connection.connection.character_set_name()
except AttributeError:
# < 1.2.1 final MySQL-python drivers have no charset support.
# a query is needed.
pass
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict([(row[0], row[1]) for row in _compat_fetchall(rs)])
if 'character_set_results' in opts:
return opts['character_set_results']
try:
return connection.connection.character_set_name()
except AttributeError:
# Still no charset on < 1.2.1 final...
if 'character_set' in opts:
return opts['character_set']
else:
util.warn(
"Could not detect the connection character set with this "
"combination of MySQL server and MySQL-python. "
"MySQL-python >= 1.2.2 is recommended. Assuming latin1.")
return 'latin1'
_detect_charset = engine_base.connection_memoize(
('mysql', 'charset'))(_detect_charset)
def _detect_casing(self, connection):
"""Sniff out identifier case sensitivity.
Cached per-connection. This value can not change without a server
restart.
"""
# http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html
charset = self._detect_charset(connection)
row = _compat_fetchone(connection.execute(
"SHOW VARIABLES LIKE 'lower_case_table_names'"),
charset=charset)
if not row:
cs = 0
else:
# 4.0.15 returns OFF or ON according to [ticket:489]
# 3.23 doesn't, 4.0.27 doesn't..
if row[1] == 'OFF':
cs = 0
elif row[1] == 'ON':
cs = 1
else:
cs = int(row[1])
row.close()
return cs
_detect_casing = engine_base.connection_memoize(
('mysql', 'lower_case_table_names'))(_detect_casing)
def _detect_collations(self, connection):
"""Pull the active COLLATIONS list from the server.
Cached per-connection.
"""
collations = {}
if self.server_version_info(connection) < (4, 1, 0):
pass
else:
charset = self._detect_charset(connection)
rs = connection.execute('SHOW COLLATION')
for row in _compat_fetchall(rs, charset):
collations[row[0]] = row[1]
return collations
_detect_collations = engine_base.connection_memoize(
('mysql', 'collations'))(_detect_collations)
def use_ansiquotes(self, useansi):
self._use_ansiquotes = useansi
if useansi:
self.preparer = MySQLANSIIdentifierPreparer
else:
self.preparer = MySQLIdentifierPreparer
# icky
if hasattr(self, 'identifier_preparer'):
self.identifier_preparer = self.preparer(self)
if hasattr(self, 'reflector'):
del self.reflector
use_ansiquotes = property(lambda s: s._use_ansiquotes, use_ansiquotes,
doc="True if ANSI_QUOTES is in effect.")
def _autoset_identifier_style(self, connection, charset=None):
"""Detect and adjust for the ANSI_QUOTES sql mode.
If the dialect's use_ansiquotes is unset, query the server's sql mode
and reset the identifier style.
Note that this currently *only* runs during reflection. Ideally this
would run the first time a connection pool connects to the database,
but the infrastructure for that is not yet in place.
"""
if self.use_ansiquotes is not None:
return
row = _compat_fetchone(
connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
charset=charset)
if not row:
mode = ''
else:
mode = row[1] or ''
# 4.0
if mode.isdigit():
mode_no = int(mode)
mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or ''
self.use_ansiquotes = 'ANSI_QUOTES' in mode
def _show_create_table(self, connection, table, charset=None,
full_name=None):
"""Run SHOW CREATE TABLE for a ``Table``."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "SHOW CREATE TABLE %s" % full_name
rp = None
try:
try:
rp = connection.execute(st)
except exc.SQLError, e:
if e.orig.args[0] == 1146:
raise exc.NoSuchTableError(full_name)
else:
raise
row = _compat_fetchone(rp, charset=charset)
if not row:
raise exc.NoSuchTableError(full_name)
return row[1].strip()
finally:
if rp:
rp.close()
def _describe_table(self, connection, table, charset=None,
full_name=None):
"""Run DESCRIBE for a ``Table`` and return processed rows."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "DESCRIBE %s" % full_name
rp, rows = None, None
try:
try:
rp = connection.execute(st)
except exc.SQLError, e:
if e.orig.args[0] == 1146:
raise exc.NoSuchTableError(full_name)
else:
raise
rows = _compat_fetchall(rp, charset=charset)
finally:
if rp:
rp.close()
return rows
class _MySQLPythonRowProxy(object):
"""Return consistent column values for all versions of MySQL-python.
Smooth over data type issues (esp. with alpha driver versions) and
normalize strings as Unicode regardless of user-configured driver
encoding settings.
"""
# Some MySQL-python versions can return some columns as
# sets.Set(['value']) (seriously) but thankfully that doesn't
# seem to come up in DDL queries.
def __init__(self, rowproxy, charset):
self.rowproxy = rowproxy
self.charset = charset
def __getitem__(self, index):
item = self.rowproxy[index]
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, str):
return item.decode(self.charset)
else:
return item
def __getattr__(self, attr):
item = getattr(self.rowproxy, attr)
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, str):
return item.decode(self.charset)
else:
return item
class MySQLCompiler(compiler.DefaultCompiler):
operators = compiler.DefaultCompiler.operators.copy()
operators.update({
sql_operators.concat_op: lambda x, y: "concat(%s, %s)" % (x, y),
sql_operators.mod: '%%',
sql_operators.match_op: lambda x, y: "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % (x, y)
})
functions = compiler.DefaultCompiler.functions.copy()
functions.update({
sql_functions.random: 'rand%(expr)s',
"utc_timestamp":"UTC_TIMESTAMP"
})
extract_map = compiler.DefaultCompiler.extract_map.copy()
extract_map.update({
'milliseconds': 'millisecond',
})
def visit_typeclause(self, typeclause):
type_ = typeclause.type.dialect_impl(self.dialect)
if isinstance(type_, MSInteger):
if getattr(type_, 'unsigned', False):
return 'UNSIGNED INTEGER'
else:
return 'SIGNED INTEGER'
elif isinstance(type_, (MSDecimal, MSDateTime, MSDate, MSTime)):
return type_.get_col_spec()
elif isinstance(type_, MSText):
return 'CHAR'
elif (isinstance(type_, _StringType) and not
isinstance(type_, (MSEnum, MSSet))):
if getattr(type_, 'length'):
return 'CHAR(%s)' % type_.length
else:
return 'CHAR'
elif isinstance(type_, _BinaryType):
return 'BINARY'
elif isinstance(type_, MSNumeric):
return type_.get_col_spec().replace('NUMERIC', 'DECIMAL')
elif isinstance(type_, MSTimeStamp):
return 'DATETIME'
elif isinstance(type_, (MSDateTime, MSDate, MSTime)):
return type_.get_col_spec()
else:
return None
def visit_cast(self, cast, **kwargs):
# No cast until 4, no decimals until 5.
type_ = self.process(cast.typeclause)
if type_ is None:
return self.process(cast.clause)
return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)
def post_process_text(self, text):
if '%%' in text:
util.warn("The SQLAlchemy MySQLDB dialect now automatically escapes '%' in text() expressions to '%%'.")
return text.replace('%', '%%')
def get_select_precolumns(self, select):
if isinstance(select._distinct, basestring):
return select._distinct.upper() + " "
elif select._distinct:
return "DISTINCT "
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
# 'JOIN ... ON ...' for inner joins isn't available until 4.0.
# Apparently < 3.23.17 requires theta joins for inner joins
# (but not outer). Not generating these currently, but
# support can be added, preferably after dialects are
# refactored to be version-sensitive.
return ''.join(
(self.process(join.left, asfrom=True),
(join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "),
self.process(join.right, asfrom=True),
" ON ",
self.process(join.onclause)))
def for_update_clause(self, select):
if select.for_update == 'read':
return ' LOCK IN SHARE MODE'
else:
return super(MySQLCompiler, self).for_update_clause(select)
def limit_clause(self, select):
# MySQL supports:
# LIMIT <limit>
# LIMIT <offset>, <limit>
# and in server versions > 3.3:
# LIMIT <limit> OFFSET <offset>
# The latter is more readable for offsets but we're stuck with the
# former until we can refine dialects by server revision.
limit, offset = select._limit, select._offset
if (limit, offset) == (None, None):
return ''
elif offset is not None:
# As suggested by the MySQL docs, need to apply an
# artificial limit if one wasn't provided
if limit is None:
limit = 18446744073709551615
return ' \n LIMIT %s, %s' % (offset, limit)
else:
# No offset provided, so just use the limit
return ' \n LIMIT %s' % (limit,)
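# Sketch of the emitted clauses (literal values are illustrative):
#   .limit(10)            ->  LIMIT 10
#   .offset(20).limit(10) ->  LIMIT 20, 10
#   .offset(20)           ->  LIMIT 20, 18446744073709551615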
def visit_update(self, update_stmt):
self.stack.append({'from': set([update_stmt.table])})
self.isupdate = True
colparams = self._get_colparams(update_stmt)
text = "UPDATE " + self.preparer.format_table(update_stmt.table) + " SET " + ', '.join(["%s=%s" % (self.preparer.format_column(c[0]), c[1]) for c in colparams])
if update_stmt._whereclause:
text += " WHERE " + self.process(update_stmt._whereclause)
limit = update_stmt.kwargs.get('mysql_limit', None)
if limit:
text += " LIMIT %s" % limit
self.stack.pop(-1)
return text
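# Hypothetical usage of the MySQL-only UPDATE ... LIMIT handled above:
#   tbl.update(values={'name': 'x'}, mysql_limit=5)
#   # -> "UPDATE tbl SET name=%s LIMIT 5" (WHERE added only when a
#   #    whereclause is present)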
# ug. "InnoDB needs indexes on foreign keys and referenced keys [...].
# Starting with MySQL 4.1.2, these indexes are created automatically.
# In older versions, the indexes must be created explicitly or the
# creation of foreign key constraints fails."
class MySQLSchemaGenerator(compiler.SchemaGenerator):
def get_column_specification(self, column, first_pk=False):
"""Builds column DDL."""
colspec = [self.preparer.format_column(column),
column.type.dialect_impl(self.dialect).get_col_spec()]
default = self.get_column_default_string(column)
if default is not None:
colspec.append('DEFAULT ' + default)
if not column.nullable:
colspec.append('NOT NULL')
if column.primary_key and column.autoincrement:
try:
first = [c for c in column.table.primary_key.columns
if (c.autoincrement and
isinstance(c.type, sqltypes.Integer) and
not c.foreign_keys)].pop(0)
if column is first:
colspec.append('AUTO_INCREMENT')
except IndexError:
pass
return ' '.join(colspec)
def post_create_table(self, table):
"""Build table-level CREATE options like ENGINE and COLLATE."""
table_opts = []
for k in table.kwargs:
if k.startswith('mysql_'):
opt = k[6:].upper()
joiner = '='
if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
'CHARACTER SET', 'COLLATE'):
joiner = ' '
table_opts.append(joiner.join((opt, table.kwargs[k])))
return ' '.join(table_opts)
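# Sketch, assuming a table declared elsewhere with dialect keyword args:
#   Table('t', meta, Column('id', MSInteger, primary_key=True),
#         mysql_engine='InnoDB', mysql_charset='utf8')
#   # -> trailing DDL of roughly "ENGINE=InnoDB CHARSET=utf8"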
class MySQLSchemaDropper(compiler.SchemaDropper):
def visit_index(self, index):
self.append("\nDROP INDEX %s ON %s" %
(self.preparer.quote(self._validate_identifier(index.name, False), index.quote),
self.preparer.format_table(index.table)))
self.execute()
def drop_foreignkey(self, constraint):
self.append("ALTER TABLE %s DROP FOREIGN KEY %s" %
(self.preparer.format_table(constraint.table),
self.preparer.format_constraint(constraint)))
self.execute()
class MySQLSchemaReflector(object):
"""Parses SHOW CREATE TABLE output."""
def __init__(self, identifier_preparer):
"""Construct a MySQLSchemaReflector.
identifier_preparer
An ANSIIdentifierPreparer type, used to determine the identifier
quoting style in effect.
"""
self.preparer = identifier_preparer
self._prep_regexes()
def reflect(self, connection, table, show_create, charset, only=None):
"""Parse MySQL SHOW CREATE TABLE and fill in a ''Table''.
show_create
Unicode output of SHOW CREATE TABLE
table
A ''Table'', to be loaded with Columns, Indexes, etc.
table.name will be set if not already
charset
FIXME, some constructed values (like column defaults)
currently can't be Unicode. ''charset'' will convert them
into the connection character set.
only
An optional sequence of column names. If provided, only
these columns will be reflected, and any keys or constraints
that include columns outside this set will also be omitted.
That means that if ``only`` includes only one column in a
2 part primary key, the entire primary key will be omitted.
"""
keys, constraints = [], []
if only:
only = set(only)
for line in re.split(r'\r?\n', show_create):
if line.startswith(' ' + self.preparer.initial_quote):
self._add_column(table, line, charset, only)
# a regular table options line
elif line.startswith(') '):
self._set_options(table, line)
# an ANSI-mode table options line
elif line == ')':
pass
elif line.startswith('CREATE '):
self._set_name(table, line)
# Not present in real reflection, but may be if loading from a file.
elif not line:
pass
else:
type_, spec = self.parse_constraints(line)
if type_ is None:
util.warn("Unknown schema content: %r" % line)
elif type_ == 'key':
keys.append(spec)
elif type_ == 'constraint':
constraints.append(spec)
else:
pass
self._set_keys(table, keys, only)
self._set_constraints(table, constraints, connection, only)
def _set_name(self, table, line):
"""Override a Table name with the reflected name.
table
A ``Table``
line
The first line of SHOW CREATE TABLE output.
"""
# Don't override by default.
if table.name is None:
table.name = self.parse_name(line)
def _add_column(self, table, line, charset, only=None):
spec = self.parse_column(line)
if not spec:
util.warn("Unknown column definition %r" % line)
return
if not spec['full']:
util.warn("Incomplete reflection of column definition %r" % line)
name, type_, args, notnull = \
spec['name'], spec['coltype'], spec['arg'], spec['notnull']
if only and name not in only:
self.logger.info("Omitting reflected column %s.%s" %
(table.name, name))
return
# Convention says that TINYINT(1) columns == BOOLEAN
if type_ == 'tinyint' and args == '1':
type_ = 'boolean'
args = None
try:
col_type = ischema_names[type_]
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
col_type = sqltypes.NullType
# Column type positional arguments eg. varchar(32)
if args is None or args == '':
type_args = []
elif args[0] == "'" and args[-1] == "'":
type_args = self._re_csv_str.findall(args)
else:
type_args = [int(v) for v in self._re_csv_int.findall(args)]
# Column type keyword options
type_kw = {}
for kw in ('unsigned', 'zerofill'):
if spec.get(kw, False):
type_kw[kw] = True
for kw in ('charset', 'collate'):
if spec.get(kw, False):
type_kw[kw] = spec[kw]
if type_ == 'enum':
type_kw['quoting'] = 'quoted'
type_instance = col_type(*type_args, **type_kw)
col_args, col_kw = [], {}
# NOT NULL
if spec.get('notnull', False):
col_kw['nullable'] = False
# AUTO_INCREMENT
if spec.get('autoincr', False):
col_kw['autoincrement'] = True
elif issubclass(col_type, sqltypes.Integer):
col_kw['autoincrement'] = False
# DEFAULT
default = spec.get('default', None)
if default is not None and default != 'NULL':
# Defaults should be in the native charset for the moment
default = default.encode(charset)
if type_ == 'timestamp':
# can't be NULL for TIMESTAMPs
if (default[0], default[-1]) != ("'", "'"):
default = sql.text(default)
else:
default = default[1:-1]
col_args.append(schema.DefaultClause(default))
table.append_column(schema.Column(name, type_instance,
*col_args, **col_kw))
def _set_keys(self, table, keys, only):
"""Add ``Index`` and ``PrimaryKeyConstraint`` items to a ``Table``.
Most of the information gets dropped here- more is reflected than
the schema objects can currently represent.
table
A ``Table``
keys
A sequence of key specifications produced by `constraints`
only
Optional `set` of column names. If provided, keys covering
columns not in this set will be omitted.
"""
for spec in keys:
flavor = spec['type']
col_names = [s[0] for s in spec['columns']]
if only and not set(col_names).issubset(only):
if flavor is None:
flavor = 'index'
self.logger.info(
"Omitting %s KEY for (%s), key covers ommitted columns." %
(flavor, ', '.join(col_names)))
continue
constraint = False
if flavor == 'PRIMARY':
key = schema.PrimaryKeyConstraint()
constraint = True
elif flavor == 'UNIQUE':
key = schema.Index(spec['name'], unique=True)
elif flavor in (None, 'FULLTEXT', 'SPATIAL'):
key = schema.Index(spec['name'])
else:
self.logger.info(
"Converting unknown KEY type %s to a plain KEY" % flavor)
key = schema.Index(spec['name'])
for col in [table.c[name] for name in col_names]:
key.append_column(col)
if constraint:
table.append_constraint(key)
def _set_constraints(self, table, constraints, connection, only):
"""Apply constraints to a ``Table``."""
default_schema = None
for spec in constraints:
# only FOREIGN KEYs
ref_name = spec['table'][-1]
ref_schema = len(spec['table']) > 1 and spec['table'][-2] or table.schema
if not ref_schema:
if default_schema is None:
default_schema = connection.dialect.get_default_schema_name(
connection)
if table.schema == default_schema:
ref_schema = table.schema
loc_names = spec['local']
if only and not set(loc_names).issubset(only):
self.logger.info(
"Omitting FOREIGN KEY for (%s), key covers ommitted "
"columns." % (', '.join(loc_names)))
continue
ref_key = schema._get_table_key(ref_name, ref_schema)
if ref_key in table.metadata.tables:
ref_table = table.metadata.tables[ref_key]
else:
ref_table = schema.Table(
ref_name, table.metadata, schema=ref_schema,
autoload=True, autoload_with=connection)
ref_names = spec['foreign']
if ref_schema:
refspec = [".".join([ref_schema, ref_name, column]) for column in ref_names]
else:
refspec = [".".join([ref_name, column]) for column in ref_names]
con_kw = {}
for opt in ('name', 'onupdate', 'ondelete'):
if spec.get(opt, False):
con_kw[opt] = spec[opt]
key = schema.ForeignKeyConstraint(loc_names, refspec, link_to_name=True, **con_kw)
table.append_constraint(key)
def _set_options(self, table, line):
"""Apply safe reflected table options to a ``Table``.
table
A ``Table``
line
The final line of SHOW CREATE TABLE output.
"""
options = self.parse_table_options(line)
for nope in ('auto_increment', 'data_directory', 'index_directory'):
options.pop(nope, None)
for opt, val in options.items():
table.kwargs['mysql_%s' % opt] = val
def _prep_regexes(self):
"""Pre-compile regular expressions."""
self._re_columns = []
self._pr_options = []
self._re_options_util = {}
_final = self.preparer.final_quote
quotes = dict(zip(('iq', 'fq', 'esc_fq'),
[re.escape(s) for s in
(self.preparer.initial_quote,
_final,
self.preparer._escape_identifier(_final))]))
self._pr_name = _pr_compile(
r'^CREATE (?:\w+ +)?TABLE +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
self.preparer._unescape_identifier)
# `col`,`col2`(32),`col3`(15) DESC
#
# Note: ASC and DESC aren't reflected, so we'll punt...
self._re_keyexprs = _re_compile(
r'(?:'
r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
# 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
# 123 or 123,456
self._re_csv_int = _re_compile(r'\d+')
# `colname` <type> [type opts]
# (NOT NULL | NULL)
# DEFAULT ('value' | CURRENT_TIMESTAMP...)
# COMMENT 'comment'
# COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
# STORAGE (DISK|MEMORY)
self._re_column = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
r'(?: +(?P<unsigned>UNSIGNED))?'
r'(?: +(?P<zerofill>ZEROFILL))?'
r'(?: +CHARACTER SET +(?P<charset>\w+))?'
r'(?: +COLLATE +(?P<collate>\w+))?'
r'(?: +(?P<notnull>NOT NULL))?'
r'(?: +DEFAULT +(?P<default>'
r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+)'
r'(?:ON UPDATE \w+)?'
r'))?'
r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
r'(?: +STORAGE +(?P<storage>\w+))?'
r'(?: +(?P<extra>.*))?'
r',?$'
% quotes
)
# Fallback, try to parse as little as possible
self._re_column_loose = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
r'.*?(?P<notnull>NOT NULL)?'
% quotes
)
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
# KEY_BLOCK_SIZE size | WITH PARSER name
self._re_key = _re_compile(
r' '
r'(?:(?P<type>\S+) )?KEY'
r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
r'(?: +USING +(?P<using_pre>\S+))?'
r' +\((?P<columns>.+?)\)'
r'(?: +USING +(?P<using_post>\S+))?'
r'(?: +KEY_BLOCK_SIZE +(?P<keyblock>\S+))?'
r'(?: +WITH PARSER +(?P<parser>\S+))?'
r',?$'
% quotes
)
# CONSTRAINT `name` FOREIGN KEY (`local_col`)
# REFERENCES `remote` (`remote_col`)
# MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
# ON DELETE CASCADE ON UPDATE RESTRICT
#
# unique constraints come back as KEYs
kw = quotes.copy()
kw['on'] = 'RESTRICT|CASCADE|SET NULL|NO ACTION'
self._re_constraint = _re_compile(
r' '
r'CONSTRAINT +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'FOREIGN KEY +'
r'\((?P<local>[^\)]+?)\) REFERENCES +'
r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
r'\((?P<foreign>[^\)]+?)\)'
r'(?: +(?P<match>MATCH \w+))?'
r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
% kw
)
# PARTITION
#
# punt!
self._re_partition = _re_compile(
r' '
r'(?:SUB)?PARTITION')
# Table-level options (COLLATE, ENGINE, etc.)
for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
'AVG_ROW_LENGTH', 'CHARACTER SET',
'DEFAULT CHARSET', 'CHECKSUM',
'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
'KEY_BLOCK_SIZE'):
self._add_option_word(option)
for option in (('COMMENT', 'DATA_DIRECTORY', 'INDEX_DIRECTORY',
'PASSWORD', 'CONNECTION')):
self._add_option_string(option)
self._add_option_regex('UNION', r'\([^\)]+\)')
self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
self._add_option_regex('RAID_TYPE',
r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
self._re_options_util['='] = _re_compile(r'\s*=\s*$')
def _add_option_string(self, directive):
regex = (r'(?P<directive>%s\s*(?:=\s*)?)'
r'(?:\x27.(?P<val>.*?)\x27(?!\x27)\x27)' %
re.escape(directive))
self._pr_options.append(
_pr_compile(regex, lambda v: v.replace("''", "'")))
def _add_option_word(self, directive):
regex = (r'(?P<directive>%s\s*(?:=\s*)?)'
r'(?P<val>\w+)' % re.escape(directive))
self._pr_options.append(_pr_compile(regex))
def _add_option_regex(self, directive, regex):
regex = (r'(?P<directive>%s\s*(?:=\s*)?)'
r'(?P<val>%s)' % (re.escape(directive), regex))
self._pr_options.append(_pr_compile(regex))
def parse_name(self, line):
"""Extract the table name.
line
The first line of SHOW CREATE TABLE
"""
regex, cleanup = self._pr_name
m = regex.match(line)
if not m:
return None
return cleanup(m.group('name'))
def parse_column(self, line):
"""Extract column details.
Falls back to a 'minimal support' variant if full parse fails.
line
Any column-bearing line from SHOW CREATE TABLE
"""
m = self._re_column.match(line)
if m:
spec = m.groupdict()
spec['full'] = True
return spec
m = self._re_column_loose.match(line)
if m:
spec = m.groupdict()
spec['full'] = False
return spec
return None
def parse_constraints(self, line):
"""Parse a KEY or CONSTRAINT line.
line
A line of SHOW CREATE TABLE output
"""
# KEY
m = self._re_key.match(line)
if m:
spec = m.groupdict()
# convert columns into name, length pairs
spec['columns'] = self._parse_keyexprs(spec['columns'])
return 'key', spec
# CONSTRAINT
m = self._re_constraint.match(line)
if m:
spec = m.groupdict()
spec['table'] = \
self.preparer.unformat_identifiers(spec['table'])
spec['local'] = [c[0]
for c in self._parse_keyexprs(spec['local'])]
spec['foreign'] = [c[0]
for c in self._parse_keyexprs(spec['foreign'])]
return 'constraint', spec
# PARTITION and SUBPARTITION
m = self._re_partition.match(line)
if m:
# Punt!
return 'partition', line
# No match.
return (None, line)
def parse_table_options(self, line):
"""Build a dictionary of all reflected table-level options.
line
The final line of SHOW CREATE TABLE output.
"""
options = {}
if not line or line == ')':
return options
r_eq_trim = self._re_options_util['=']
for regex, cleanup in self._pr_options:
m = regex.search(line)
if not m:
continue
directive, value = m.group('directive'), m.group('val')
directive = r_eq_trim.sub('', directive).lower()
if cleanup:
value = cleanup(value)
options[directive] = value
return options
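# e.g. a closing line like ") ENGINE=InnoDB DEFAULT CHARSET=utf8"
# (illustrative) would come back as
# {'engine': 'InnoDB', 'default charset': 'utf8'}.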
def _describe_to_create(self, table, columns):
"""Re-format DESCRIBE output as a SHOW CREATE TABLE string.
DESCRIBE is a much simpler reflection and is sufficient for
reflecting views for runtime use. This method formats DDL
for columns only- keys are omitted.
`columns` is a sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
SHOW FULL COLUMNS FROM rows must be rearranged for use with
this function.
"""
buffer = []
for row in columns:
(name, col_type, nullable, default, extra) = \
[row[i] for i in (0, 1, 2, 4, 5)]
line = [' ']
line.append(self.preparer.quote_identifier(name))
line.append(col_type)
if not nullable:
line.append('NOT NULL')
if default:
if 'auto_increment' in default:
pass
elif (col_type.startswith('timestamp') and
default.startswith('C')):
line.append('DEFAULT')
line.append(default)
elif default == 'NULL':
line.append('DEFAULT')
line.append(default)
else:
line.append('DEFAULT')
line.append("'%s'" % default.replace("'", "''"))
if extra:
line.append(extra)
buffer.append(' '.join(line))
return ''.join([('CREATE TABLE %s (\n' %
self.preparer.quote_identifier(table.name)),
',\n'.join(buffer),
'\n) '])
def _parse_keyexprs(self, identifiers):
"""Unpack '"col"(2),"col" ASC'-ish strings into components."""
return self._re_keyexprs.findall(identifiers)
log.class_logger(MySQLSchemaReflector)
class _MySQLIdentifierPreparer(compiler.IdentifierPreparer):
"""MySQL-specific schema identifier configuration."""
reserved_words = RESERVED_WORDS
def __init__(self, dialect, **kw):
super(_MySQLIdentifierPreparer, self).__init__(dialect, **kw)
def _quote_free_identifiers(self, *ids):
"""Unilaterally identifier-quote any number of strings."""
return tuple([self.quote_identifier(i) for i in ids if i is not None])
class MySQLIdentifierPreparer(_MySQLIdentifierPreparer):
"""Traditional MySQL-specific schema identifier configuration."""
def __init__(self, dialect):
super(MySQLIdentifierPreparer, self).__init__(dialect, initial_quote="`")
def _escape_identifier(self, value):
return value.replace('`', '``')
def _unescape_identifier(self, value):
return value.replace('``', '`')
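# e.g. _escape_identifier('my`table') -> 'my``table' when rendering, and
# _unescape_identifier('my``table') -> 'my`table' when parsing SHOW CREATE
# TABLE output back into schema objects.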
class MySQLANSIIdentifierPreparer(_MySQLIdentifierPreparer):
"""ANSI_QUOTES MySQL schema identifier configuration."""
pass
def _compat_fetchall(rp, charset=None):
"""Proxy result rows to smooth over MySQL-Python driver inconsistencies."""
return [_MySQLPythonRowProxy(row, charset) for row in rp.fetchall()]
def _compat_fetchone(rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver inconsistencies."""
return _MySQLPythonRowProxy(rp.fetchone(), charset)
def _pr_compile(regex, cleanup=None):
"""Prepare a 2-tuple of compiled regex and callable."""
return (_re_compile(regex), cleanup)
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)
dialect = MySQLDialect
dialect.statement_compiler = MySQLCompiler
dialect.schemagenerator = MySQLSchemaGenerator
dialect.schemadropper = MySQLSchemaDropper
dialect.execution_ctx_cls = MySQLExecutionContext
| [
"[email protected]"
]
| |
a43a6ca183fe13cab45ff1ffe654cb22df55bdd3 | b3f6daa5d6c987eb8a61d5fe125bf2a98997e259 | /8kyu/Simple multiplication/index.py | 0853411208f8f60cc3ab604295bcd6f49ea44358 | []
| no_license | krnets/codewars-practice | 53a0a6c9d2d8c2b94d6799a12f48dd588179a5ce | 5f8e1cc1aebd900b9e5a276884419fc3e1ddef24 | refs/heads/master | 2022-12-20T19:33:43.337581 | 2022-12-16T05:32:39 | 2022-12-16T05:32:39 | 217,464,785 | 1 | 0 | null | 2020-07-20T08:36:31 | 2019-10-25T06:20:41 | JavaScript | UTF-8 | Python | false | false | 668 | py | # 8kyu - Simple multiplication
""" This kata is about multiplying a given number by eight if it is an even number and by nine otherwise. """
# def simple_multiplication(number):
# return 8 * number if number % 2 == 0 else 9 * number
# def simple_multiplication(number):
# return number * (8 if number % 2 == 0 else 9)
# def simple_multiplication(number):
# return number * [8, 9][number % 2]
def simple_multiplication(number):
return number * (8 + number % 2)
q = simple_multiplication(2) # 16
q
q = simple_multiplication(1) # 9
q
q = simple_multiplication(8) # 64
q
q = simple_multiplication(4) # 32
q
q = simple_multiplication(5) # 45
q
| [
"[email protected]"
]
| |
c4629c6296276f6dd000ac6acc97097972160f92 | 4755dabdcff6a45b9c15bf9ea814c6b8037874bd | /build/laser_proc/catkin_generated/pkg.installspace.context.pc.py | 8aa2d2e231584bb4c6aa2e425d2a5cc3e336be50 | []
| no_license | Rallstad/RobotSnake | 676a97bdfde0699736d613e73d539929a0c2b492 | 37ee6d5af0458b855acf7c2b83e0ee17833dbfd1 | refs/heads/master | 2023-01-03T05:46:46.268422 | 2018-05-27T16:01:47 | 2018-05-27T16:01:47 | 308,665,980 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/snake/Documents/catkin_ws/install/include".split(';') if "/home/snake/Documents/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;sensor_msgs;rosconsole;nodelet".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-llaser_proc_library;-llaser_publisher;-llaser_transport;-llaser_proc_ROS;-lLaserProcNodelet".split(';') if "-llaser_proc_library;-llaser_publisher;-llaser_transport;-llaser_proc_ROS;-lLaserProcNodelet" != "" else []
PROJECT_NAME = "laser_proc"
PROJECT_SPACE_DIR = "/home/snake/Documents/catkin_ws/install"
PROJECT_VERSION = "0.1.4"
| [
"[email protected]"
]
| |
0d9c589064bdfa802bbc69912c2b119c8b1a3167 | 5b3d8b5c612c802fd846de63f86b57652d33f672 | /Python/seven_kyu/to_jaden_case.py | 6f1011c1120d950fcc87a4462cab4f25505b6208 | [
"Apache-2.0"
]
| permissive | Brokenshire/codewars-projects | 1e591b57ed910a567f6c0423beb194fa7f8f693e | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | refs/heads/master | 2021-07-22T18:50:25.847592 | 2021-01-25T23:27:17 | 2021-01-25T23:27:17 | 228,114,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | # Python solution for 'Jaden Casing Strings' codewars question.
# Level: 7 kyu
# Tags: Fundamentals, Strings, and Arrays.
# Author: Jack Brokenshire
# Date: 17/02/2020
import unittest
def to_jaden_case(string):
"""
Your task is to convert strings to how they would be written by Jaden Smith. The strings are actual quotes from
Jaden Smith, but they are not capitalized in the same way he originally typed them.
:param string: A string value input.
:return: A new string with each word in the sentence capitalized.
"""
return " ".join(x.capitalize() for x in string.split())
class TestToJadenCase(unittest.TestCase):
"""Class to test 'to_jaden_case' function"""
def test_name_list(self):
quote = "How can mirrors be real if our eyes aren't real"
self.assertEqual(to_jaden_case(quote), "How Can Mirrors Be Real If Our Eyes Aren't Real")
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
b34d5bebd57109d20aee7fec341878dfb3c9875c | 31eaed64b0caeda5c5fe3603609402034e6eb7be | /python_zumbi/py_web/test_scraping_2.py | 8504ae20c38d531160f7f991a12e83e59ccd487b | []
| no_license | RaphaelfsOliveira/workspace_python | 93657b581043176ecffb5783de208c0a00924832 | 90959697687b9398cc48146461750942802933b3 | refs/heads/master | 2021-01-11T17:39:49.574875 | 2017-06-28T20:55:43 | 2017-06-28T20:55:43 | 79,814,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | import urllib.request #modulo que permite conversar com a internet
pagina = urllib.request.urlopen(
'http://beans.itcarlow.ie/prices-loyalty.html')
text = pagina.read().decode('utf8')
print(text)
i = text.find('>$')
preco = float(text[i+2:i+6])
if preco < 4.74:
print('Em Promoção: ', preco)
else:
print('Está Caro!!: ', preco)
| [
"[email protected]"
]
| |
d807a7d1a649fac018c6da8614952df89a7cdc5e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_snowboard.py | 5100be7c8c861988ab39e3be570cce2fce7b2eba | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py |
#calss header
class _SNOWBOARD():
def __init__(self,):
self.name = "SNOWBOARD"
self.definitions = [u'to slide on the snow by standing on a specially shaped board: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
751a74264a973fe1ab989c874cc4a9a039bd45e4 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_55/373.py | 65e1a8ea83c63527538a4d324820da9d12a0a74e | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,581 | py | '''
Created on May 9, 2010
@author: indra
'''
import sys, os
filename = "C-large"
path = os.path.normpath(os.path.join(os.path.dirname(__file__), filename+".in"))
reader = open(path, "rb")
path = os.path.normpath(os.path.join(os.path.dirname(__file__), filename+".out"))
writer = open(path,"w")
ncases = int(reader.readline().rstrip())
caseno = 0
while caseno<ncases:
caseno+=1
case = reader.readline().rstrip()
R,k,N = [int(x) for x in case.split(' ')]
case = reader.readline().rstrip()
gps = [int(x) for x in case.split(' ')]
totp = 0
for gp in gps:
totp+=gp
print (R,k,N)
print gps
print totp
if totp<=k:
writer.write("Case #%s: %d\n" % (str(caseno),R*totp))
continue
rides = [-1]*N
money = [0]*N
retmon = 0
curloc = 0
curride = 0
curmon = 0
while rides[curloc]==-1 and curride<R:
rides[curloc] = curride
money[curloc] = curmon
curride+=1
tem=0
while tem+gps[curloc]<=k:
tem+=gps[curloc]
curloc+=1
if curloc>=N:
curloc-=N
curmon+=tem
if curride==R:
writer.write("Case #%s: %d\n" % (str(caseno),curmon))
continue
cycrides = curride - rides[curloc]
cycmoney = curmon - money[curloc]
R-=rides[curloc]
retmon+=money[curloc]
rleft = R%cycrides
retmon += cycmoney*((R-rleft)/cycrides)
lastrides = 0
while lastrides<rleft:
lastrides+=1
tem=0
while tem+gps[curloc]<=k:
tem+=gps[curloc]
curloc+=1
if curloc>=N:
curloc-=N
retmon+=tem
writer.write("Case #%s: %d\n" % (str(caseno),retmon))
writer.close() | [
"[email protected]"
]
| |
aba1b20ca910395e8e556c928a2bf6e5d53cdac8 | 2d8da5cacd21dd425688d67e1a92faa50aefc6bc | /excel-sheet-column-number.py | c90dd1c70703b45a9911aa35628d96708bba7730 | []
| no_license | stella-shen/Leetcode | 970857edb74ae3ccf4bcce0c40e972ab8bcc5348 | 16ad99a6511543f0286559c483206c43ed655ddd | refs/heads/master | 2021-01-19T02:48:49.918054 | 2018-11-29T10:36:43 | 2018-11-29T10:36:43 | 47,523,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py |
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
ret = 0
for i in xrange(len(s)):
ret *= 26
ret += ord(s[i]) - ord('A') + 1
return ret
if __name__ == '__main__':
sol = Solution()
s = "AB"
print sol.titleToNumber(s)
| [
"[email protected]"
]
| |
d0686bbf88f5f164a24afb5e2449f189d6ba2b4b | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc008/B/4886377.py | 4b2e8bb008ccb62443ac42cbdabfef1b5a1468e8 | []
| no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | import collections
N = int(input())
names = [input() for i in range(N)]
max_ele = collections.Counter(names)
print(max_ele.most_common()[0][0]) | [
"[email protected]"
]
| |
2c7332530c6106c9f596a55673e138596fa175ad | be7a0aa49a9b4fdad1b8b21c6f1eb6bd508be109 | /ex027vs1.py | 8f5869722797ed74a9a1bd50c65b05a9267c8f63 | []
| no_license | tlima1011/python3-curso-em-video | 29a60ee3355d6cb3ba8d1f48c6a3ecd7bc6e60dd | f6454f4d636a2bf73c151e67710f732e2d8e738c | refs/heads/master | 2021-02-04T01:13:35.313590 | 2020-04-14T12:51:19 | 2020-04-14T12:51:19 | 243,593,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | nomeCompleto = str(input('Informe seu nome completo.: ')).strip()
nomeCompleto = nomeCompleto.split()
print(f'Primeiro nome é {nomeCompleto[0].capitalize()} e o último é {nomeCompleto[-1].capitalize()}')
| [
"[email protected]"
]
| |
b9a0d4a8c907d64a769984ce54c21e598bceb55a | 857fc21a40aa32d2a57637de1c723e4ab51062ff | /PythonChallenge/Ex05/05_01.py | 93d0926dda5e81b77d9190b1a9e433c954140ed4 | [
"MIT"
]
| permissive | YorkFish/git_study | efa0149f94623d685e005d58dbaef405ab91d541 | 6e023244daaa22e12b24e632e76a13e5066f2947 | refs/heads/master | 2021-06-21T18:46:50.906441 | 2020-12-25T14:04:04 | 2020-12-25T14:04:04 | 171,432,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | #!/usr/bin/env python3
# coding:utf-8
from pickle import load
with open("banner.p", "rb") as f:
print(load(f))
| [
"[email protected]"
]
| |
2e95edb992349cc95441512bef5344b238ed4afd | c3c2af25c3269e200d2773ec9f8800f4f9a20165 | /backend/manage.py | 42924076b3f1daf8f7bf76a1488f43e45b84b567 | []
| no_license | crowdbotics-apps/divine-hill-27443 | a39ecac7c1c5f510d00bf4e300acea3e46ecca24 | f6abe52a7080da59cc99b1fb01a039933f273a2c | refs/heads/master | 2023-04-26T09:05:39.002510 | 2021-05-26T19:29:02 | 2021-05-26T19:29:02 | 371,147,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'divine_hill_27443.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
be9c106d741c93b8522ff5e49ea7ff2e5f2b74fe | aeeba89591b787bbe6b93ffb4889be9a8fca521e | /cfg.py | cf7791d7a7b0fe3603dac542a0bbc59c1ee3d3aa | [
"MIT"
]
| permissive | wistic/python-web-crawler | efa7968f66ecd7396797390f253d0ff68f3623a1 | e3738fd49d77bdff4c43a0ec31ed36cc381d26b8 | refs/heads/master | 2022-12-10T05:38:40.030202 | 2020-08-28T14:24:38 | 2020-08-28T14:24:38 | 288,676,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | config = {
"root_url": "https://flinkhub.com",
"sleep_timer": 5,
"Max_links_limit": 5000,
"Recrawl_time_limit_hours": 24,
"user_agent": "Python Spiderbot",
"No_of_threads": 5,
"database_name": "python-web-crawler",
"collection_name": "Links",
"connection_uri": "mongodb://localhost:27017/",
"download_dir_path": "/home/wistic/github/python-web-crawler/html-files"
}
| [
"[email protected]"
]
| |
6433092cbee060d537b5cb9919a76a1ec7c5ab85 | 683b73e0c95c755a08e019529aed3ff1a8eb30f8 | /machina/apps/forum_conversation/forum_attachments/admin.py | de1995638c922ddee9447fdc8ec8937ae0ebd484 | [
"BSD-3-Clause"
]
| permissive | DrJackilD/django-machina | b3a7be9da22afd457162e0f5a147a7ed5802ade4 | 76858921f2cd247f3c1faf4dc0d9a85ea99be3e1 | refs/heads/master | 2020-12-26T08:19:09.838794 | 2016-03-11T03:55:25 | 2016-03-11T03:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # -*- coding: utf-8 -*-
# Standard library imports
# Third party imports
from django.contrib import admin
# Local application / specific library imports
from machina.core.db.models import get_model
Attachment = get_model('forum_attachments', 'Attachment')
class AttachmentAdmin(admin.ModelAdmin):
list_display = ('id', 'post', 'comment', 'file', )
list_display_links = ('id', 'post', 'comment', )
raw_id_fields = ('post', )
admin.site.register(Attachment, AttachmentAdmin)
| [
"[email protected]"
]
| |
0e13ea228a661ee0d8e2c5bfce784e4d705a8f66 | 09b0075f56455d1b54d8bf3e60ca3535b8083bdc | /WideResnet.py | 595e4f69f1baa13a9f27f80fdb61e54773195de4 | []
| no_license | YanYan0716/MPL | e02c1ddf036d6019c3596fd932c51c3a14187f5e | 6ad82b050ec1ed81987c779df2dddff95dc1cde5 | refs/heads/master | 2023-04-17T23:05:54.164840 | 2021-05-07T01:14:49 | 2021-05-07T01:14:49 | 331,491,485 | 11 | 6 | null | null | null | null | UTF-8 | Python | false | false | 7,157 | py | import os
from abc import ABC
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
import config
class BasicBlock(layers.Layer):
def __init__(self, in_channels, out_channels, stride, dropout, name, trainable):
super(BasicBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
self.dropout = dropout
# name = name
self.trainable = trainable
self.bn1 = layers.BatchNormalization(
# momentum=0.999,
epsilon=config.BATCH_NORM_EPSILON,
trainable=self.trainable,
name=name+'_bn1'
)
self.relu1 = layers.LeakyReLU(alpha=0.2)
self.conv1 = layers.Conv2D(
filters=self.out_channels,
kernel_size=3,
strides=self.stride,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.HeNormal(),
kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
trainable=self.trainable,
name=name+'_conv1',
)
self.bn2 = layers.BatchNormalization(
# momentum=0.999,
epsilon=config.BATCH_NORM_EPSILON,
trainable=self.trainable,
name=name+'_bn2'
)
self.relu2 = layers.LeakyReLU(alpha=0.2)
self.dropout = layers.Dropout(
rate=self.dropout,
trainable=self.trainable,
name=name+'_dropout',
)
self.conv2 = layers.Conv2D(
filters=self.out_channels,
kernel_size=3,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.HeNormal(),
kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
trainable=self.trainable,
name=name+'_conv2',
)
if self.stride != 1 or self.in_channels != self.out_channels:
self.short_cut_relu = layers.LeakyReLU(alpha=0.2)
self.short_cut = layers.Conv2D(
filters=self.out_channels,
kernel_size=1,
strides=self.stride,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.HeNormal(),
kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
trainable=self.trainable,
name=name+'_shortcut'
)
self.add = layers.Add(name=name+'_add')
def call(self, inputs, **kwargs):
residual = inputs
out = self.bn1(inputs)
if self.stride != 1 or self.in_channels != self.out_channels:
residual = out
out = self.relu1(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv2(out)
if self.stride != 1 or self.in_channels != self.out_channels:
residual = self.short_cut_relu(residual)
residual = self.short_cut(residual)
# else:
# shortcut = out
out = self.add([residual, out])
return out
class WideResnet(keras.Model):
def __init__(self, k=[16, 32, 64, 128], name='wider'):
super(WideResnet, self).__init__(name=name)
self.k = k
self.dropout = config.DROPOUT
self.drop = layers.Dropout(
rate=config.DROPOUT,
trainable=self.trainable,
name=name+'_dropout',
)
self.conv1 = layers.Conv2D(
filters=k[0],
kernel_size=3,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=keras.initializers.HeNormal(),
kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
trainable=self.trainable,
name=name + '_conv1',
)
self.Basic1 = BasicBlock(in_channels=k[0], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic1', trainable=True)
self.Basic2 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic2', trainable=True)
self.Basic3 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic3', trainable=True)
self.Basic4 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic4', trainable=True)
self.Basic5 = BasicBlock(in_channels=k[1], out_channels=k[2], stride=2, dropout=self.dropout, name=name+'_Basic5', trainable=True)
self.Basic6 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic6', trainable=True)
self.Basic7 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic7', trainable=True)
self.Basic8 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic8', trainable=True)
self.Basic9 = BasicBlock(in_channels=k[2], out_channels=k[3], stride=2, dropout=self.dropout, name=name+'_Basic9', trainable=True)
self.Basic10 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic10', trainable=True)
self.Basic11 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic11', trainable=True)
self.Basic12 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic12', trainable=True)
self.bn1 = layers.BatchNormalization(
# momentum=0.999,
epsilon=config.BATCH_NORM_EPSILON,
trainable=self.trainable,
name=name+'_bn1'
)
self.relu1 = layers.LeakyReLU(alpha=0.2)
self.avgpool = layers.GlobalAveragePooling2D(name=name+'_avgpool')
self.dense = layers.Dense(
units=config.NUM_CLASS,
# kernel_initializer=keras.initializers.RandomNormal(mean=0., stddev=1.),
# activation='softmax',
kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
name=name+'_dense',
)
def call(self, inputs, training=None, mask=None):
x = self.conv1(inputs)
x = self.Basic1(x)
x = self.Basic2(x)
x = self.Basic3(x)
x = self.Basic4(x)
x = self.Basic5(x)
x = self.Basic6(x)
x = self.Basic7(x)
x = self.Basic8(x)
x = self.Basic9(x)
x = self.Basic10(x)
x = self.Basic11(x)
x = self.Basic12(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.avgpool(x)
x = self.drop(x)
out = self.dense(x)
return out
def model(self):
input = keras.Input(shape=(32, 32, 3), dtype=tf.float32)
return keras.Model(inputs=input, outputs=self.call(input))
if __name__ == '__main__':
img = tf.random.normal([1, 32, 32, 3])
model = WideResnet().model()
model.summary() | [
"[email protected]"
]
| |
b5d716b2740e66732492a580f7db8280232f261e | d3d8acc788bd3a8d7e5f861ad87c4d802723062b | /test/step3_descope200MCHF_HLT.py | c2272355f19530f27df01562b14bf70d1dee3ae4 | []
| no_license | calabria/L1IntegratedMuonTrigger | 27ff0bde46208f84595423ec375080979fbe4c62 | 05a368b8d04f84b675d40445555f2cacfd135e4e | refs/heads/master | 2021-01-24T21:57:42.232290 | 2015-08-11T11:52:35 | 2015-08-11T11:52:35 | 38,485,204 | 0 | 2 | null | 2015-08-11T11:52:35 | 2015-07-03T09:40:57 | Python | UTF-8 | Python | false | false | 4,607 | py | # Auto generated configuration file
# using:
# Revision: 1.20
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step3_descope200MCHF --fileout file:out_hlt_descope200MCHF.root --mc --eventcontent RECOSIM --step HLT --customise RecoParticleFlow/PandoraTranslator/customizeHGCalPandora_cff.cust_2023HGCalPandoraMuon,Configuration/DataProcessing/Utils.addMonitoring,L1Trigger/L1IntegratedMuonTrigger/phase2DescopingScenarios.descope200MCHF --datatier GEN-SIM-RECO --conditions PH2_1K_FB_V6::All --magField 38T_PostLS1 --filein file:/afs/cern.ch/work/d/dildick/public/GEM/MuonPhaseIIScopeDoc/CMSSW_6_2_0_SLHC26_patch3/src/001B71CC-0F38-E511-BEE2-002618943918.root --geometry Extended2023HGCalMuon,Extended2023HGCalMuonReco --no_exec -n 10
import FWCore.ParameterSet.Config as cms
process = cms.Process('HLT')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023HGCalMuonReco_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('HLTrigger.Configuration.HLT_GRun_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("PoolSource",
secondaryFileNames = cms.untracked.vstring(),
fileNames = cms.untracked.vstring('file:/afs/cern.ch/work/d/dildick/public/GEM/MuonPhaseIIScopeDoc/CMSSW_6_2_0_SLHC26_patch3/src/001B71CC-0F38-E511-BEE2-002618943918.root')
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.20 $'),
annotation = cms.untracked.string('step3_descope200MCHF nevts:10'),
name = cms.untracked.string('Applications')
)
# Output definition
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.RECOSIMEventContent.outputCommands,
fileName = cms.untracked.string('file:out_hlt_descope200MCHF.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM-RECO')
)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'PH2_1K_FB_V6::All', '')
# Path and EndPath definitions
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
# Schedule definition
process.schedule = cms.Schedule()
process.schedule.extend(process.HLTSchedule)
process.schedule.extend([process.endjob_step,process.RECOSIMoutput_step])
# customisation of the process.
# Automatic addition of the customisation function from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff
from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff import cust_2023HGCalPandoraMuon
#call to customisation function cust_2023HGCalPandoraMuon imported from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff
process = cust_2023HGCalPandoraMuon(process)
# Automatic addition of the customisation function from HLTrigger.Configuration.customizeHLTforMC
from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC
#call to customisation function customizeHLTforMC imported from HLTrigger.Configuration.customizeHLTforMC
process = customizeHLTforMC(process)
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# Automatic addition of the customisation function from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios
from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios import descope200MCHF
#call to customisation function descope200MCHF imported from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios
process = descope200MCHF(process)
# End of customisation functions
| [
"[email protected]"
]
| |
d8229a35567ff7594f50dbb89b7cea36bec123ac | 148125096da896fd93292d2cd408265d159fec28 | /qa/rpc-tests/p2p-acceptblock.py | 2267768dbfeb2685302144171cfdd388f4355b4c | [
"MIT"
]
| permissive | lycion/lkcoinse | 7cfbcbdfc1e98f20d9dfc497ea65fd75ca6de25d | 9cf9ed5730217566b44466c22dc255f0134ad1bb | refs/heads/master | 2020-03-30T03:24:44.245148 | 2018-09-28T04:55:30 | 2018-09-28T04:55:30 | 150,548,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,678 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Lkcoinse Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
from test_framework.mininode import *
from test_framework.test_framework import LkcoinseTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(LkcoinseTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LKCOINSED", "lkcoinsed"),
help="lkcoinsed binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print("First height 2 block accepted by both nodes")
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in range(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print("Second height 2 block accepted only from whitelisted peer")
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in range(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print("Unrequested more-work block accepted from non-whitelisted peer")
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print("Successfully reorged to length 3 chain from whitelisted peer")
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in range(2):
for i in range(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
test_node.sync_with_ping()
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print("Unrequested block too far-ahead not processed")
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print("Unrequested block far ahead of tip accepted from whitelisted peer")
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
# Wait for the reorg to complete. It can be slower on some systems.
while self.nodes[0].getblockcount() != 290:
time.sleep(1)
j = j + 1
if (j > 60):
break
assert_equal(self.nodes[0].getblockcount(), 290)
print("Successfully reorged to longer chain from non-whitelisted peer")
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
| [
"[email protected]"
]
| |
6537118072122509e9adad7738eee5616a1b24dd | fc83fc10fcc509316e612d73bd40a81d3ca0a2e6 | /tests/nd_gaussian_multiprocessing.py | 1f8c698393e3a088d991eb3484785a391dc3c783 | [
"MIT"
]
| permissive | DimitriMisiak/mcmc-red | 47dfb7e0664205da55fa463df77851722082e3c3 | caae0ce39d082e578176a5078a9184980b0851c3 | refs/heads/main | 2023-06-19T04:10:42.385862 | 2019-07-05T07:45:01 | 2019-07-05T07:45:01 | 387,757,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,928 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Handy MCMC scripts.
Test for the different fit method (mcmc, ptmcmc, minimizer).
Author:
Dimitri Misiak ([email protected])
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import scipy.signal as sgl
from os import path
import scipy.optimize as op
import mcmc_red as mcr
# close all plots
plt.close('all')
nsample = 1000
ndim = 4
SCALE = 'log'
### LINEAR SCALE
if SCALE == 'linear':
mu = np.random.uniform(-10, 10, ndim)
sigma = np.random.uniform(0, 10, ndim)
bounds = ((-20, 20),) * ndim
### LOG SCALE
elif SCALE == 'log':
mu_generator = np.random.uniform(-6, 0, ndim)
mu = 10**mu_generator
sigma = mu/10
bounds = ((1e-7, 1e1),) * ndim
else:
raise Exception('SCALE not set properly!')
print("Generating blob at mu={0} and sigma={1}".format(mu, sigma))
blob = np.random.normal(mu, sigma, (nsample, ndim))
print("Checking")
print("mean =", np.mean(blob, axis=0))
print("std =", np.std(blob, axis=0))
def chi2(param):
return mcr.chi2_simple(blob, param, sigma)
#def chi2(param):
# x2 = np.sum( (blob - np.array(param))**2 / np.array(sigma)**2 )
# return x2
condi = None
# XXX MCMC
# save directory
sampler_path = 'mcmc_sampler/autosave'
# extracts the sup bounds and the inf bounds
bounds = list(bounds)
binf = list()
bsup = list()
for b in bounds:
inf, sup = b
binf.append(inf)
bsup.append(sup)
binf = np.array(binf)
bsup = np.array(bsup)
# additionnal constrain as function of the parameters
if condi == None:
condi = lambda p: True
# Loglikelihood function taking into accounts the bounds
def loglike(x):
""" Loglikelihood being -chi2/2.
Take into account the bounds.
"""
cinf = np.sum(x<binf)
csup = np.sum(x>bsup)
if cinf == 0 and csup == 0 and condi(x) == True:
# return -0.5*aux(np.power(10,x))
return -0.5*chi2(x)
else:
return -np.inf
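# Note on loglike above: returning -0.5*chi2 makes the walkers sample a Gaussian
# likelihood (up to an additive constant), while the -np.inf branch acts as a hard
# uniform prior that rejects any proposal outside `bounds` or failing `condi`.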
# running the mcmc analysis
sampler = mcr.mcmc_sampler_multi(loglike, bounds, nsteps=1000, path=sampler_path, threads=2, scale=SCALE)
#nwalkers=None
#nsteps=10000
#threads=4
##############################################################################
## extracts the sup bounds and the inf bounds
#bounds = list(bounds)
#binf = list()
#bsup = list()
#for b in bounds:
# inf, sup = b
# binf.append(inf)
# bsup.append(sup)
#binf = np.array(binf)
#bsup = np.array(bsup)
#
#condi = None
## additionnal constrain as function of the parameters
#if condi == None:
# condi = lambda p: True
#
## Loglikelihood function taking into accounts the bounds
#def loglike(x):
# """ Loglikelihood being -chi2/2.
# Take into account the bounds.
# """
# cinf = np.sum(x<binf)
# csup = np.sum(x>bsup)
# if cinf == 0 and csup == 0 and condi(x) == True:
## return -0.5*aux(np.power(10,x))
# return -0.5*chi2(x)
# else:
# return -np.inf
#
## number of parameters/dimensions
#ndim = len(bounds)
#
## default nwalkers
#if nwalkers == None:
# nwalkers = 10 * ndim
#
## walkers are uniformly spread in the parameter space
#pos = list()
#for n in xrange(nwalkers):
# accept = False
# while not accept:
# new_pos = [
# np.random.uniform(low=l, high=h) for l,h in zip(binf, bsup)
# ]
# accept = condi(new_pos)
# pos.append(new_pos)
#
## MCMC analysis
#sampler = emcee.EnsembleSampler(nwalkers, ndim, loglike, threads=threads)
#sampler.run_mcmc(pos, nsteps, rstate0=np.random.get_state())
#############################################################################
# loading the mcmc results
logd, chain, lnprob, acc = mcr.get_mcmc_sampler(sampler_path)
lab = tuple(['$\mu${}'.format(i) for i in range(ndim)])
dim = int(logd['dim'])
xopt, inf, sup = mcr.mcmc_results(dim, chain, lnprob, acc, lab,
scale=SCALE, savedir=sampler_path)
print(xopt, inf, sup)
| [
"[email protected]"
]
| |
05c06ff5850ee1f5cbab0d42f5704ce5b0f4acb3 | 57d1580fd540b4819abb67f9db43fdfbba63725f | /hydrogen_notebooks/option_pricing/binomial_european_call_delta_hedging.py | 29f3ca209e1b50cb4571fff0cac52d807c607296 | []
| no_license | glyfish/alpaca | 49edfcb9d80551825dfa4cf071f21aeb95a3502f | 2b5b69bcf50ed081a526742658be503706af94b4 | refs/heads/master | 2023-02-22T00:24:19.293502 | 2022-09-05T17:20:23 | 2022-09-05T17:20:23 | 186,169,438 | 1 | 3 | null | 2023-02-11T00:52:12 | 2019-05-11T18:38:58 | Python | UTF-8 | Python | false | false | 2,302 | py | # %%
%load_ext autoreload
%autoreload 2
import os
import sys
import numpy
from matplotlib import pyplot
from lib import config
from scipy.stats import binom
wd = os.getcwd()
yahoo_root = os.path.join(wd, 'data', 'yahoo')
pyplot.style.use(config.glyfish_style)
# %%
def qrn(U, D, R):
return (R - D) / (U - D)
def qrn1(q, U, R):
return q*(1.0 + U) / (1.0 + R)
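# For reference: qrn() is the per-step risk-neutral up probability
#     q = (R - D) / (U - D),
# and qrn1() is the shifted measure
#     q' = q * (1 + U) / (1 + R)
# used by the closed-form binomial (CRR) call price below.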
def binomial_tail_cdf(l, n, p):
return 1.0 - binom.cdf(l, n, p)
def cutoff(S0, U, D, K, n):
for i in range(0, n + 1):
iU = (1.0 + U)**i
iD = (1.0 + D)**(n - i)
payoff = S0*iU*iD - K
if payoff > 0:
return i
return n + 1
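# cutoff() returns the smallest number of up-moves i (out of n steps) for which the
# terminal stock price S0*(1+U)^i*(1+D)^(n-i) exceeds the strike K, i.e. the first
# index at which the call finishes in the money; a result of n+1 means it never does.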
def european_call_payoff(U, D, R, S0, K, n):
l = cutoff(S0, U, D, K, n)
q = qrn(U, D, R)
q1 = qrn1(q, U, R)
Ψq = binomial_tail_cdf(l - 1, n, q)
Ψq1 = binomial_tail_cdf(l - 1, n, q1)
return S0*Ψq1 - K*(1 + R)**(-n)*Ψq
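# european_call_payoff() implements the standard n-step binomial call price
#     C0 = S0 * Psi(l; n, q') - K * (1 + R)**(-n) * Psi(l; n, q),
# where Psi(l; n, p) = P[X >= l] for X ~ Binomial(n, p) (binomial_tail_cdf above)
# and l is the in-the-money cutoff.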
def delta(CU, CD, SU, SD):
return (CU - CD) / (SU - SD)
def init_borrow(S0, C0, x):
return C0 - S0 * x
def borrow(y, R, x1, x2, S):
return y * (1 + R) + (x1 - x2) * S
def portfolio_value(x, S, y):
return x * S + y
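# Delta-hedging helpers: delta() is the replicating stock position
#     x = (C_up - C_down) / (S_up - S_down),
# init_borrow() is the initial cash leg y0 = C0 - x*S0, and borrow() rolls the cash
# account forward at rate R while self-financing the change in stock position, so
# portfolio_value(x, S, y) = x*S + y should track the option price at each node.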
# %%
n = 3
U = 0.2
D = -0.1
R = 0.1
S0 = 100.0
K = 105.0
# %%
q = qrn(U, D, R)
q1 = qrn1(q, U, R)
l = cutoff(S0, U, D, K, n)
Ψq = binomial_tail_cdf(l - 1, n, q)
Ψq1 = binomial_tail_cdf(l - 1, n, q1)
q, q1, l, Ψq, Ψq1
binom.cdf(l, n, q)
# %%
# t = 0
C0 = european_call_payoff(U, D, R, S0, K, n)
# %%
# Delta hedge
# t = 0
S1U = S0*(1.0 + U)
S1D = S0*(1.0 + D)
C1U = european_call_payoff(U, D, R, S1U, K, n - 1)
C1D = european_call_payoff(U, D, R, S1D, K, n - 1)
x1 = delta(C1U, C1D, S1U, S1D)
y1 = init_borrow(S0, C0, x1)
portfolio_value(x1, S0, y1)
# t = 1
# The price goes up S1 = S0*(1+U)
S1 = S0 * (1 + U)
S2U = S1*(1.0 + U)
S2D = S1*(1.0 + D)
C2U = european_call_payoff(U, D, R, S2U, K, n - 2)
C2D = european_call_payoff(U, D, R, S2D, K, n - 2)
x2 = delta(C2U, C2D, S2U, S2D)
y2 = borrow(y1, R, x1, x2, S1)
portfolio_value(x2, S1, y2)
# t = 2
# The price goes down S1 = S0*(1+U)*(1+D)
S2 = S0 * (1 + U) * (1 + D)
S3U = S2*(1.0 + U)
S3D = S2*(1.0 + D)
C3U = european_call_payoff(U, D, R, S3U, K, n - 3)
C3D = european_call_payoff(U, D, R, S3D, K, n - 3)
x3 = delta(C3U, C3D, S3U, S3D)
y3 = borrow(y2, R, x2, x3, S2)
portfolio_value(x3, S2, y3)
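# The three cells above rebalance the hedge along the path up -> down: at each date
# the stock position is reset to the new delta and the cash account is rolled forward
# self-financingly, so each printed portfolio value can be compared with the binomial
# option price at the corresponding node.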
| [
"[email protected]"
]
| |
ce5dade7d36a431e3ec81dade64648f6c22eca35 | 7832e7dc8f1583471af9c08806ce7f1117cd228a | /aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/RunClusterServiceActionRequest.py | eb1c959505c70fd4e06aa43388665c4d9f9b06a3 | [
"Apache-2.0"
]
| permissive | dianplus/aliyun-openapi-python-sdk | d6494850ddf0e66aaf04607322f353df32959725 | 6edf1ed02994245dae1d1b89edc6cce7caa51622 | refs/heads/master | 2023-04-08T11:35:36.216404 | 2017-11-02T12:01:15 | 2017-11-02T12:01:15 | 109,257,597 | 0 | 0 | NOASSERTION | 2023-03-23T17:59:30 | 2017-11-02T11:44:27 | Python | UTF-8 | Python | false | false | 3,508 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class RunClusterServiceActionRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'RunClusterServiceAction')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_HostIdList(self):
return self.get_query_params().get('HostIdList')
def set_HostIdList(self,HostIdList):
self.add_query_param('HostIdList',HostIdList)
def get_ServiceName(self):
return self.get_query_params().get('ServiceName')
def set_ServiceName(self,ServiceName):
self.add_query_param('ServiceName',ServiceName)
def get_ServiceActionName(self):
return self.get_query_params().get('ServiceActionName')
def set_ServiceActionName(self,ServiceActionName):
self.add_query_param('ServiceActionName',ServiceActionName)
def get_CustomCommand(self):
return self.get_query_params().get('CustomCommand')
def set_CustomCommand(self,CustomCommand):
self.add_query_param('CustomCommand',CustomCommand)
def get_ComponentNameList(self):
return self.get_query_params().get('ComponentNameList')
def set_ComponentNameList(self,ComponentNameList):
self.add_query_param('ComponentNameList',ComponentNameList)
def get_Comment(self):
return self.get_query_params().get('Comment')
def set_Comment(self,Comment):
self.add_query_param('Comment',Comment)
def get_IsRolling(self):
return self.get_query_params().get('IsRolling')
def set_IsRolling(self,IsRolling):
self.add_query_param('IsRolling',IsRolling)
def get_NodeCountPerBatch(self):
return self.get_query_params().get('NodeCountPerBatch')
def set_NodeCountPerBatch(self,NodeCountPerBatch):
self.add_query_param('NodeCountPerBatch',NodeCountPerBatch)
def get_TotlerateFailCount(self):
return self.get_query_params().get('TotlerateFailCount')
def set_TotlerateFailCount(self,TotlerateFailCount):
self.add_query_param('TotlerateFailCount',TotlerateFailCount)
def get_OnlyRestartStaleConfigNodes(self):
return self.get_query_params().get('OnlyRestartStaleConfigNodes')
def set_OnlyRestartStaleConfigNodes(self,OnlyRestartStaleConfigNodes):
self.add_query_param('OnlyRestartStaleConfigNodes',OnlyRestartStaleConfigNodes)
def get_TurnOnMaintenanceMode(self):
return self.get_query_params().get('TurnOnMaintenanceMode')
def set_TurnOnMaintenanceMode(self,TurnOnMaintenanceMode):
self.add_query_param('TurnOnMaintenanceMode',TurnOnMaintenanceMode) | [
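# A minimal usage sketch (illustrative only; the cluster/service values below are
# placeholders, and valid AccessKey credentials plus a region id are assumed):
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access_key_id>', '<access_key_secret>', 'cn-hangzhou')
#     request = RunClusterServiceActionRequest()
#     request.set_ClusterId('C-XXXXXXXXXXXXXXXX')
#     request.set_ServiceName('HDFS')
#     request.set_ServiceActionName('RESTART')
#     response = client.do_action_with_exception(request)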
"[email protected]"
]
| |
b09fdc0bc43f30b2b51c8893afcf2024ef86d619 | 0009c76a25c89a0d61d3bc9e10071da58bdfaa5a | /py/ztools/mtp/mtp_tools.py | 0496f5ae683026478bdcc98faf9cc9c89b3e14a9 | [
"MIT"
]
| permissive | julesontheroad/NSC_BUILDER | 84054e70a80b572088b0806a47ceb398302451b5 | e9083e83383281bdd9e167d3141163dcc56b6710 | refs/heads/master | 2023-07-05T05:23:17.114363 | 2021-11-15T19:34:47 | 2021-11-15T19:34:47 | 149,040,416 | 1,249 | 143 | MIT | 2022-12-15T03:19:33 | 2018-09-16T22:18:01 | Python | UTF-8 | Python | false | false | 8,313 | py | import os
from listmanager import folder_to_list
from listmanager import parsetags
from pathlib import Path
import Print
import shutil
from mtp.wpd import is_switch_connected
import sys
import subprocess
from python_pick import pick
from python_pick import Picker
squirrel_dir=os.path.abspath(os.curdir)
NSCB_dir=os.path.abspath('../'+(os.curdir))
if os.path.exists(os.path.join(squirrel_dir,'ztools')):
NSCB_dir=squirrel_dir
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
ztools_dir=os.path.join(NSCB_dir,'ztools')
squirrel_dir=ztools_dir
elif os.path.exists(os.path.join(NSCB_dir,'ztools')):
squirrel_dir=squirrel_dir
ztools_dir=os.path.join(NSCB_dir, 'ztools')
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
else:
ztools_dir=os.path.join(NSCB_dir, 'ztools')
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
testroute1=os.path.join(squirrel_dir, "squirrel.py")
testroute2=os.path.join(squirrel_dir, "squirrel.exe")
urlconfig=os.path.join(zconfig_dir,'NUT_DB_URL.txt')
isExe=False
if os.path.exists(testroute1):
squirrel=testroute1
isExe=False
elif os.path.exists(testroute2):
squirrel=testroute2
isExe=True
bin_folder=os.path.join(ztools_dir, 'bin')
nscb_mtp=os.path.join(bin_folder, 'nscb_mtp.exe')
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
if not os.path.exists(cachefolder):
os.makedirs(cachefolder)
games_installed_cache=os.path.join(cachefolder, 'games_installed.txt')
autoloader_files_cache=os.path.join(cachefolder, 'autoloader_files.txt')
sd_xci_cache=os.path.join(cachefolder, 'sd_xci.txt')
valid_saves_cache=os.path.join(cachefolder, 'valid_saves.txt')
mtp_source_lib=os.path.join(zconfig_dir,'mtp_source_libraries.txt')
mtp_internal_lib=os.path.join(zconfig_dir,'mtp_SD_libraries.txt')
storage_info=os.path.join(cachefolder, 'storage.csv')
download_lib_file = os.path.join(zconfig_dir, 'mtp_download_libraries.txt')
sx_autoloader_db=os.path.join(zconfig_dir, 'sx_autoloader_db')
def gen_sx_autoloader_files_menu():
print('***********************************************')
print('SX AUTOLOADER GENERATE FILES FROM HDD OR FOLDER')
print('***********************************************')
print('')
folder=input("Input a drive path: ")
if not os.path.exists(folder):
sys.exit("Can't find location")
title = 'Target for autoloader files: '
options = ['HDD','SD']
selected = pick(options, title, min_selection_count=1)
if selected[0]=='HDD':
type='hdd'
else:
type='sd'
title = 'Push files after generation?: '
options = ['YES','NO']
selected = pick(options, title, min_selection_count=1)
if selected[0]=='YES':
push=True
else:
push=False
title = "Ensure files can't colide after transfer?: "
options = ['YES','NO']
selected = pick(options, title, min_selection_count=1)
if selected[0]=='YES':
no_colide=True
else:
no_colide=False
gen_sx_autoloader_files(folder,type=type,push=push,no_colide=no_colide)
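# gen_sx_autoloader_files() below writes one small text file per title id found in
# `folder`: each file is named after the game's id (from parsetags) and contains the
# game's path rewritten to start with "usbhdd:/" (HDD) or "sdmc:/" (SD), the path
# form used by the SX OS autoloader cache; optionally the files are then pushed to
# the console over MTP.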
def gen_sx_autoloader_files(folder,type='hdd',push=False,no_colide=False):
gamelist=folder_to_list(folder,['xci','xc0'])
if type=='hdd':
SD_folder=os.path.join(sx_autoloader_db, 'hdd')
else:
SD_folder=os.path.join(sx_autoloader_db, 'sd')
if not os.path.exists(sx_autoloader_db):
os.makedirs(sx_autoloader_db)
if not os.path.exists(SD_folder):
os.makedirs(SD_folder)
for f in os.listdir(SD_folder):
fp = os.path.join(SD_folder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
print(' * Generating autoloader files')
try:
for g in gamelist:
try:
fileid,fileversion,cctag,nG,nU,nD,baseid=parsetags(g)
if fileid=='unknown':
continue
tfile=os.path.join(SD_folder,fileid)
fileparts=Path(g).parts
if type=='hdd':
new_path=g.replace(fileparts[0],'"usbhdd:/')
else:
new_path=g.replace(fileparts[0],'"sdmc:/')
new_path=new_path.replace('\\','/')
with open(tfile,'w') as text_file:
text_file.write(new_path)
except:pass
print(' DONE')
if push==True:
if not is_switch_connected():
sys.exit("Can't push files. Switch device isn't connected.\nCheck if mtp responder is running!!!")
print(' * Pushing autoloader files')
if type=='hdd':
destiny="1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\hdd"
else:
destiny="1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\sd"
process=subprocess.Popen([nscb_mtp,"TransferFolder","-ori",SD_folder,"-dst",destiny,"-fbf","true"])
while process.poll()==None:
if process.poll()!=None:
process.terminate();
if no_colide==True:
cleanup_sx_autoloader_files()
except BaseException as e:
Print.error('Exception: ' + str(e))
pass
def cleanup_sx_autoloader_files():
from mtp_game_manager import retrieve_xci_paths
from mtp_game_manager import get_gamelist
try:
for f in os.listdir(cachefolder):
fp = os.path.join(cachefolder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
except:pass
if not is_switch_connected():
sys.exit("Can't push files. Switch device isn't connected.\nCheck if mtp responder is running!!!")
retrieve_xci_paths()
print(" * Retriving autoloader files in device. Please Wait...")
process=subprocess.Popen([nscb_mtp,"Retrieve_autoloader_files","-tfile",autoloader_files_cache,"-show","false"],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
while process.poll()==None:
if process.poll()!=None:
process.terminate();
if os.path.exists(autoloader_files_cache):
print(" Success")
else:
sys.exit("Autoloader files weren't retrieved properly")
gamelist=get_gamelist(file=sd_xci_cache)
autoloader_list=get_gamelist(file=autoloader_files_cache)
sd_xci_ids=[]
for g in gamelist:
try:
fileid,fileversion,cctag,nG,nU,nD,baseid=parsetags(g)
sd_xci_ids.append(fileid)
except:pass
files_to_remove=[]
for f in autoloader_list:
fileparts=Path(f).parts
		if 'sd' in fileparts and not (fileparts[-1] in sd_xci_ids):
files_to_remove.append(f)
elif 'hdd' in fileparts and (fileparts[-1] in sd_xci_ids):
files_to_remove.append(f)
print(" * The following files will be removed")
for f in files_to_remove:
print(" - "+f)
for f in files_to_remove:
process=subprocess.Popen([nscb_mtp,"DeleteFile","-fp",f])
while process.poll()==None:
if process.poll()!=None:
process.terminate();
def push_sx_autoloader_libraries():
if not is_switch_connected():
sys.exit("Can't push files. Switch device isn't connected.\nCheck if mtp responder is running!!!")
title = "Ensure files can't colide after transfer?: "
options = ['YES','NO']
selected = pick(options, title, min_selection_count=1)
if selected[0]=='YES':
no_colide=True
else:
no_colide=False
print(' * Pushing autoloader files in hdd folder')
HDD_folder=os.path.join(sx_autoloader_db, 'hdd')
destiny="1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\hdd"
process=subprocess.Popen([nscb_mtp,"TransferFolder","-ori",HDD_folder,"-dst",destiny,"-fbf","true"])
while process.poll()==None:
if process.poll()!=None:
process.terminate();
print(' * Pushing autoloader files in SD folder')
SD_folder=os.path.join(sx_autoloader_db, 'sd')
destiny="1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\sd"
process=subprocess.Popen([nscb_mtp,"TransferFolder","-ori",SD_folder,"-dst",destiny,"-fbf","true"])
while process.poll()==None:
if process.poll()!=None:
process.terminate();
if no_colide==True:
cleanup_sx_autoloader_files()
def get_nca_ticket(filepath,nca):
import Fs
from binascii import hexlify as hx, unhexlify as uhx
if filepath.endswith('xci') or filepath.endswith('xcz'):
f = Fs.Xci(filepath)
check=False;titleKey=0
for nspF in f.hfs0:
if str(nspF._path)=="secure":
for file in nspF:
if (file._path).endswith('.tik'):
titleKey = file.getTitleKeyBlock().to_bytes(16, byteorder='big')
check=f.verify_key(nca,str(file._path))
if check==True:
break
return check,titleKey
elif filepath.endswith('nsp') or filepath.endswith('nsz'):
f = Fs.Nsp(filepath)
check=False;titleKey=0
for file in f:
if (file._path).endswith('.tik'):
titleKey = file.getTitleKeyBlock().to_bytes(16, byteorder='big')
check=f.verify_key(nca,str(file._path))
if check==True:
break
return check,titleKey | [
"[email protected]"
]
| |
fa26cbfd0a0af998227fd24745c6f1b50a85ae34 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03046/s367901013.py | bd60026b909a76c85e533b517ac364ab9dac011a | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | from sys import stdout
printn = lambda x: stdout.write(str(x))
inn = lambda : int(input())
inl = lambda: list(map(int, input().split()))
inm = lambda: map(int, input().split())
ins = lambda : input().strip()
DBG = True # and False
BIG = 999999999
R = 10**9 + 7
def ddprint(x):
if DBG:
print(x)
m,k = inm()
if m==0 and k==0:
print('0 0')
exit()
if m==0 and k>0:
print('-1')
exit()
if m==1 and k==0:
print('0 0 1 1')
exit()
if m==1 and k>0:
print('-1')
exit()
if k>=2**m:
print('-1')
exit()
if k==0:
printn('0 0')
for i in range(1,2**m):
printn(' {} {}'.format(i,i))
print('')
exit()
u = [False]*(2**m)
u[k] = True
a = []
cnt = 0
for i in range(1,2**m):
j = i^k
if not u[i] and not u[j]:
a.append(i)
u[j] = True
cnt += 1
if cnt==2**(m-1)-1:
break
s = [x for x in a]
t = [x for x in a]
t.reverse()
s.extend([0,k,0])
s.extend(t)
v = [x^k for x in a]
t = [x for x in v]
t.reverse()
s.extend(v)
s.append(k)
s.extend(t)
printn(s[0])
for i in range(1,len(s)):
printn(' ' + str(s[i]))
print("")
| [
"[email protected]"
]
| |
354f4e8b11fc7deaae648a37d207d137f827d66e | 0aa87ee2e544f56c17c2dde28a3b3feed08daa14 | /apps/users/urls.py | 6dda1d1373eadae3c77476250c17308642600204 | []
| no_license | yanshigou/mxonline | f2cc44724c1511418953e7e06d04661244b29455 | cebc3295734713846828246fc54dd33f8df14f86 | refs/heads/master | 2022-12-09T12:11:05.734326 | 2022-08-17T10:38:13 | 2022-08-17T10:38:13 | 148,120,737 | 0 | 2 | null | 2022-12-08T02:58:15 | 2018-09-10T08:06:10 | Python | UTF-8 | Python | false | false | 1,309 | py | # -*- coding: utf-8 -*-
__author__ = 'dzt'
__date__ = '2018/12/21 23:48'
from django.conf.urls import url
from .views import UserInfoView, UploadImageView, UpdatePwdView, SendEmailCodeView, UpdateEmailView, MyCourses
from .views import MyFavOrgView, MyFavTeacherView, MyFavCourseView, MyMessageView
urlpatterns = [
    # User info
url(r'^info/$', UserInfoView.as_view(), name='user_info'),
    # User avatar upload
url(r'^image/upload/$', UploadImageView.as_view(), name='image_upload'),
    # Change password from the user profile page
url(r'^update/pwd/$', UpdatePwdView.as_view(), name='update_pwd'),
    # Send email verification code
url(r'^sendemail_code/$', SendEmailCodeView.as_view(), name='sendemail_code'),
    # Change email address
url(r'^update_email/$', UpdateEmailView.as_view(), name='update_email'),
    # My courses
url(r'^mycourses/$', MyCourses.as_view(), name='mycourses'),
    # My favorites - streaming organizations
url(r'^myfav/org/$', MyFavOrgView.as_view(), name='myfav_org'),
    # My favorites - streamers
url(r'^myfav/teacher/$', MyFavTeacherView.as_view(), name='myfav_teacher'),
    # My favorites - courses
url(r'^myfav/course/$', MyFavCourseView.as_view(), name='myfav_course'),
    # My messages
url(r'^mymessage/$', MyMessageView.as_view(), name='mymessage'),
] | [
"[email protected]"
]
| |
506ab3ede97c112af86c4a23956ee39a25c9aecd | 83b1a267809c08a57a3bb16c103d71539502a650 | /job/migrations/0011_apply_created_at.py | c9ebca4b68d4fe3dc9d8d3052bdac004ee5816f8 | []
| no_license | rimatechcampus/django-jobboard-project- | c66933295b4692c7d3cb055dcf0cbaef80424b38 | 8823e1e7db011a4fbaa0fc87f1810bcd5dab08c6 | refs/heads/master | 2022-11-20T16:40:56.495550 | 2020-07-19T16:52:13 | 2020-07-19T16:52:13 | 279,794,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 3.0.8 on 2020-07-18 08:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job', '0010_apply_job'),
]
operations = [
migrations.AddField(
model_name='apply',
name='created_at',
field=models.DateTimeField(auto_now=True),
),
]
| [
"[email protected]"
]
| |
91f7b4d2efaf48ed26bfcc96e2670ac062a664fe | 6515c886cc420539bed05b2250c76e1c6974e5da | /models/mxnet_resnet_50.py | 708dbb07c13c01468c1d3fe4962f17ca8206bfd6 | []
| no_license | yuanmengzhixing/pytorch_deep_metric_learning | a320fd4e8863b9b8c3768b61e46027ccfc2077ee | b57621355a49af89573447c72685694043548434 | refs/heads/master | 2020-03-22T23:10:11.622231 | 2018-03-11T08:02:56 | 2018-03-11T08:02:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,697 | py | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
__weights_dict = dict()
pre_trained_path = '/home/zhengxiawu/project/pytorch_deep_metric_learning/pretrained_models/kit_pytorch.npy'
#pre_trained_path = '/home/zhengxiawu/deep_learning/model/mxnet_2_resnet/mx2pt_resnet_50.npy'
#pre_trained_path = '/home/zhengxiawu/project/pytorch_deep_metric_learning/pretrained_models/resnet_50.npy'
pre_trained_path = '/home/zhengxiawu/deep_learning/model/mxnet_2_resnet/resnet_50_pytorch.npy'
def load_weights():
try:
weights_dict = np.load(pre_trained_path).item()
except:
weights_dict = np.load(pre_trained_path, encoding='bytes').item()
return weights_dict
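# Note: this class appears to follow the MMdnn mxnet-to-pytorch conversion layout --
# every conv / batch-norm layer is created through the private __conv /
# __batch_normalization helpers, which (when 'pretrain' is set) look their parameters
# up by layer name in the numpy weights dict returned by load_weights() above.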
class mxnet_resnet_50(nn.Module):
def __init__(self, **kwargs):
super(mxnet_resnet_50, self).__init__()
num_class = kwargs['num_class']
if kwargs['pretrain']:
global __weights_dict
__weights_dict = load_weights()
self.conv1 = self.__conv(2, name='conv1', in_channels=3, out_channels=64, kernel_size=(7L, 7L), stride=(2L, 2L),
groups=1, bias=True)
self.bn_conv1 = self.__batch_normalization(2, 'bn_conv1', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2a_branch1 = self.__conv(2, name='res2a_branch1', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.res2a_branch2a = self.__conv(2, name='res2a_branch2a', in_channels=64, out_channels=64,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2a_branch1 = self.__batch_normalization(2, 'bn2a_branch1', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn2a_branch2a = self.__batch_normalization(2, 'bn2a_branch2a', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2a_branch2b = self.__conv(2, name='res2a_branch2b', in_channels=64, out_channels=64,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn2a_branch2b = self.__batch_normalization(2, 'bn2a_branch2b', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2a_branch2c = self.__conv(2, name='res2a_branch2c', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2a_branch2c = self.__batch_normalization(2, 'bn2a_branch2c', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2b_branch2a = self.__conv(2, name='res2b_branch2a', in_channels=256, out_channels=64,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2b_branch2a = self.__batch_normalization(2, 'bn2b_branch2a', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2b_branch2b = self.__conv(2, name='res2b_branch2b', in_channels=64, out_channels=64,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn2b_branch2b = self.__batch_normalization(2, 'bn2b_branch2b', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2b_branch2c = self.__conv(2, name='res2b_branch2c', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2b_branch2c = self.__batch_normalization(2, 'bn2b_branch2c', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2c_branch2a = self.__conv(2, name='res2c_branch2a', in_channels=256, out_channels=64,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2c_branch2a = self.__batch_normalization(2, 'bn2c_branch2a', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2c_branch2b = self.__conv(2, name='res2c_branch2b', in_channels=64, out_channels=64,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn2c_branch2b = self.__batch_normalization(2, 'bn2c_branch2b', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2c_branch2c = self.__conv(2, name='res2c_branch2c', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2c_branch2c = self.__batch_normalization(2, 'bn2c_branch2c', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3a_branch1 = self.__conv(2, name='res3a_branch1', in_channels=256, out_channels=512,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.res3a_branch2a = self.__conv(2, name='res3a_branch2a', in_channels=256, out_channels=128,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.bn3a_branch1 = self.__batch_normalization(2, 'bn3a_branch1', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn3a_branch2a = self.__batch_normalization(2, 'bn3a_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3a_branch2b = self.__conv(2, name='res3a_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3a_branch2b = self.__batch_normalization(2, 'bn3a_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3a_branch2c = self.__conv(2, name='res3a_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3a_branch2c = self.__batch_normalization(2, 'bn3a_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3b_branch2a = self.__conv(2, name='res3b_branch2a', in_channels=512, out_channels=128,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3b_branch2a = self.__batch_normalization(2, 'bn3b_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3b_branch2b = self.__conv(2, name='res3b_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3b_branch2b = self.__batch_normalization(2, 'bn3b_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3b_branch2c = self.__conv(2, name='res3b_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3b_branch2c = self.__batch_normalization(2, 'bn3b_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3c_branch2a = self.__conv(2, name='res3c_branch2a', in_channels=512, out_channels=128,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3c_branch2a = self.__batch_normalization(2, 'bn3c_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3c_branch2b = self.__conv(2, name='res3c_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3c_branch2b = self.__batch_normalization(2, 'bn3c_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3c_branch2c = self.__conv(2, name='res3c_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3c_branch2c = self.__batch_normalization(2, 'bn3c_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3d_branch2a = self.__conv(2, name='res3d_branch2a', in_channels=512, out_channels=128,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3d_branch2a = self.__batch_normalization(2, 'bn3d_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3d_branch2b = self.__conv(2, name='res3d_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3d_branch2b = self.__batch_normalization(2, 'bn3d_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3d_branch2c = self.__conv(2, name='res3d_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3d_branch2c = self.__batch_normalization(2, 'bn3d_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4a_branch1 = self.__conv(2, name='res4a_branch1', in_channels=512, out_channels=1024,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.res4a_branch2a = self.__conv(2, name='res4a_branch2a', in_channels=512, out_channels=256,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.bn4a_branch1 = self.__batch_normalization(2, 'bn4a_branch1', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn4a_branch2a = self.__batch_normalization(2, 'bn4a_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4a_branch2b = self.__conv(2, name='res4a_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4a_branch2b = self.__batch_normalization(2, 'bn4a_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4a_branch2c = self.__conv(2, name='res4a_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4a_branch2c = self.__batch_normalization(2, 'bn4a_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4b_branch2a = self.__conv(2, name='res4b_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4b_branch2a = self.__batch_normalization(2, 'bn4b_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4b_branch2b = self.__conv(2, name='res4b_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4b_branch2b = self.__batch_normalization(2, 'bn4b_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4b_branch2c = self.__conv(2, name='res4b_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4b_branch2c = self.__batch_normalization(2, 'bn4b_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4c_branch2a = self.__conv(2, name='res4c_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4c_branch2a = self.__batch_normalization(2, 'bn4c_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4c_branch2b = self.__conv(2, name='res4c_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4c_branch2b = self.__batch_normalization(2, 'bn4c_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4c_branch2c = self.__conv(2, name='res4c_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4c_branch2c = self.__batch_normalization(2, 'bn4c_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4d_branch2a = self.__conv(2, name='res4d_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4d_branch2a = self.__batch_normalization(2, 'bn4d_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4d_branch2b = self.__conv(2, name='res4d_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4d_branch2b = self.__batch_normalization(2, 'bn4d_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4d_branch2c = self.__conv(2, name='res4d_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4d_branch2c = self.__batch_normalization(2, 'bn4d_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4e_branch2a = self.__conv(2, name='res4e_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4e_branch2a = self.__batch_normalization(2, 'bn4e_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4e_branch2b = self.__conv(2, name='res4e_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4e_branch2b = self.__batch_normalization(2, 'bn4e_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4e_branch2c = self.__conv(2, name='res4e_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4e_branch2c = self.__batch_normalization(2, 'bn4e_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4f_branch2a = self.__conv(2, name='res4f_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4f_branch2a = self.__batch_normalization(2, 'bn4f_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4f_branch2b = self.__conv(2, name='res4f_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4f_branch2b = self.__batch_normalization(2, 'bn4f_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4f_branch2c = self.__conv(2, name='res4f_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4f_branch2c = self.__batch_normalization(2, 'bn4f_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5a_branch1 = self.__conv(2, name='res5a_branch1', in_channels=1024, out_channels=2048,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.res5a_branch2a = self.__conv(2, name='res5a_branch2a', in_channels=1024, out_channels=512,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.bn5a_branch1 = self.__batch_normalization(2, 'bn5a_branch1', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn5a_branch2a = self.__batch_normalization(2, 'bn5a_branch2a', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5a_branch2b = self.__conv(2, name='res5a_branch2b', in_channels=512, out_channels=512,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn5a_branch2b = self.__batch_normalization(2, 'bn5a_branch2b', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5a_branch2c = self.__conv(2, name='res5a_branch2c', in_channels=512, out_channels=2048,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5a_branch2c = self.__batch_normalization(2, 'bn5a_branch2c', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5b_branch2a = self.__conv(2, name='res5b_branch2a', in_channels=2048, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5b_branch2a = self.__batch_normalization(2, 'bn5b_branch2a', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5b_branch2b = self.__conv(2, name='res5b_branch2b', in_channels=512, out_channels=512,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn5b_branch2b = self.__batch_normalization(2, 'bn5b_branch2b', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5b_branch2c = self.__conv(2, name='res5b_branch2c', in_channels=512, out_channels=2048,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5b_branch2c = self.__batch_normalization(2, 'bn5b_branch2c', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5c_branch2a = self.__conv(2, name='res5c_branch2a', in_channels=2048, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5c_branch2a = self.__batch_normalization(2, 'bn5c_branch2a', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5c_branch2b = self.__conv(2, name='res5c_branch2b', in_channels=512, out_channels=512,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn5c_branch2b = self.__batch_normalization(2, 'bn5c_branch2b', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5c_branch2c = self.__conv(2, name='res5c_branch2c', in_channels=512, out_channels=2048,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5c_branch2c = self.__batch_normalization(2, 'bn5c_branch2c', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.class_fc = nn.Linear(4096, num_class)
nn.init.xavier_uniform(self.class_fc._parameters['weight'],gain=0.624)
nn.init.constant(self.class_fc._parameters['weight'],0)
def forward(self, x, **kwargs):
conv1_pad = F.pad(x, (3L, 3L, 3L, 3L))
conv1 = self.conv1(conv1_pad)
# conv1_numpy = conv1.data.cpu().numpy()
# param_numpy = self.conv1._parameters['weight'].data.cpu().numpy()
bn_conv1 = self.bn_conv1(conv1)
conv1_relu = F.relu(bn_conv1)
pool1 = F.max_pool2d(conv1_relu, kernel_size=(3L, 3L), stride=(2L, 2L))
res2a_branch1 = self.res2a_branch1(pool1)
res2a_branch2a = self.res2a_branch2a(pool1)
bn2a_branch1 = self.bn2a_branch1(res2a_branch1)
bn2a_branch2a = self.bn2a_branch2a(res2a_branch2a)
res2a_branch2a_relu = F.relu(bn2a_branch2a)
res2a_branch2b_pad = F.pad(res2a_branch2a_relu, (1L, 1L, 1L, 1L))
res2a_branch2b = self.res2a_branch2b(res2a_branch2b_pad)
bn2a_branch2b = self.bn2a_branch2b(res2a_branch2b)
res2a_branch2b_relu = F.relu(bn2a_branch2b)
res2a_branch2c = self.res2a_branch2c(res2a_branch2b_relu)
bn2a_branch2c = self.bn2a_branch2c(res2a_branch2c)
res2a = bn2a_branch1 + bn2a_branch2c
res2a_relu = F.relu(res2a)
res2b_branch2a = self.res2b_branch2a(res2a_relu)
bn2b_branch2a = self.bn2b_branch2a(res2b_branch2a)
res2b_branch2a_relu = F.relu(bn2b_branch2a)
res2b_branch2b_pad = F.pad(res2b_branch2a_relu, (1L, 1L, 1L, 1L))
res2b_branch2b = self.res2b_branch2b(res2b_branch2b_pad)
bn2b_branch2b = self.bn2b_branch2b(res2b_branch2b)
res2b_branch2b_relu = F.relu(bn2b_branch2b)
res2b_branch2c = self.res2b_branch2c(res2b_branch2b_relu)
bn2b_branch2c = self.bn2b_branch2c(res2b_branch2c)
res2b = res2a_relu + bn2b_branch2c
res2b_relu = F.relu(res2b)
res2c_branch2a = self.res2c_branch2a(res2b_relu)
bn2c_branch2a = self.bn2c_branch2a(res2c_branch2a)
res2c_branch2a_relu = F.relu(bn2c_branch2a)
res2c_branch2b_pad = F.pad(res2c_branch2a_relu, (1L, 1L, 1L, 1L))
res2c_branch2b = self.res2c_branch2b(res2c_branch2b_pad)
bn2c_branch2b = self.bn2c_branch2b(res2c_branch2b)
res2c_branch2b_relu = F.relu(bn2c_branch2b)
res2c_branch2c = self.res2c_branch2c(res2c_branch2b_relu)
bn2c_branch2c = self.bn2c_branch2c(res2c_branch2c)
res2c = res2b_relu + bn2c_branch2c
res2c_relu = F.relu(res2c)
res3a_branch1 = self.res3a_branch1(res2c_relu)
res3a_branch2a = self.res3a_branch2a(res2c_relu)
bn3a_branch1 = self.bn3a_branch1(res3a_branch1)
bn3a_branch2a = self.bn3a_branch2a(res3a_branch2a)
res3a_branch2a_relu = F.relu(bn3a_branch2a)
res3a_branch2b_pad = F.pad(res3a_branch2a_relu, (1L, 1L, 1L, 1L))
res3a_branch2b = self.res3a_branch2b(res3a_branch2b_pad)
bn3a_branch2b = self.bn3a_branch2b(res3a_branch2b)
res3a_branch2b_relu = F.relu(bn3a_branch2b)
res3a_branch2c = self.res3a_branch2c(res3a_branch2b_relu)
bn3a_branch2c = self.bn3a_branch2c(res3a_branch2c)
res3a = bn3a_branch1 + bn3a_branch2c
res3a_relu = F.relu(res3a)
res3b_branch2a = self.res3b_branch2a(res3a_relu)
bn3b_branch2a = self.bn3b_branch2a(res3b_branch2a)
res3b_branch2a_relu = F.relu(bn3b_branch2a)
res3b_branch2b_pad = F.pad(res3b_branch2a_relu, (1L, 1L, 1L, 1L))
res3b_branch2b = self.res3b_branch2b(res3b_branch2b_pad)
bn3b_branch2b = self.bn3b_branch2b(res3b_branch2b)
res3b_branch2b_relu = F.relu(bn3b_branch2b)
res3b_branch2c = self.res3b_branch2c(res3b_branch2b_relu)
bn3b_branch2c = self.bn3b_branch2c(res3b_branch2c)
res3b = res3a_relu + bn3b_branch2c
res3b_relu = F.relu(res3b)
res3c_branch2a = self.res3c_branch2a(res3b_relu)
bn3c_branch2a = self.bn3c_branch2a(res3c_branch2a)
res3c_branch2a_relu = F.relu(bn3c_branch2a)
res3c_branch2b_pad = F.pad(res3c_branch2a_relu, (1L, 1L, 1L, 1L))
res3c_branch2b = self.res3c_branch2b(res3c_branch2b_pad)
bn3c_branch2b = self.bn3c_branch2b(res3c_branch2b)
res3c_branch2b_relu = F.relu(bn3c_branch2b)
res3c_branch2c = self.res3c_branch2c(res3c_branch2b_relu)
bn3c_branch2c = self.bn3c_branch2c(res3c_branch2c)
res3c = res3b_relu + bn3c_branch2c
res3c_relu = F.relu(res3c)
res3d_branch2a = self.res3d_branch2a(res3c_relu)
bn3d_branch2a = self.bn3d_branch2a(res3d_branch2a)
res3d_branch2a_relu = F.relu(bn3d_branch2a)
res3d_branch2b_pad = F.pad(res3d_branch2a_relu, (1L, 1L, 1L, 1L))
res3d_branch2b = self.res3d_branch2b(res3d_branch2b_pad)
bn3d_branch2b = self.bn3d_branch2b(res3d_branch2b)
res3d_branch2b_relu = F.relu(bn3d_branch2b)
res3d_branch2c = self.res3d_branch2c(res3d_branch2b_relu)
bn3d_branch2c = self.bn3d_branch2c(res3d_branch2c)
res3d = res3c_relu + bn3d_branch2c
res3d_relu = F.relu(res3d)
res4a_branch1 = self.res4a_branch1(res3d_relu)
res4a_branch2a = self.res4a_branch2a(res3d_relu)
bn4a_branch1 = self.bn4a_branch1(res4a_branch1)
bn4a_branch2a = self.bn4a_branch2a(res4a_branch2a)
res4a_branch2a_relu = F.relu(bn4a_branch2a)
res4a_branch2b_pad = F.pad(res4a_branch2a_relu, (1L, 1L, 1L, 1L))
res4a_branch2b = self.res4a_branch2b(res4a_branch2b_pad)
bn4a_branch2b = self.bn4a_branch2b(res4a_branch2b)
res4a_branch2b_relu = F.relu(bn4a_branch2b)
res4a_branch2c = self.res4a_branch2c(res4a_branch2b_relu)
bn4a_branch2c = self.bn4a_branch2c(res4a_branch2c)
res4a = bn4a_branch1 + bn4a_branch2c
res4a_relu = F.relu(res4a)
res4b_branch2a = self.res4b_branch2a(res4a_relu)
bn4b_branch2a = self.bn4b_branch2a(res4b_branch2a)
res4b_branch2a_relu = F.relu(bn4b_branch2a)
res4b_branch2b_pad = F.pad(res4b_branch2a_relu, (1L, 1L, 1L, 1L))
res4b_branch2b = self.res4b_branch2b(res4b_branch2b_pad)
bn4b_branch2b = self.bn4b_branch2b(res4b_branch2b)
res4b_branch2b_relu = F.relu(bn4b_branch2b)
res4b_branch2c = self.res4b_branch2c(res4b_branch2b_relu)
bn4b_branch2c = self.bn4b_branch2c(res4b_branch2c)
res4b = res4a_relu + bn4b_branch2c
res4b_relu = F.relu(res4b)
res4c_branch2a = self.res4c_branch2a(res4b_relu)
bn4c_branch2a = self.bn4c_branch2a(res4c_branch2a)
res4c_branch2a_relu = F.relu(bn4c_branch2a)
res4c_branch2b_pad = F.pad(res4c_branch2a_relu, (1L, 1L, 1L, 1L))
res4c_branch2b = self.res4c_branch2b(res4c_branch2b_pad)
bn4c_branch2b = self.bn4c_branch2b(res4c_branch2b)
res4c_branch2b_relu = F.relu(bn4c_branch2b)
res4c_branch2c = self.res4c_branch2c(res4c_branch2b_relu)
bn4c_branch2c = self.bn4c_branch2c(res4c_branch2c)
res4c = res4b_relu + bn4c_branch2c
res4c_relu = F.relu(res4c)
res4d_branch2a = self.res4d_branch2a(res4c_relu)
bn4d_branch2a = self.bn4d_branch2a(res4d_branch2a)
res4d_branch2a_relu = F.relu(bn4d_branch2a)
res4d_branch2b_pad = F.pad(res4d_branch2a_relu, (1L, 1L, 1L, 1L))
res4d_branch2b = self.res4d_branch2b(res4d_branch2b_pad)
bn4d_branch2b = self.bn4d_branch2b(res4d_branch2b)
res4d_branch2b_relu = F.relu(bn4d_branch2b)
res4d_branch2c = self.res4d_branch2c(res4d_branch2b_relu)
bn4d_branch2c = self.bn4d_branch2c(res4d_branch2c)
res4d = res4c_relu + bn4d_branch2c
res4d_relu = F.relu(res4d)
res4e_branch2a = self.res4e_branch2a(res4d_relu)
bn4e_branch2a = self.bn4e_branch2a(res4e_branch2a)
res4e_branch2a_relu = F.relu(bn4e_branch2a)
res4e_branch2b_pad = F.pad(res4e_branch2a_relu, (1L, 1L, 1L, 1L))
res4e_branch2b = self.res4e_branch2b(res4e_branch2b_pad)
bn4e_branch2b = self.bn4e_branch2b(res4e_branch2b)
res4e_branch2b_relu = F.relu(bn4e_branch2b)
res4e_branch2c = self.res4e_branch2c(res4e_branch2b_relu)
bn4e_branch2c = self.bn4e_branch2c(res4e_branch2c)
res4e = res4d_relu + bn4e_branch2c
res4e_relu = F.relu(res4e)
res4f_branch2a = self.res4f_branch2a(res4e_relu)
bn4f_branch2a = self.bn4f_branch2a(res4f_branch2a)
res4f_branch2a_relu = F.relu(bn4f_branch2a)
res4f_branch2b_pad = F.pad(res4f_branch2a_relu, (1L, 1L, 1L, 1L))
res4f_branch2b = self.res4f_branch2b(res4f_branch2b_pad)
bn4f_branch2b = self.bn4f_branch2b(res4f_branch2b)
res4f_branch2b_relu = F.relu(bn4f_branch2b)
res4f_branch2c = self.res4f_branch2c(res4f_branch2b_relu)
bn4f_branch2c = self.bn4f_branch2c(res4f_branch2c)
res4f = res4e_relu + bn4f_branch2c
res4f_relu = F.relu(res4f)
res5a_branch1 = self.res5a_branch1(res4f_relu)
res5a_branch2a = self.res5a_branch2a(res4f_relu)
bn5a_branch1 = self.bn5a_branch1(res5a_branch1)
bn5a_branch2a = self.bn5a_branch2a(res5a_branch2a)
res5a_branch2a_relu = F.relu(bn5a_branch2a)
res5a_branch2b_pad = F.pad(res5a_branch2a_relu, (1L, 1L, 1L, 1L))
res5a_branch2b = self.res5a_branch2b(res5a_branch2b_pad)
bn5a_branch2b = self.bn5a_branch2b(res5a_branch2b)
res5a_branch2b_relu = F.relu(bn5a_branch2b)
res5a_branch2c = self.res5a_branch2c(res5a_branch2b_relu)
bn5a_branch2c = self.bn5a_branch2c(res5a_branch2c)
res5a = bn5a_branch1 + bn5a_branch2c
res5a_relu = F.relu(res5a)
res5b_branch2a = self.res5b_branch2a(res5a_relu)
bn5b_branch2a = self.bn5b_branch2a(res5b_branch2a)
res5b_branch2a_relu = F.relu(bn5b_branch2a)
res5b_branch2b_pad = F.pad(res5b_branch2a_relu, (1L, 1L, 1L, 1L))
res5b_branch2b = self.res5b_branch2b(res5b_branch2b_pad)
bn5b_branch2b = self.bn5b_branch2b(res5b_branch2b)
res5b_branch2b_relu = F.relu(bn5b_branch2b)
res5b_branch2c = self.res5b_branch2c(res5b_branch2b_relu)
bn5b_branch2c = self.bn5b_branch2c(res5b_branch2c)
res5b = res5a_relu + bn5b_branch2c
res5b_relu = F.relu(res5b)
res5c_branch2a = self.res5c_branch2a(res5b_relu)
bn5c_branch2a = self.bn5c_branch2a(res5c_branch2a)
res5c_branch2a_relu = F.relu(bn5c_branch2a)
res5c_branch2b_pad = F.pad(res5c_branch2a_relu, (1L, 1L, 1L, 1L))
res5c_branch2b = self.res5c_branch2b(res5c_branch2b_pad)
bn5c_branch2b = self.bn5c_branch2b(res5c_branch2b)
res5c_branch2b_relu = F.relu(bn5c_branch2b)
res5c_branch2c = self.res5c_branch2c(res5c_branch2b_relu)
bn5c_branch2c = self.bn5c_branch2c(res5c_branch2c)
res5c = res5b_relu + bn5c_branch2c
res5c_relu = F.relu(res5c)
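        # SCDA-style masking (assumption: this follows the "Selective Convolutional Descriptor
        # Aggregation" idea): sum the feature maps over channels, threshold at the spatial mean,
        # and zero out activations below that mean before pooling.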
if kwargs['scda']:
scda_x = torch.sum(res5c_relu,1,keepdim=True)
mean_x = torch.mean(scda_x.view(scda_x.size(0),-1),1,True)
scda_x = scda_x - mean_x
scda_x = scda_x>0
scda_x = scda_x.float()
res5c_relu = res5c_relu * scda_x
pooling0 = F.max_pool2d(input=res5c_relu, kernel_size=res5c_relu.size()[2:])
pooling1 = F.avg_pool2d(input=res5c_relu, kernel_size=res5c_relu.size()[2:])
flatten0 = pooling0.view(pooling0.size(0), -1)
flatten1 = pooling1.view(pooling1.size(0), -1)
avg_x = F.normalize(flatten1, p=2, dim=1)
max_x = F.normalize(flatten0, p=2, dim=1)
x = torch.cat((avg_x, max_x), dim=1)
        # the last fc layer can be treated as a distance computation
x = x * kwargs['scale']
if kwargs['is_train']:
x = self.class_fc(x)
return x
@staticmethod
def __conv(dim, name, **kwargs):
if dim == 1:
layer = nn.Conv1d(**kwargs)
elif dim == 2:
layer = nn.Conv2d(**kwargs)
elif dim == 3:
layer = nn.Conv3d(**kwargs)
else:
raise NotImplementedError()
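        # Copy the pre-converted weights (and bias, if present) from the module-level
        # __weights_dict into the freshly created layer.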
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
if 'bias' in __weights_dict[name]:
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
return layer
@staticmethod
def __batch_normalization(dim, name, **kwargs):
if dim == 1:
layer = nn.BatchNorm1d(**kwargs)
elif dim == 2:
layer = nn.BatchNorm2d(**kwargs)
elif dim == 3:
layer = nn.BatchNorm3d(**kwargs)
else:
raise NotImplementedError()
if 'scale' in __weights_dict[name]:
layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
else:
layer.weight.data.fill_(1)
if 'bias' in __weights_dict[name]:
layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
else:
layer.bias.data.fill_(0)
layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
return layer
| [
"[email protected]"
]
| |
47220864385f35b099736c3ef297a7ae7f1cbe54 | ca08100b33a78c01bf49f097f4e80ed10e4ee9ad | /intrepidboats/apps/owners_portal/utils.py | 605fe7065629b6a2f9983f3de5ed580162b6c11a | []
| no_license | elite0401/intrepidpowerboats | 347eae14b584d1be9a61ca14c014135ab0d14ad0 | d2a475b60d17aa078bf0feb5e0298c927e7362e7 | refs/heads/master | 2021-09-11T01:51:47.615117 | 2018-04-06T02:20:02 | 2018-04-06T02:20:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import gettext as _
def send_report_email(user_boat):
context = {
'user': user_boat.user,
'user_boat': user_boat,
'boat': user_boat.boat,
'site': Site.objects.get_current().domain,
'dashboard_url': reverse("owners_portal:owners_portal"),
}
send_mail(
subject=_("New boat report - Intrepid Powerboats"),
message=render_to_string('owners_portal/emails/report_email.txt', context),
from_email=settings.BUILD_A_BOAT['NO_REPLY_EMAIL_REPORTS'],
recipient_list=[user_boat.user.email],
html_message=render_to_string('owners_portal/emails/report_email.html', context),
)
def send_step_feedback_email(step_feedback):
context = {
'comments': step_feedback.comments,
'user': step_feedback.user,
'step': '{title} (phase: {phase})'.format(title=step_feedback.step.title, phase=step_feedback.step.phase),
'boat': '{boat} (model: {model})'.format(boat=step_feedback.step.user_boat,
model=step_feedback.step.user_boat.boat)
}
send_mail(
subject=_("{user} has sent feedback on {step} in Owner's portal - Intrepid Powerboats".format(
user=context['user'],
step=context['step'],
)),
message=render_to_string('owners_portal/emails/step_feedback_email.txt', context),
from_email=settings.NO_REPLY_EMAIL,
recipient_list=settings.TO_EMAIL['OWNERS_PORTAL_FEEDBACK_FORM'],
html_message=render_to_string('owners_portal/emails/step_feedback_email.html', context),
)
def send_new_shared_video_uploaded_email(shared_video):
from django.contrib.auth.models import User
admins = User.objects.filter(is_superuser=True)
subject = _("New uploaded video to vimeo")
to = admins.values_list('email', flat=True)
from_email = settings.NO_REPLY_EMAIL
site = Site.objects.get_current()
ctx = {
'user': shared_video.uploader,
'site': site.domain,
'admin_url': reverse("admin:owners_portal_sharedvideo_change", args=[shared_video.pk]),
}
message = render_to_string('owners_portal/emails/new_shared_video_email.txt', ctx)
html_message = render_to_string('owners_portal/emails/new_shared_video_email.html', ctx)
send_mail(subject=subject, message=message, from_email=from_email, recipient_list=to, html_message=html_message)
| [
"[email protected]"
]
| |
2c4cfe1cd667b7a708c96b4978b00325826dfb19 | 0987f31e64bcacb41ba3a1e20054d7b8ac0d7346 | /contests/panasonic2020/a.py | 3c85e5a3a0a4b6b5ab170b052566849aab8ae7bf | []
| no_license | masakiaota/kyoupuro | 81ae52ab3014fb2b1e10472994afa4caa9ea463b | 74915a40ac157f89fe400e3f98e9bf3c10012cd7 | refs/heads/master | 2021-06-27T04:13:52.152582 | 2020-09-20T03:21:17 | 2020-09-20T03:21:17 | 147,049,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | import sys
sys.setrecursionlimit(1 << 25)
read = sys.stdin.readline
def read_ints():
return list(map(int, read().split()))
def read_a_int():
return int(read())
def read_tuple(H):
'''
H is number of rows
'''
ret = []
for _ in range(H):
ret.append(tuple(map(int, read().split())))
return ret
def read_col(H, n_cols):
'''
H is number of rows
n_cols is number of cols
    used when the input gives separate columns such as A and B
'''
ret = [[] for _ in range(n_cols)]
for _ in range(H):
tmp = list(map(int, read().split()))
for col in range(n_cols):
ret[col].append(tmp[col])
return ret
def read_matrix(H):
'''
H is number of rows
'''
ret = []
for _ in range(H):
ret.append(list(map(int, read().split())))
return ret
    # return [list(map(int, read().split())) for _ in range(H)]  # list comprehensions are slow on PyPy
def read_map(H):
'''
H is number of rows
    for reading a board given as strings
'''
return [read()[:-1] for _ in range(H)]
def read_map_as_int(H):
'''
    read '#' as 1 and '.' as 0
'''
ret = []
for _ in range(H):
ret.append([1 if s == '#' else 0 for s in read()[:-1]])
        # note: list comprehensions are slightly slow on PyPy
        # keeping this version since numpy will likely be used anyway
return ret
# default import
from collections import defaultdict, Counter, deque
from operator import itemgetter
from itertools import product, permutations, combinations
from bisect import bisect_left, bisect_right # , insort_left, insort_right
from fractions import gcd
def lcm(a, b):
    # least common multiple
g = gcd(a, b)
return a * b // g
a = [1, 1, 1, 2, 1, 2, 1, 5, 2, 2, 1, 5, 1, 2, 1, 14,
1, 5, 1, 5, 2, 2, 1, 15, 2, 2, 5, 4, 1, 4, 1, 51]
print(a[int(input()) - 1])
| [
"[email protected]"
]
| |
b1c5a6fe4a11aa713099d0337893a6259fa2e086 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02973/s301790930.py | 280647a2fd8669a6345ecf3a1ac6c75ef906c3dc | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from sys import stdin
from bisect import bisect
N = int(stdin.readline().rstrip())
A = []
for i in range(N):
A.append(int(input()))
dp = []
for a in A[::-1]:
i = bisect(dp, a)
if i < len(dp):
dp[i] = a
else:
dp.append(a)
print(len(dp)) | [
"[email protected]"
]
| |
1d1dfcd44cf71fa592df181189c7efe1af6af40d | 7a8560742946bfb95f4a252693264c34d4d0473d | /k2/centroid.py | e09491c999915180b3830fd138110d6e2140551a | [
"MIT"
]
| permissive | benmontet/K2-noise | 3781e475ed6d5e2748a7ac3ddd878b8eec334254 | a4b682cdf33f85d2dffc4cef115dcedacfccb4b4 | refs/heads/master | 2016-09-05T13:02:09.051080 | 2014-10-25T14:36:22 | 2014-10-25T14:36:22 | 22,899,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["centroid"]
import numpy as np
from functools import partial
from itertools import izip, imap
from .c3k import find_centroid
def centroid(tpf, **kwargs):
# Load the data.
data = tpf.read()
times = data["TIME"]
images = data["FLUX"]
quality = data["QUALITY"]
# Get rid of the bad times based on quality flags.
m = np.isfinite(times) * (quality == 0)
images[~m, :] = np.nan
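    # Apply find_centroid to every cadence image and regroup the per-image outputs into arrays.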
f = partial(find_centroid, **kwargs)
return [times] + list(imap(np.array, izip(*(imap(f, images)))))
| [
"[email protected]"
]
| |
02af91d9a068eb13b6123c2f26b025668f5bb79f | 6eaf69ffd454ed6933e3395516246d878cb09781 | /repozeldapapp/tests/functional/test_authentication.py | f998f67ccdc2ccc018c17f9cecb7cb08697d7a58 | []
| no_license | ralphbean/repoze-ldap-app | 0d6658ef13b153736aaed6aa07fbdcaf65cbe1d9 | cc00fe59bcc286fd44d1e22a14c40cfc8419e21d | refs/heads/master | 2021-01-01T05:35:25.069715 | 2011-07-19T15:30:31 | 2011-07-19T15:30:31 | 2,072,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,583 | py | # -*- coding: utf-8 -*-
"""
Integration tests for the :mod:`repoze.who`-powered authentication sub-system.
As repoze-ldap-app grows and the authentication method changes, only these tests
should be updated.
"""
from repozeldapapp.tests import TestController
class TestAuthentication(TestController):
"""Tests for the default authentication setup.
By default in TurboGears 2, :mod:`repoze.who` is configured with the same
plugins specified by repoze.what-quickstart (which are listed in
http://code.gustavonarea.net/repoze.what-quickstart/#repoze.what.plugins.quickstart.setup_sql_auth).
As the settings for those plugins change, or the plugins are replaced,
these tests should be updated.
"""
application_under_test = 'main'
def test_forced_login(self):
"""Anonymous users are forced to login
Test that anonymous users are automatically redirected to the login
form when authorization is denied. Next, upon successful login they
should be redirected to the initially requested page.
"""
# Requesting a protected area
resp = self.app.get('/secc/', status=302)
assert resp.location.startswith('http://localhost/login')
# Getting the login form:
resp = resp.follow(status=200)
form = resp.form
# Submitting the login form:
form['login'] = u'manager'
form['password'] = 'managepass'
post_login = form.submit(status=302)
# Being redirected to the initially requested page:
assert post_login.location.startswith('http://localhost/post_login')
initial_page = post_login.follow(status=302)
assert 'authtkt' in initial_page.request.cookies, \
"Session cookie wasn't defined: %s" % initial_page.request.cookies
assert initial_page.location.startswith('http://localhost/secc/'), \
initial_page.location
def test_voluntary_login(self):
"""Voluntary logins must work correctly"""
# Going to the login form voluntarily:
resp = self.app.get('/login', status=200)
form = resp.form
# Submitting the login form:
form['login'] = u'manager'
form['password'] = 'managepass'
post_login = form.submit(status=302)
# Being redirected to the home page:
assert post_login.location.startswith('http://localhost/post_login')
home_page = post_login.follow(status=302)
assert 'authtkt' in home_page.request.cookies, \
'Session cookie was not defined: %s' % home_page.request.cookies
assert home_page.location == 'http://localhost/'
def test_logout(self):
"""Logouts must work correctly"""
# Logging in voluntarily the quick way:
resp = self.app.get('/login_handler?login=manager&password=managepass',
status=302)
resp = resp.follow(status=302)
assert 'authtkt' in resp.request.cookies, \
'Session cookie was not defined: %s' % resp.request.cookies
# Logging out:
resp = self.app.get('/logout_handler', status=302)
assert resp.location.startswith('http://localhost/post_logout')
# Finally, redirected to the home page:
home_page = resp.follow(status=302)
authtkt = home_page.request.cookies.get('authtkt')
assert not authtkt or authtkt == 'INVALID', \
'Session cookie was not deleted: %s' % home_page.request.cookies
assert home_page.location == 'http://localhost/', home_page.location
| [
"[email protected]"
]
| |
1b20703b930ae2d775880d83cd617d40c9cdfa18 | ea867a1db2b730964b471e5f198ac74988417fa5 | /steemtools/helpers.py | 5c4e3a5d73bff0aa5310093de2799d44d516835b | [
"MIT"
]
| permissive | Denis007138/steemtools | 0b58fa4bb2608c0134752b0855a36464cff9073a | c7f7ad9f482ff1b56e1218ceffbf574c95cf0c1f | refs/heads/master | 2021-01-11T01:34:36.721177 | 2016-10-10T13:58:44 | 2016-10-10T13:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import datetime
import re
import time
import dateutil
from dateutil import parser
from funcy import contextmanager, decorator
from werkzeug.contrib.cache import SimpleCache
@contextmanager
def timeit():
t1 = time.time()
yield
print("Time Elapsed: %.2f" % (time.time() - t1))
@decorator
def simple_cache(func, cache_obj, timeout=3600):
if type(cache_obj) is not SimpleCache:
return func()
name = "%s_%s_%s" % (func._func.__name__, func._args, func._kwargs)
cache_value = cache_obj.get(name)
if cache_value:
return cache_value
else:
out = func()
cache_obj.set(name, out, timeout=timeout)
return out
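# A hypothetical usage sketch (not part of the original module): cache an expensive lookup
# for an hour with a werkzeug SimpleCache instance.
#
#   cache = SimpleCache()
#
#   @simple_cache(cache, timeout=3600)
#   def expensive_lookup(name):
#       return compute(name)  # placeholder for a slow call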
def read_asset(asset_string):
re_asset = re.compile(r'(?P<number>\d*\.?\d+)\s?(?P<unit>[a-zA-Z]+)')
res = re_asset.match(asset_string)
return {'value': float(res.group('number')), 'symbol': res.group('unit')}
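# e.g. read_asset("3.000 STEEM") -> {'value': 3.0, 'symbol': 'STEEM'}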
def parse_payout(payout):
return read_asset(payout)['value']
def time_diff(time1, time2):
time1 = parser.parse(time1 + "UTC").timestamp()
time2 = parser.parse(time2 + "UTC").timestamp()
return time2 - time1
def is_comment(item):
if item['permlink'][:3] == "re-":
return True
return False
def time_elapsed(time1):
created_at = parser.parse(time1 + "UTC").timestamp()
now_adjusted = time.time()
return now_adjusted - created_at
def parse_time(block_time):
return dateutil.parser.parse(block_time + "UTC").astimezone(datetime.timezone.utc)
| [
"[email protected]"
]
| |
be1ca56a4c8e33d679fe761dc4faa412b354bfa3 | 61e68e3a4d6cc841da4350dc193315822ca4e354 | /lecture/4_정렬/4_퀵정렬.py | 45420f20a5eaaae9aafb31ff3bea12843c0068c4 | []
| no_license | sswwd95/Algorithm | 34360cd333019d6ded60f967c19aa70f1655e12a | a70bdf02580a39b9a5c282a04b0b2f8c2cb41636 | refs/heads/master | 2023-04-16T21:05:07.293929 | 2021-05-08T10:58:05 | 2021-05-08T10:58:05 | 362,651,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]
def quick_sort(array, start, end):
    if start >= end: # stop when the slice has a single element
return
    pivot = start # the pivot is the first element
left = start + 1
right = end
while(left <= right):
        # advance until an element larger than the pivot is found
while(left <= end and array[left] <= array[pivot]):
left += 1
        # retreat until an element smaller than the pivot is found
while(right > start and array[right] >= array[pivot]):
right -= 1
        if(left > right): # if the pointers crossed, swap the smaller element with the pivot
array[right], array[pivot] = array[pivot], array[right]
        else: # otherwise swap the larger and smaller elements
array[left], array[right] = array[right], array[left]
    # after partitioning, recursively sort the left and right parts
quick_sort(array, start, right - 1)
quick_sort(array, right + 1, end)
quick_sort(array, 0, len(array) - 1)
print(array)
# [0,1,2,3,4,5,6,7,8,9] | [
"[email protected]"
]
| |
b4ebea591ef98eba50becc2628f71215e816a37f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_84/306.py | 0561a547b612e83a36f4cf677430a4ecdf3d37f6 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | import sys, math
from multiprocessing import Pool
def main(data):
R,C,s = data
for i in range(R):
for j in range(C):
try:
if s[i][j] == "#":
if s[i][j+1] == "#" and s[i+1][j] == "#" and s[i+1][j+1] == "#":
s[i][j] = "/"
s[i][j+1] = "\\"
s[i+1][j] = "\\"
s[i+1][j+1] = "/"
else:
return "Impossible"
except:
return "Impossible"
return "\n".join(["".join(l) for l in s])
if __name__ == "__main__":
mode = 0
if len(sys.argv) > 1:
f = open(sys.argv[1])
mode = 1
else:
f = open("test.txt")
T = int(f.readline())
data = []
for i in range(T):
R,C = map(int, f.readline().strip().split())
s = list()
for j in range(R):
s.append(list(f.readline().strip()))
data.append((R, C, s))
if mode == 1:
pool = Pool()
r = pool.map(main, data)
else:
r = map(main, data)
for i in range(T):
print "Case #%d: \n%s" % (i+1, r[i]) | [
"[email protected]"
]
| |
327203d439300f410de4e56199b07bcb7a5b1cb1 | 3ca67d69abd4e74b7145b340cdda65532f90053b | /programmers/난이도별/level01.제일_작은_수_제거하기/Jaewon0702.py | 9574b875696e370e939054a0279eb98293b8defd | []
| no_license | DKU-STUDY/Algorithm | 19549516984b52a1c5cd73e1ed1e58f774d6d30e | 6f78efdbefd8eedab24e43d74c7dae7f95c2893b | refs/heads/master | 2023-02-18T06:48:39.309641 | 2023-02-09T07:16:14 | 2023-02-09T07:16:14 | 258,455,710 | 175 | 49 | null | 2023-02-09T07:16:16 | 2020-04-24T08:42:27 | Python | UTF-8 | Python | false | false | 156 | py | def solution(arr):
arr.remove(min(arr))
return arr if len(arr) else [-1]
print(solution([4, 3, 2, 1]) == [4, 3, 2])
print(solution([10]) == [-1])
| [
"[email protected]"
]
| |
bd9a420a7684d527bcd274c32086f85330ec970b | 2704ad14c83050ac28f403371daa8e3148440e00 | /chiadoge/wallet/did_wallet/did_info.py | 2294be358c05f883b729c58c3c37a27b0b590ce5 | [
"Apache-2.0"
]
| permissive | Bgihe/chiadoge-blockchain | d5e01a53c8e15fa17c47b44d9c95e6511aa98b7f | befb179c65ffe42aebbc47c211f78e193a095d2b | refs/heads/main | 2023-06-01T05:31:51.503755 | 2021-07-05T20:47:32 | 2021-07-05T20:47:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from dataclasses import dataclass
from typing import List, Optional, Tuple
from chiadoge.types.blockchain_format.sized_bytes import bytes32
from chiadoge.util.ints import uint64
from chiadoge.util.streamable import streamable, Streamable
from chiadoge.wallet.cc_wallet.ccparent import CCParent
from chiadoge.types.blockchain_format.program import Program
from chiadoge.types.blockchain_format.coin import Coin
@dataclass(frozen=True)
@streamable
class DIDInfo(Streamable):
origin_coin: Optional[Coin] # puzzlehash of this coin is our DID
backup_ids: List[bytes]
num_of_backup_ids_needed: uint64
parent_info: List[Tuple[bytes32, Optional[CCParent]]] # {coin.name(): CCParent}
current_inner: Optional[Program] # represents a Program as bytes
temp_coin: Optional[Coin] # partially recovered wallet uses these to hold info
temp_puzhash: Optional[bytes32]
temp_pubkey: Optional[bytes]
| [
"[email protected]"
]
| |
093c9c5f1b37d499d6bb6486317cbdcbb89a838e | 17b63416cf2f66246e1cf655ccfa2eb9a108da3c | /abupy/AlphaBu/ABuPickStockExecute.py | f344c2ed857ae0f8c94dc194d151f49cddb60f57 | []
| no_license | cmy00cmy/qtLearning | 58aec5cf9fccf9d8f14adf1793306b8b8b5ecb7f | 2b5fee7b9bbd832b20ba4e1b508be16b606249e0 | refs/heads/master | 2020-03-20T01:42:19.882639 | 2018-06-12T14:52:00 | 2018-06-12T14:52:00 | 137,085,926 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | # -*- encoding:utf-8 -*-
"""
Wraps the stock-picking worker and handles the surrounding setup and teardown work.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from .ABuPickStockWorker import AbuPickStockWorker
from ..CoreBu.ABuEnvProcess import add_process_env_sig
from ..MarketBu.ABuMarket import split_k_market
from ..TradeBu.ABuKLManager import AbuKLManager
from ..CoreBu.ABuFixes import ThreadPoolExecutor
__author__ = '阿布'
__weixin__ = 'abu_quant'
@add_process_env_sig
def do_pick_stock_work(choice_symbols, benchmark, capital, stock_pickers):
"""
    Wrap AbuPickStockWorker to run stock picking
    :param choice_symbols: initial sequence of candidate trading symbols
    :param benchmark: trading benchmark, an AbuBenchmark instance
    :param capital: capital object, an AbuCapital instance
    :param stock_pickers: sequence of stock-picking factors
:return:
"""
kl_pd_manager = AbuKLManager(benchmark, capital)
stock_pick = AbuPickStockWorker(capital, benchmark, kl_pd_manager, choice_symbols=choice_symbols,
stock_pickers=stock_pickers)
stock_pick.fit()
return stock_pick.choice_symbols
@add_process_env_sig
def do_pick_stock_thread_work(choice_symbols, benchmark, capital, stock_pickers, n_thread):
    """Wrap AbuPickStockWorker and start threads to run stock picking"""
result = []
def when_thread_done(r):
result.extend(r.result())
with ThreadPoolExecutor(max_workers=n_thread) as pool:
thread_symbols = split_k_market(n_thread, market_symbols=choice_symbols)
for symbols in thread_symbols:
future_result = pool.submit(do_pick_stock_work, symbols, benchmark, capital, stock_pickers)
future_result.add_done_callback(when_thread_done)
return result
| [
"[email protected]"
]
| |
c08a05fcca3a38d83fa5e5c0f599e925d0a2c97b | 56a4d0d73c349aeaca7580ca248caf0cf893a8c5 | /w2/using_find.py | af6a320679d645b836416da8a37d141b0a0c269d | []
| no_license | alejo8591/m101 | 79e62e0110bcc3e6ca82ac02ae3cdcbe13d51c67 | d93d34a161ecede77defb9a6a3db389d4a9b0de8 | refs/heads/master | 2020-05-18T21:42:46.651036 | 2012-12-17T23:36:49 | 2012-12-17T23:36:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | #!/usr/bin/env python
import pymongo
import sys
connect = pymongo.Connection("mongodb://127.0.0.1", safe=True)
db = connect.school
scores = db.scores
def find():
print "Find, reporting for duty"
query = {'type':'exam'}
try:
iter = scores.find(query)
except:
print "Unexpected error:",sys.exc_info()[0]
sanity = 0
for doc in iter:
print doc
sanity+=1
if (sanity > 10):
break
def find_one():
print "find one, reporting for duty"
query = {'student_id':10}
try:
iter = scores.find_one(query)
except:
print "Unexpected error:",sys.exc_info()[0]
print iter
find_one()
find() | [
"[email protected]"
]
| |
b6a2760e083ef2662b8cb1a29ee20d3d09c6f19b | e76aa4de68988abcfceb7f90ea680505a9159995 | /outrigger/__init__.py | 358e6751f654522e24e8680c88312573f25843fb | [
"BSD-3-Clause"
]
| permissive | ggraham/outrigger | 3ab1798fbeb3c871cae4d2a12bcd721032c3a96c | 135388192bd8b15fc248653ee50943448ff19160 | refs/heads/master | 2021-05-26T09:58:02.547479 | 2020-04-29T19:32:34 | 2020-04-29T19:32:34 | 254,086,816 | 0 | 0 | BSD-3-Clause | 2020-04-29T19:32:35 | 2020-04-08T12:52:08 | null | UTF-8 | Python | false | false | 201 | py | # -*- coding: utf-8 -*-
__author__ = 'Olga Botvinnik'
__email__ = '[email protected]'
__version__ = '1.1.1'
__all__ = ['psi', 'region', 'util', 'io', 'validate', 'index',
'common']
| [
"[email protected]"
]
| |
eba364f9af767f3702b519b7192b96c2b9890d8d | cc08f8eb47ef92839ba1cc0d04a7f6be6c06bd45 | /Personal/Developent/advance-django-blog-master/venv/bin/coverage | 49a1df365828a4beab01a74ad814ac7cc6b66a9d | [
"Apache-2.0"
]
| permissive | ProsenjitKumar/PycharmProjects | d90d0e7c2f4adc84e861c12a3fcb9174f15cde17 | 285692394581441ce7b706afa3b7af9e995f1c55 | refs/heads/master | 2022-12-13T01:09:55.408985 | 2019-05-08T02:21:47 | 2019-05-08T02:21:47 | 181,052,978 | 1 | 1 | null | 2022-12-08T02:31:17 | 2019-04-12T17:21:59 | null | UTF-8 | Python | false | false | 281 | #!/root/PycharmProjects/Developent/advance-django-blog-master/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from coverage.cmdline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
a7c60b78f32abc44f71b77a5227cb86f6803806d | 659d41f0c737dffc2a6ebd5e773a6513da32e5ba | /scripts/experiments/Experiments729/dephasing_scan_duration.py | adf770c56bb5fd14721f410bb6a9d3b6978b1e37 | []
| no_license | HaeffnerLab/sqip | b3d4d570becb1022083ea01fea9472115a183ace | 5d18f167bd9a5344dcae3c13cc5a84213fb7c199 | refs/heads/master | 2020-05-21T23:11:10.448549 | 2019-11-21T02:00:58 | 2019-11-21T02:00:58 | 19,164,232 | 0 | 0 | null | 2019-11-04T04:39:37 | 2014-04-25T23:54:47 | Python | UTF-8 | Python | false | false | 7,104 | py | from common.abstractdevices.script_scanner.scan_methods import experiment
from excitations import excitation_dephase
from sqip.scripts.scriptLibrary.common_methods_729 import common_methods_729 as cm
from sqip.scripts.scriptLibrary import dvParameters
import time
import labrad
from labrad.units import WithUnit
from numpy import linspace
#The following import brings in the sequence plotter.
#from common.okfpgaservers.pulser.pulse_sequences.plot_sequence import SequencePlotter
class dephase_scan_duration(experiment):
name = 'Dephase Scan Duration'
dephasing_required_parameters = [
('Dephasing_Pulses', 'preparation_line_selection'),
('Dephasing_Pulses', 'evolution_line_selection'),
('Dephasing_Pulses','preparation_sideband_selection'),
('Dephasing_Pulses','evolution_sideband_selection'),
('Dephasing_Pulses', 'scan_interaction_duration'),
('TrapFrequencies','axial_frequency'),
('TrapFrequencies','radial_frequency_1'),
('TrapFrequencies','radial_frequency_2'),
('TrapFrequencies','rf_drive_frequency'),
]
@classmethod
def all_required_parameters(cls):
parameters = set(cls.dephasing_required_parameters)
parameters = parameters.union(set(excitation_dephase.all_required_parameters()))
parameters = list(parameters)
#removing parameters we'll be overwriting, and they do not need to be loaded
parameters.remove(('Dephasing_Pulses','evolution_ramsey_time'))
parameters.remove(('Dephasing_Pulses','evolution_pulses_frequency'))
parameters.remove(('Dephasing_Pulses','preparation_pulse_frequency'))
return parameters
def initialize(self, cxn, context, ident):
self.ident = ident
self.excite = self.make_experiment(excitation_dephase)
self.excite.initialize(cxn, context, ident)
self.scan = []
self.cxnlab = labrad.connect('192.168.169.49') #connection to labwide network
self.drift_tracker = cxn.sd_tracker
self.dv = cxn.data_vault
self.data_save_context = cxn.context()
self.setup_data_vault()
def setup_sequence_parameters(self):
p = self.parameters.Dephasing_Pulses
trap = self.parameters.TrapFrequencies
prep_line_frequency = cm.frequency_from_line_selection('auto', None, p.preparation_line_selection, self.drift_tracker)
frequency_preparation = cm.add_sidebands(prep_line_frequency, p.preparation_sideband_selection, trap)
#if same line is selected, match the frequency exactly
same_line = p.preparation_line_selection == p.evolution_line_selection
same_sideband = p.preparation_sideband_selection.aslist == p.evolution_sideband_selection.aslist
print 'same line', same_line
print 'same sideband', same_sideband
if same_line and same_sideband:
frequency_evolution = frequency_preparation
else:
evo_line_frequency = cm.frequency_from_line_selection('auto', None, p.evolution_line_selection, self.drift_tracker)
frequency_evolution = cm.add_sidebands(evo_line_frequency, p.evolution_sideband_selection, trap)
self.parameters['Dephasing_Pulses.preparation_pulse_frequency'] = frequency_preparation
self.parameters['Dephasing_Pulses.evolution_pulses_frequency'] = frequency_evolution
self.max_second_pulse = p.evolution_pulses_duration
minim,maxim,steps = self.parameters.Dephasing_Pulses.scan_interaction_duration
minim = minim['us']; maxim = maxim['us']
self.scan = linspace(minim,maxim, steps)
self.scan = [WithUnit(pt, 'us') for pt in self.scan]
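        # self.scan is the list of interaction durations (in microseconds) swept linearly
        # between the configured minimum and maximum.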
def setup_data_vault(self):
localtime = time.localtime()
dirappend = [time.strftime("%Y%b%d",localtime) ,time.strftime("%H%M_%S", localtime)]
directory = ['','Experiments']
directory.extend([self.name])
directory.extend(dirappend)
self.dv.cd(directory, True,context = self.data_save_context)
def data_vault_new_trace(self):
localtime = time.localtime()
datasetNameAppend = time.strftime("%Y%b%d_%H%M_%S",localtime)
output_size = self.excite.output_size
dependants = [('Excitation','Ion {}'.format(ion),'Probability') for ion in range(output_size)]
self.dv.new('{0} {1}'.format(self.name, datasetNameAppend),[('Excitation', 'us')], dependants , context = self.data_save_context)
window_name = ['Dephasing, Scan Duration']
self.dv.add_parameter('Window', window_name, context = self.data_save_context)
self.dv.add_parameter('plotLive', True, context = self.data_save_context)
def run(self, cxn, context):
p = self.parameters.Dephasing_Pulses
self.data_vault_new_trace()
self.setup_sequence_parameters()
for i,interaction_duration in enumerate(self.scan):
should_stop = self.pause_or_stop()
if should_stop:
return False
second_pulse_dur = min(self.max_second_pulse, interaction_duration)
ramsey_time = max(WithUnit(0,'us'), interaction_duration - self.max_second_pulse)
#ramsey_time = WithUnit(0,'us')
p.evolution_ramsey_time = ramsey_time
p.evolution_pulses_duration = second_pulse_dur
self.excite.set_parameters(self.parameters)
excitation, readout = self.excite.run(cxn, context)
submission = [interaction_duration['us']]
submission.extend(excitation)
self.dv.add(submission, context = self.data_save_context)
self.update_progress(i)
self.save_parameters(self.dv, cxn, self.cxnlab, self.data_save_context)
####### FROM DYLAN -- PULSE SEQUENCE PLOTTING #########
#ttl = self.cxn.pulser.human_readable_ttl()
#dds = self.cxn.pulser.human_readable_dds()
#channels = self.cxn.pulser.get_channels().asarray
#sp = SequencePlotter(ttl.asarray, dds.aslist, channels)
#sp.makePlot()
            ############################################
return True
def finalize(self, cxn, context):
pass
def update_progress(self, iteration):
progress = self.min_progress + (self.max_progress - self.min_progress) * float(iteration + 1.0) / len(self.scan)
self.sc.script_set_progress(self.ident, progress)
def save_parameters(self, dv, cxn, cxnlab, context):
measuredDict = dvParameters.measureParameters(cxn, cxnlab)
dvParameters.saveParameters(dv, measuredDict, context)
dvParameters.saveParameters(dv, dict(self.parameters), context)
if __name__ == '__main__':
cxn = labrad.connect()
scanner = cxn.scriptscanner
exprt = dephase_scan_duration(cxn = cxn)
ident = scanner.register_external_launch(exprt.name)
exprt.execute(ident)
| [
"[email protected]"
]
| |
f5fb13e993e1f670fb944b04d958c11f4c9235e0 | 4a63c8e2545c6968547d7aa36c2dca85b9b84301 | /workscheduler/src/backend/utils/datetime.py | 88eb649edb561f5fec06a44475f4020eda3ac2b3 | []
| no_license | epirevolve/workscheduler | 458b8da84da94862c91de6544c5aaaefc1520d47 | 6c89e7264c5b66f4eb91b1989da6324695449703 | refs/heads/develop | 2023-01-23T02:01:29.356940 | 2019-12-30T01:16:32 | 2019-12-30T01:16:32 | 147,050,241 | 5 | 2 | null | 2023-01-04T11:42:19 | 2018-09-02T03:10:19 | JavaScript | UTF-8 | Python | false | false | 207 | py | # -*- coding: utf-8 -*-
from datetime import datetime
def is_overlap(a_from: datetime, a_to: datetime, b_from: datetime, b_to: datetime):
return (b_from <= a_from <= b_to) or (b_from <= a_to <= b_to)
| [
"[email protected]"
]
| |
2f74ae3f7caac57b707a98584b6bdd4a40ded6f8 | fd1dba8223ad1938916369b5eb721305ef197b30 | /AtCoder/ABC/abc110/abc110c.py | b19744afbe63b3698d7e3487b7f15813a0167d39 | []
| no_license | genkinanodesu/competitive | a3befd2f4127e2d41736655c8d0acfa9dc99c150 | 47003d545bcea848b409d60443655edb543d6ebb | refs/heads/master | 2020-03-30T07:41:08.803867 | 2019-06-10T05:22:17 | 2019-06-10T05:22:17 | 150,958,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | S = input()
T = input()
n = len(S)
X = [[] for _ in range(26)]
Y = [[] for _ in range(26)]
for i in range(n):
s = ord(S[i]) - 97
t = ord(T[i]) - 97
X[s].append(i)
Y[t].append(i)
P = [tuple(x) for x in X]
Q = [tuple(y) for y in Y]
if set(P) == set(Q):
print('Yes')
else:
print('No')
| [
"[email protected]"
]
| |
be63e415ecf5e1d3a8f53e768d4c23c1d1643511 | cca21b0ddca23665f886632a39a212d6b83b87c1 | /virtual/classroom/views.py | 07712f42f10a68880ba8e8500e4a6784453a72e1 | []
| no_license | siumhossain/classroom | a8926621456d1e7ed77387fb8a5851825771a9d9 | 4afe9cdee2c58b71bd3711b042eae3f86172eaea | refs/heads/master | 2023-02-02T08:28:14.958761 | 2020-12-24T14:58:59 | 2020-12-24T14:58:59 | 323,007,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,300 | py | from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView,DeleteView
from .models import Course
from django.contrib.auth.mixins import LoginRequiredMixin,PermissionRequiredMixin
from django.shortcuts import redirect, get_object_or_404
from django.views.generic.base import TemplateResponseMixin,View
from .forms import ModuleFormSet
from django.forms.models import modelform_factory
from django.apps import apps
from .models import Module, Content
from braces.views import CsrfExemptMixin, JsonRequestResponseMixin
from django.db.models import Count
from .models import Subject
from django.views.generic.detail import DetailView
from students.forms import CourseEnrollForm
# Create your views here.
from django.views.generic.list import ListView
from .models import Course
class ManageCourseListView(ListView):
model = Course
template_name = 'courses/manage/course/list.html'
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(owner=self.request.user)
class OwnerMixin(object):
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(owner=self.request.user)
class OwnerEditMixin(object):
def form_valid(self, form):
form.instance.owner = self.request.user
return super().form_valid(form)
class OwnerCourseMixin(OwnerMixin):
model = Course
fields = ['subject', 'title', 'slug', 'overview']
success_url = reverse_lazy('manage_course_list')
class OwnerCourseEditMixin(OwnerCourseMixin, OwnerEditMixin):
template_name = 'courses/manage/course/form.html'
class ManageCourseListView(OwnerCourseMixin, ListView):
template_name = 'courses/manage/course/list.html'
class CourseCreateView(OwnerCourseEditMixin, CreateView):
pass
class CourseUpdateView(OwnerCourseEditMixin, UpdateView):
pass
class CourseDeleteView(OwnerCourseMixin, DeleteView):
template_name = 'courses/manage/course/delete.html'
class OwnerCourseMixin(OwnerMixin,LoginRequiredMixin,PermissionRequiredMixin):
model = Course
fields = ['subject', 'title', 'slug', 'overview']
success_url = reverse_lazy('manage_course_list')
class ManageCourseListView(OwnerCourseMixin, ListView):
template_name = 'courses/manage/course/list.html'
permission_required = 'courses.view_course'
class CourseCreateView(OwnerCourseEditMixin, CreateView):
permission_required = 'courses.add_course'
class CourseUpdateView(OwnerCourseEditMixin, UpdateView):
permission_required = 'courses.change_course'
class CourseDeleteView(OwnerCourseMixin, DeleteView):
template_name = 'courses/manage/course/delete.html'
permission_required = 'courses.delete_course'
class CourseModuleUpdateView(TemplateResponseMixin, View):
template_name = 'courses/manage/module/formset.html'
course = None
def get_formset(self, data=None):
return ModuleFormSet(instance=self.course,data=data)
def dispatch(self, request, pk):
self.course = get_object_or_404(Course,id=pk,owner=request.user)
return super().dispatch(request, pk)
def get(self, request, *args, **kwargs):
formset = self.get_formset()
return self.render_to_response({'course': self.course,'formset': formset})
def post(self, request, *args, **kwargs):
formset = self.get_formset(data=request.POST)
if formset.is_valid():
formset.save()
return redirect('manage_course_list')
return self.render_to_response({'course': self.course,'formset': formset})
class ContentCreateUpdateView(TemplateResponseMixin, View):
module = None
model = None
obj = None
template_name = 'courses/manage/content/form.html'
def get_model(self, model_name):
if model_name in ['text', 'video', 'image', 'file']:
return apps.get_model(app_label='courses',model_name=model_name)
return None
def get_form(self, model, *args, **kwargs):
Form = modelform_factory(model, exclude=['owner','order','created','updated'])
return Form(*args, **kwargs)
def dispatch(self, request, module_id, model_name, id=None):
self.module = get_object_or_404(Module,id=module_id,course__owner=request.user)
        self.model = self.get_model(model_name)
if id:
self.obj = get_object_or_404(self.model,id=id,owner=request.user)
return super().dispatch(request, module_id, model_name, id)
def get(self, request, module_id, model_name, id=None):
form = self.get_form(self.model, instance=self.obj)
return self.render_to_response({'form': form,'object': self.obj})
def post(self, request, module_id, model_name, id=None):
form = self.get_form(self.model,instance=self.obj,data=request.POST,files=request.FILES)
if form.is_valid():
obj = form.save(commit=False)
obj.owner = request.user
obj.save()
if not id:
# new content
Content.objects.create(module=self.module,item=obj)
return redirect('module_content_list', self.module.id)
return self.render_to_response({'form': form,'object': self.obj})
class ContentDeleteView(View):
def post(self, request, id):
content = get_object_or_404(Content,id=id,module__course__owner=request.user)
module = content.module
content.item.delete()
content.delete()
return redirect('module_content_list', module.id)
class ModuleContentListView(TemplateResponseMixin, View):
template_name = 'courses/manage/module/content_list.html'
def get(self, request, module_id):
module = get_object_or_404(Module,id=module_id,course__owner=request.user)
return self.render_to_response({'module': module})
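# The two order views below accept a JSON payload of {id: new_order, ...} posted by the
# client-side drag-and-drop reordering and update the matching rows.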
class ModuleOrderView(CsrfExemptMixin,JsonRequestResponseMixin,View):
def post(self, request):
for id, order in self.request_json.items():
Module.objects.filter(id=id,course__owner=request.user).update(order=order)
return self.render_json_response({'saved': 'OK'})
class ContentOrderView(CsrfExemptMixin,JsonRequestResponseMixin,View):
def post(self, request):
for id, order in self.request_json.items():
Content.objects.filter(id=id,module__course__owner=request.user).update(order=order)
return self.render_json_response({'saved': 'OK'})
class CourseListView(TemplateResponseMixin, View):
model = Course
template_name = 'courses/course/list.html'
def get(self, request, subject=None):
subjects = Subject.objects.annotate(total_courses=Count('courses'))
courses = Course.objects.annotate(total_modules=Count('modules'))
if subject:
subject = get_object_or_404(Subject, slug=subject)
courses = courses.filter(subject=subject)
return self.render_to_response({'subjects': subjects,'subject': subject,'courses': courses})
class CourseDetailView(DetailView):
model = Course
template_name = 'courses/course/detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['enroll_form'] = CourseEnrollForm(initial={'course':self.object})
return context | [
"[email protected]"
]
| |
d9c01472e3a355d2c744a3b72a0896f067997726 | 5fb9f29964268223869944508798d6c21d9e5298 | /sub_test/sub_test.py | ea78eeb031a733544b22f4926dc7ead63ea94ff4 | []
| no_license | CodedQuen/Python-Pocket-Reference- | 56459ce1509f74bc253af027be91935e62922948 | 8f7c69edb8ad4ac3ef7f70bab15ffe24eb162325 | refs/heads/master | 2022-06-14T20:57:13.799676 | 2020-05-05T08:27:17 | 2020-05-05T08:27:17 | 261,398,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | from subprocess import call, Popen, PIPE, check_output
print(call("ls -l", shell=True))
print(check_output("ls -l", shell=True).decode())
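# Equivalent of the shell pipeline "ls -l | wc -l": pipe1's stdout feeds pipe2's stdin.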
pipe1 = Popen("ls -l", stdout=PIPE, shell=True)
pipe2 = Popen("wc -l", stdin=pipe1.stdout, stdout=PIPE, shell=True)
print(pipe2.stdout.read().decode())
| [
"[email protected]"
]
| |
2b63046ccd7b852daa7ce8a78c6345d746f667f9 | 6c137e70bb6b1b618fbbceddaeb74416d387520f | /spyre/testing/cavity.py | 1d95f5fa22fb580cf87be1fa538c49f3fa4ba85b | [
"BSD-2-Clause"
]
| permissive | zhong-lab/code | fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15 | b810362e06b44387f0768353c602ec5d29b551a2 | refs/heads/master | 2023-01-28T09:46:01.448833 | 2022-06-12T22:53:47 | 2022-06-12T22:53:47 | 184,670,765 | 2 | 7 | BSD-2-Clause | 2022-12-08T21:46:15 | 2019-05-02T23:37:39 | Python | UTF-8 | Python | false | false | 361 | py | ##Config file for lifetime_spyrelet.py in spyre/spyre/spyrelet/
# Device List
devices = {
'vna':[
'lantz.drivers.VNA.P9371A',
['TCPIP0::DESKTOP-ER250Q8::hislip0,4880::INSTR'],
{}
]
}
# Experiment List
spyrelets = {
'freqSweep':[
'spyre.spyrelets.cavity_spyrelet.Record',
{'vna': 'vna'},
{}
],
} | [
"none"
]
| none |
4a04f161cd2987c6ca772ac5ef11c4953ecbb7ec | cfa35dc2ea93ee0eceb2399a9e6112e987579c09 | /stonesoup/metricgenerator/__init__.py | 580303c8a8d1dce6e8550f6f212b7afe198d89c9 | [
"LicenseRef-scancode-proprietary-license",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011"
]
| permissive | dstl/Stone-Soup | 227e6a9e6fbdceca14af3f0259f311ec74095597 | f24090cc919b3b590b84f965a3884ed1293d181d | refs/heads/main | 2023-09-01T14:33:14.626428 | 2023-09-01T11:35:46 | 2023-09-01T11:35:46 | 98,420,803 | 315 | 126 | MIT | 2023-09-14T14:55:34 | 2017-07-26T12:34:28 | Python | UTF-8 | Python | false | false | 65 | py | from .base import MetricGenerator
__all__ = ['MetricGenerator']
| [
"[email protected]"
]
| |
3b74887e37753f6834df15e0acf789b4118532ec | 26cf1df102b75b0c068047cc6eca0d50dbc70c5a | /melati/server/address_manager_store.py | b0adc0891bd154e0333b582a3e552035eb13fd9b | [
"Apache-2.0"
]
| permissive | a96009467/melati-blockchain | 307f9a92eee25a15aa294ddfed41a595e63acc50 | 28b8cd1590ee8fa860554c66d639a1fefc0d3c41 | refs/heads/main | 2023-06-24T13:53:41.332345 | 2021-07-20T09:37:49 | 2021-07-20T09:37:49 | 387,778,815 | 0 | 0 | Apache-2.0 | 2021-07-20T12:06:20 | 2021-07-20T12:06:20 | null | UTF-8 | Python | false | false | 8,257 | py | import logging
from typing import Dict, List, Tuple
import aiosqlite
from melati.server.address_manager import (
BUCKET_SIZE,
NEW_BUCKET_COUNT,
NEW_BUCKETS_PER_ADDRESS,
AddressManager,
ExtendedPeerInfo,
)
log = logging.getLogger(__name__)
class AddressManagerStore:
"""
Metadata table:
- private key
- new table count
- tried table count
Nodes table:
* Maps entries from new/tried table to unique node ids.
- node_id
- IP, port, together with the IP, port of the source peer.
New table:
* Stores node_id, bucket for each occurrence in the new table of an entry.
* Once we know the buckets, we can also deduce the bucket positions.
    All other information, such as tried_matrix, map_addr, map_info and random_pos,
    can be deduced; it is not stored explicitly but recalculated when loading.
"""
db: aiosqlite.Connection
@classmethod
async def create(cls, connection) -> "AddressManagerStore":
self = cls()
self.db = connection
await self.db.commit()
await self.db.execute("pragma journal_mode=wal")
await self.db.execute("pragma synchronous=2")
await self.db.execute("CREATE TABLE IF NOT EXISTS peer_metadata(key text,value text)")
await self.db.commit()
await self.db.execute("CREATE TABLE IF NOT EXISTS peer_nodes(node_id int,value text)")
await self.db.commit()
await self.db.execute("CREATE TABLE IF NOT EXISTS peer_new_table(node_id int,bucket int)")
await self.db.commit()
return self
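    # A hypothetical usage sketch (not part of the original module), assuming an aiosqlite
    # connection and an AddressManager instance are available:
    #
    #   store = await AddressManagerStore.create(connection)
    #   await store.serialize(address_manager)
    #   restored = await store.deserialize()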
async def clear(self) -> None:
cursor = await self.db.execute("DELETE from peer_metadata")
await cursor.close()
cursor = await self.db.execute("DELETE from peer_nodes")
await cursor.close()
cursor = await self.db.execute("DELETE from peer_new_table")
await cursor.close()
await self.db.commit()
async def get_metadata(self) -> Dict[str, str]:
cursor = await self.db.execute("SELECT key, value from peer_metadata")
metadata = await cursor.fetchall()
await cursor.close()
return {key: value for key, value in metadata}
async def is_empty(self) -> bool:
metadata = await self.get_metadata()
if "key" not in metadata:
return True
if int(metadata.get("new_count", 0)) > 0:
return False
if int(metadata.get("tried_count", 0)) > 0:
return False
return True
async def get_nodes(self) -> List[Tuple[int, ExtendedPeerInfo]]:
cursor = await self.db.execute("SELECT node_id, value from peer_nodes")
nodes_id = await cursor.fetchall()
await cursor.close()
return [(node_id, ExtendedPeerInfo.from_string(info_str)) for node_id, info_str in nodes_id]
async def get_new_table(self) -> List[Tuple[int, int]]:
cursor = await self.db.execute("SELECT node_id, bucket from peer_new_table")
entries = await cursor.fetchall()
await cursor.close()
return [(node_id, bucket) for node_id, bucket in entries]
async def set_metadata(self, metadata) -> None:
for key, value in metadata:
cursor = await self.db.execute(
"INSERT OR REPLACE INTO peer_metadata VALUES(?, ?)",
(key, value),
)
await cursor.close()
await self.db.commit()
async def set_nodes(self, node_list) -> None:
for node_id, peer_info in node_list:
cursor = await self.db.execute(
"INSERT OR REPLACE INTO peer_nodes VALUES(?, ?)",
(node_id, peer_info.to_string()),
)
await cursor.close()
await self.db.commit()
async def set_new_table(self, entries) -> None:
for node_id, bucket in entries:
cursor = await self.db.execute(
"INSERT OR REPLACE INTO peer_new_table VALUES(?, ?)",
(node_id, bucket),
)
await cursor.close()
await self.db.commit()
async def serialize(self, address_manager: AddressManager):
metadata = []
nodes = []
new_table_entries = []
metadata.append(("key", str(address_manager.key)))
unique_ids = {}
count_ids = 0
for node_id, info in address_manager.map_info.items():
unique_ids[node_id] = count_ids
if info.ref_count > 0:
assert count_ids != address_manager.new_count
nodes.append((count_ids, info))
count_ids += 1
metadata.append(("new_count", str(count_ids)))
tried_ids = 0
for node_id, info in address_manager.map_info.items():
if info.is_tried:
assert info is not None
assert tried_ids != address_manager.tried_count
nodes.append((count_ids, info))
count_ids += 1
tried_ids += 1
metadata.append(("tried_count", str(tried_ids)))
for bucket in range(NEW_BUCKET_COUNT):
for i in range(BUCKET_SIZE):
if address_manager.new_matrix[bucket][i] != -1:
index = unique_ids[address_manager.new_matrix[bucket][i]]
new_table_entries.append((index, bucket))
await self.clear()
await self.set_metadata(metadata)
await self.set_nodes(nodes)
await self.set_new_table(new_table_entries)
async def deserialize(self) -> AddressManager:
address_manager = AddressManager()
metadata = await self.get_metadata()
nodes = await self.get_nodes()
new_table_entries = await self.get_new_table()
address_manager.clear()
address_manager.key = int(metadata["key"])
address_manager.new_count = int(metadata["new_count"])
# address_manager.tried_count = int(metadata["tried_count"])
address_manager.tried_count = 0
new_table_nodes = [(node_id, info) for node_id, info in nodes if node_id < address_manager.new_count]
for n, info in new_table_nodes:
address_manager.map_addr[info.peer_info.host] = n
address_manager.map_info[n] = info
info.random_pos = len(address_manager.random_pos)
address_manager.random_pos.append(n)
address_manager.id_count = len(new_table_nodes)
tried_table_nodes = [(node_id, info) for node_id, info in nodes if node_id >= address_manager.new_count]
# lost_count = 0
for node_id, info in tried_table_nodes:
tried_bucket = info.get_tried_bucket(address_manager.key)
tried_bucket_pos = info.get_bucket_position(address_manager.key, False, tried_bucket)
if address_manager.tried_matrix[tried_bucket][tried_bucket_pos] == -1:
info.random_pos = len(address_manager.random_pos)
info.is_tried = True
id_count = address_manager.id_count
address_manager.random_pos.append(id_count)
address_manager.map_info[id_count] = info
address_manager.map_addr[info.peer_info.host] = id_count
address_manager.tried_matrix[tried_bucket][tried_bucket_pos] = id_count
address_manager.id_count += 1
address_manager.tried_count += 1
# else:
# lost_count += 1
# address_manager.tried_count -= lost_count
for node_id, bucket in new_table_entries:
if node_id >= 0 and node_id < address_manager.new_count:
info = address_manager.map_info[node_id]
bucket_pos = info.get_bucket_position(address_manager.key, True, bucket)
if address_manager.new_matrix[bucket][bucket_pos] == -1 and info.ref_count < NEW_BUCKETS_PER_ADDRESS:
info.ref_count += 1
address_manager.new_matrix[bucket][bucket_pos] = node_id
for node_id, info in list(address_manager.map_info.items()):
if not info.is_tried and info.ref_count == 0:
address_manager.delete_new_entry_(node_id)
address_manager.load_used_table_positions()
return address_manager
| [
"[email protected]"
]
| |
992cbbcc8751d9aa132eea71a9c34ba42f5b03b4 | 4754226625d4a6b9680a22fd39166f502034aeb5 | /samsung/[cutz]lab1.py | 971e71a34d9cdfed878116d35cf9fd619e85ef26 | [
"MIT"
]
| permissive | cutz-j/AlgorithmStudy | 298cc7d6fa92345629623a9bd8d186f0608cdf7c | de0f81220e29bd5e109d174800f507b12a3bee36 | refs/heads/master | 2021-07-01T03:15:51.627208 | 2021-02-24T01:24:44 | 2021-02-24T01:24:44 | 222,935,322 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,913 | py | import sys
from itertools import combinations
class Queue():
def __init__(self):
self.front = 0
self.rear = 0
self.list = []
self.pop_count = 0
def append(self, x):
self.list.append(x)
self.rear += 1
def pop(self):
res = self.list[self.front]
self.front += 1
self.pop_count += 1
return res
def empty(self):
return len(self.list) == self.pop_count
res = 0
rl = lambda: sys.stdin.readline()
N, M = map(int, rl().split())
all_map = []
virus = []
zero = []
virus_num = sys.maxsize
for i in range(N):
tmp = list(map(int, rl().split()))
for j, v in enumerate(tmp):
if v == 2:
virus.append((i, j))
elif v == 0:
zero.append((i, j))
all_map.append(tmp)
row_dir, col_dir = [1, 0, -1, 0], [0, 1, 0, -1]
wall_comb = combinations(zero, 3)
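# Brute force: for every way to place 3 extra walls on empty cells, spread the virus with a
# BFS and keep the configuration that leaves the most uninfected cells.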
for wall in wall_comb:
# visited = copy.deepcopy(all_map)
visited = []
for i in range(N):
tmp = []
for j in range(M):
tmp.append(all_map[i][j])
visited.append(tmp)
for w in wall:
visited[w[0]][w[1]] = 1
v_num = 0
queue = Queue()
for v in virus:
queue.append(v)
while queue.empty() == False:
r, c = queue.pop()
v_num += 1
if v_num > virus_num:
break
for i in range(4):
new_r, new_c = r + row_dir[i], c + col_dir[i]
if (0 <= new_r < N) and (0 <= new_c < M):
if visited[new_r][new_c] == 0:
queue.append((new_r, new_c))
visited[new_r][new_c] = 2
cnt, v_cnt = 0, 0
for i in range(N):
for j in range(M):
if visited[i][j] == 0:
cnt += 1
if visited[i][j] == 2:
v_cnt += 1
if cnt > res:
res = cnt
virus_num = v_cnt
print(res) | [
"[email protected]"
]
| |
76846a71c9a5bcac685d5452c7f039c04d5dd554 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/python基础部分/day02/code/test.py | e2a14e0f4bc5a36dc4cbb782ba168443482180ac | []
| no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | #!/usr/bin/python3
print('这是我的python第一条语句')  # "This is my first Python statement"
print('我现在开始学python')        # "I am now starting to learn Python"
print('这是最后一条语句')          # "This is the last statement"
"[email protected]"
]
| |
6eeced6d1506a1def659d8582180f495fff68a7f | 50402cc4388dfee3a9dbe9e121ef217759ebdba8 | /etc/MOPSO-ZDT2/ZDT2-1.py | d0f2faf6d992bb8b09ed659299c095a99a98486a | []
| no_license | dqyi11/SVNBackup | bd46a69ec55e3a4f981a9bca4c8340944d8d5886 | 9ad38e38453ef8539011cf4d9a9c0a363e668759 | refs/heads/master | 2020-03-26T12:15:01.155873 | 2015-12-10T01:11:36 | 2015-12-10T01:11:36 | 144,883,382 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | '''
Created on Jan 26, 2014
@author: daqing_yi
'''
if __name__ == '__main__':
from PerformanceAnalyzer import *;
import sys;
trial_time = 30;
figFolder = sys.path[0] + "\\zdt2";
caseName = "ZDT2";
fileList1 = [];
fileList2 = [];
fileList3 = [];
fileList4 = [];
for tt in range(trial_time):
filename1 = "ZDT2-"+str(tt)+"--Div.txt";
filename2 = "ZDT2-"+str(tt)+"--AD.txt";
filename3 = "ZDT2-"+str(tt)+"--Spread.txt";
filename4 = "ZDT2-"+str(tt)+"--Efficiency.txt";
fileList1.append(filename1);
fileList2.append(filename2);
fileList3.append(filename3);
fileList4.append(filename4);
analyzer1 = PerformanceAnalyzer(fileList1, figFolder, "Diversity", 10);
analyzer1.genData();
analyzer1.plot(caseName);
analyzer1.dump(caseName);
analyzer2 = PerformanceAnalyzer(fileList2, figFolder, "Distance", 10);
analyzer2.genData();
analyzer2.plot(caseName);
analyzer2.dump(caseName);
analyzer3 = PerformanceAnalyzer(fileList3, figFolder, "Spread", 10);
analyzer3.genData();
analyzer3.plot(caseName);
analyzer3.dump(caseName);
analyzer4 = PerformanceAnalyzer(fileList4, figFolder, "Efficiency", 10);
analyzer4.genData();
analyzer4.plot(caseName);
analyzer4.dump(caseName); | [
"walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39"
]
| walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39 |
1511968638f2441910615d9b97b2c2629ea64078 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/resources/types/product_bidding_category_constant.py | 6aacc16b169b40875e5f6b751c1c07d2a833a97f | [
"Apache-2.0"
]
| permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,334 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import product_bidding_category_level
from google.ads.googleads.v6.enums.types import product_bidding_category_status
__protobuf__ = proto.module(
package='google.ads.googleads.v6.resources',
marshal='google.ads.googleads.v6',
manifest={
'ProductBiddingCategoryConstant',
},
)
class ProductBiddingCategoryConstant(proto.Message):
r"""A Product Bidding Category.
Attributes:
resource_name (str):
Output only. The resource name of the product bidding
category. Product bidding category resource names have the
form:
``productBiddingCategoryConstants/{country_code}~{level}~{id}``
id (int):
Output only. ID of the product bidding category.
This ID is equivalent to the google_product_category ID as
described in this article:
https://support.google.com/merchants/answer/6324436.
country_code (str):
Output only. Two-letter upper-case country
code of the product bidding category.
product_bidding_category_constant_parent (str):
Output only. Resource name of the parent
product bidding category.
level (google.ads.googleads.v6.enums.types.ProductBiddingCategoryLevelEnum.ProductBiddingCategoryLevel):
Output only. Level of the product bidding
category.
status (google.ads.googleads.v6.enums.types.ProductBiddingCategoryStatusEnum.ProductBiddingCategoryStatus):
Output only. Status of the product bidding
category.
language_code (str):
Output only. Language code of the product
bidding category.
localized_name (str):
Output only. Display value of the product bidding category
localized according to language_code.
"""
resource_name = proto.Field(proto.STRING, number=1)
id = proto.Field(proto.INT64, number=10, optional=True)
country_code = proto.Field(proto.STRING, number=11, optional=True)
product_bidding_category_constant_parent = proto.Field(proto.STRING, number=12, optional=True)
level = proto.Field(proto.ENUM, number=5,
enum=product_bidding_category_level.ProductBiddingCategoryLevelEnum.ProductBiddingCategoryLevel,
)
status = proto.Field(proto.ENUM, number=6,
enum=product_bidding_category_status.ProductBiddingCategoryStatusEnum.ProductBiddingCategoryStatus,
)
language_code = proto.Field(proto.STRING, number=13, optional=True)
localized_name = proto.Field(proto.STRING, number=14, optional=True)
__all__ = tuple(sorted(__protobuf__.manifest))
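# --- Illustrative usage sketch (not part of the generated module) ---
# The docstring above describes a read-only resource that is normally returned by a
# GoogleAdsService search; this sketch only shows how the proto-plus message defined
# above can be built and inspected locally. All field values are made-up examples,
# and the helper name is hypothetical.
def _example_usage():
    category = ProductBiddingCategoryConstant(
        resource_name="productBiddingCategoryConstants/US~LEVEL1~166",
        id=166,
        country_code="US",
        language_code="en",
        localized_name="Apparel & Accessories",
    )
    # Fields read back exactly as set; output-only semantics apply on the API side only.
    return category.resource_name, category.localized_name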
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
37c5eab2b0dce309f35baf4e54e33fcf65b69a0f | b37c027a3f63305345f266e8f4f944721adbb956 | /BASES/OLD/3_2_CAC_CC_SPLTED_CSDS/tx_no_gui.py | a9468578d04ae10a963ccd3699fadbf0be6ccf6e | []
| no_license | andrehoracio97/investigacao | fdfb663867e6fe9f240bb828b7b96b99323f8be3 | 5dd1fad12f4991bb737ed236426247dfb52333eb | refs/heads/master | 2022-10-11T02:08:30.478893 | 2020-06-16T09:58:13 | 2020-06-16T09:58:13 | 193,519,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,072 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Tx No Gui
# Author: andresilva
# GNU Radio version: 3.7.13.5
##################################################
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import fec
from gnuradio import gr
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.filter import pfb
from optparse import OptionParser
import insert_vec_cpp
import pmt
import random
import time
class tx_no_gui(gr.top_block):
def __init__(self, puncpat='11'):
gr.top_block.__init__(self, "Tx No Gui")
##################################################
# Parameters
##################################################
self.puncpat = puncpat
##################################################
# Variables
##################################################
self.sps = sps = 4
self.nfilts = nfilts = 32
self.eb = eb = 0.22
self.tx_rrc_taps = tx_rrc_taps = firdes.root_raised_cosine(nfilts, nfilts, 1.0, eb, 5*sps*nfilts)
self.taps_per_filt = taps_per_filt = len(tx_rrc_taps)/nfilts
self.samp_rate_array_MCR = samp_rate_array_MCR = [7500000,4000000,3750000,3000000,2500000,2000000,1500000,1000000,937500,882352,833333,714285,533333,500000,421052,400000,380952]
self.vector = vector = [int(random.random()*4) for i in range(49600)]
self.variable_qtgui_range_0 = variable_qtgui_range_0 = 50
self.samp_rate = samp_rate = samp_rate_array_MCR[1]
self.rate = rate = 2
self.polys = polys = [109, 79]
self.pld_enc = pld_enc = map( (lambda a: fec.ccsds_encoder_make(440, 0, fec.CC_TERMINATED)), range(0,16) );
self.pld_const = pld_const = digital.constellation_rect(([0.707+0.707j, -0.707+0.707j, -0.707-0.707j, 0.707-0.707j]), ([0, 1, 2, 3]), 4, 2, 2, 1, 1).base()
self.pld_const.gen_soft_dec_lut(8)
self.k = k = 7
self.frequencia_usrp = frequencia_usrp = 484e6
self.filt_delay = filt_delay = 1+(taps_per_filt-1)/2
self.MCR = MCR = "master_clock_rate=60e6"
##################################################
# Blocks
##################################################
self.uhd_usrp_sink_0_0 = uhd.usrp_sink(
",".join(("serial=F5EAE1", MCR)),
uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_usrp_sink_0_0.set_samp_rate(samp_rate)
self.uhd_usrp_sink_0_0.set_time_now(uhd.time_spec(time.time()), uhd.ALL_MBOARDS)
self.uhd_usrp_sink_0_0.set_center_freq(frequencia_usrp, 0)
self.uhd_usrp_sink_0_0.set_gain(variable_qtgui_range_0, 0)
self.uhd_usrp_sink_0_0.set_antenna('TX/RX', 0)
self.pfb_arb_resampler_xxx_0 = pfb.arb_resampler_ccf(
sps,
taps=(tx_rrc_taps),
flt_size=nfilts)
self.pfb_arb_resampler_xxx_0.declare_sample_delay(filt_delay)
self.insert_vec_cpp_new_vec_0 = insert_vec_cpp.new_vec((vector))
self.fec_extended_encoder_0 = fec.extended_encoder(encoder_obj_list=pld_enc, threading='capillary', puncpat=puncpat)
self.digital_map_bb_1_0 = digital.map_bb((pld_const.pre_diff_code()))
self.digital_diff_encoder_bb_0 = digital.diff_encoder_bb(4)
self.digital_chunks_to_symbols_xx_0_0 = digital.chunks_to_symbols_bc((pld_const.points()), 1)
self.blocks_vector_source_x_0_0_0 = blocks.vector_source_b([0], True, 1, [])
self.blocks_stream_to_tagged_stream_0_0_0 = blocks.stream_to_tagged_stream(gr.sizeof_char, 1, 992, "packet_len")
self.blocks_stream_mux_0_1_0 = blocks.stream_mux(gr.sizeof_char*1, (96, 896))
self.blocks_stream_mux_0_0 = blocks.stream_mux(gr.sizeof_char*1, (892, 4))
self.blocks_repack_bits_bb_1_0_0_1 = blocks.repack_bits_bb(8, 1, '', False, gr.GR_MSB_FIRST)
self.blocks_repack_bits_bb_1_0_0_0 = blocks.repack_bits_bb(1, 2, "packet_len", False, gr.GR_MSB_FIRST)
self.blocks_multiply_const_vxx_1 = blocks.multiply_const_vcc((0.7, ))
self.blocks_file_source_0_0_1_0_0_0 = blocks.file_source(gr.sizeof_char*1, '/home/andre/Desktop/Files_To_Transmit/video_lion.mpeg', False)
self.blocks_file_source_0_0_1_0_0_0.set_begin_tag(pmt.PMT_NIL)
self.acode_1104 = blocks.vector_source_b([0x1, 0x0, 0x1, 0x0, 0x1, 0x1, 0x0, 0x0, 0x1, 0x1, 0x0, 0x1, 0x1, 0x1, 0x0, 0x1, 0x1, 0x0, 0x1, 0x0, 0x0, 0x1, 0x0, 0x0, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x1, 0x1, 0x1, 0x1, 0x0, 0x0, 0x1, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0], True, 1, [])
##################################################
# Connections
##################################################
self.connect((self.acode_1104, 0), (self.blocks_stream_mux_0_1_0, 0))
self.connect((self.blocks_file_source_0_0_1_0_0_0, 0), (self.blocks_repack_bits_bb_1_0_0_1, 0))
self.connect((self.blocks_multiply_const_vxx_1, 0), (self.uhd_usrp_sink_0_0, 0))
self.connect((self.blocks_repack_bits_bb_1_0_0_0, 0), (self.insert_vec_cpp_new_vec_0, 0))
self.connect((self.blocks_repack_bits_bb_1_0_0_1, 0), (self.fec_extended_encoder_0, 0))
self.connect((self.blocks_stream_mux_0_0, 0), (self.blocks_stream_mux_0_1_0, 1))
self.connect((self.blocks_stream_mux_0_1_0, 0), (self.blocks_stream_to_tagged_stream_0_0_0, 0))
self.connect((self.blocks_stream_to_tagged_stream_0_0_0, 0), (self.blocks_repack_bits_bb_1_0_0_0, 0))
self.connect((self.blocks_vector_source_x_0_0_0, 0), (self.blocks_stream_mux_0_0, 1))
self.connect((self.digital_chunks_to_symbols_xx_0_0, 0), (self.pfb_arb_resampler_xxx_0, 0))
self.connect((self.digital_diff_encoder_bb_0, 0), (self.digital_chunks_to_symbols_xx_0_0, 0))
self.connect((self.digital_map_bb_1_0, 0), (self.digital_diff_encoder_bb_0, 0))
self.connect((self.fec_extended_encoder_0, 0), (self.blocks_stream_mux_0_0, 0))
self.connect((self.insert_vec_cpp_new_vec_0, 0), (self.digital_map_bb_1_0, 0))
self.connect((self.pfb_arb_resampler_xxx_0, 0), (self.blocks_multiply_const_vxx_1, 0))
def get_puncpat(self):
return self.puncpat
def set_puncpat(self, puncpat):
self.puncpat = puncpat
def get_sps(self):
return self.sps
def set_sps(self, sps):
self.sps = sps
self.pfb_arb_resampler_xxx_0.set_rate(self.sps)
def get_nfilts(self):
return self.nfilts
def set_nfilts(self, nfilts):
self.nfilts = nfilts
self.set_taps_per_filt(len(self.tx_rrc_taps)/self.nfilts)
def get_eb(self):
return self.eb
def set_eb(self, eb):
self.eb = eb
def get_tx_rrc_taps(self):
return self.tx_rrc_taps
def set_tx_rrc_taps(self, tx_rrc_taps):
self.tx_rrc_taps = tx_rrc_taps
self.set_taps_per_filt(len(self.tx_rrc_taps)/self.nfilts)
self.pfb_arb_resampler_xxx_0.set_taps((self.tx_rrc_taps))
def get_taps_per_filt(self):
return self.taps_per_filt
def set_taps_per_filt(self, taps_per_filt):
self.taps_per_filt = taps_per_filt
self.set_filt_delay(1+(self.taps_per_filt-1)/2)
def get_samp_rate_array_MCR(self):
return self.samp_rate_array_MCR
def set_samp_rate_array_MCR(self, samp_rate_array_MCR):
self.samp_rate_array_MCR = samp_rate_array_MCR
self.set_samp_rate(self.samp_rate_array_MCR[1])
def get_vector(self):
return self.vector
def set_vector(self, vector):
self.vector = vector
def get_variable_qtgui_range_0(self):
return self.variable_qtgui_range_0
def set_variable_qtgui_range_0(self, variable_qtgui_range_0):
self.variable_qtgui_range_0 = variable_qtgui_range_0
self.uhd_usrp_sink_0_0.set_gain(self.variable_qtgui_range_0, 0)
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.uhd_usrp_sink_0_0.set_samp_rate(self.samp_rate)
def get_rate(self):
return self.rate
def set_rate(self, rate):
self.rate = rate
def get_polys(self):
return self.polys
def set_polys(self, polys):
self.polys = polys
def get_pld_enc(self):
return self.pld_enc
def set_pld_enc(self, pld_enc):
self.pld_enc = pld_enc
def get_pld_const(self):
return self.pld_const
def set_pld_const(self, pld_const):
self.pld_const = pld_const
def get_k(self):
return self.k
def set_k(self, k):
self.k = k
def get_frequencia_usrp(self):
return self.frequencia_usrp
def set_frequencia_usrp(self, frequencia_usrp):
self.frequencia_usrp = frequencia_usrp
self.uhd_usrp_sink_0_0.set_center_freq(self.frequencia_usrp, 0)
def get_filt_delay(self):
return self.filt_delay
def set_filt_delay(self, filt_delay):
self.filt_delay = filt_delay
def get_MCR(self):
return self.MCR
def set_MCR(self, MCR):
self.MCR = MCR
def argument_parser():
parser = OptionParser(usage="%prog: [options]", option_class=eng_option)
parser.add_option(
"", "--puncpat", dest="puncpat", type="string", default='11',
help="Set puncpat [default=%default]")
return parser
def main(top_block_cls=tx_no_gui, options=None):
if options is None:
options, _ = argument_parser().parse_args()
tb = top_block_cls(puncpat=options.puncpat)
tb.start()
tb.wait()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
37a4bed3bf5ad368c0622bb623e70c8852cd6ba3 | c0239d75a8199ec84ad683f945c21785c1b59386 | /dingtalk/api/rest/CorpDingTaskCreateRequest.py | ebe77db44bea52c850f1888fb9ce57aede6aae7f | []
| no_license | luss613/oauth_dingtalk | 9f253a75ce914c577dbabfb84e97fd883e80e04b | 1e2554642d2b16c642a031670d08efa4a74e8252 | refs/heads/master | 2023-04-23T01:16:33.450821 | 2020-06-18T08:22:57 | 2020-06-18T08:22:57 | 264,966,287 | 1 | 1 | null | 2020-06-18T08:31:24 | 2020-05-18T14:33:25 | Python | UTF-8 | Python | false | false | 332 | py | '''
Created by auto_sdk on 2018.07.25
'''
from dingtalk.api.base import RestApi
class CorpDingTaskCreateRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.task_send_v_o = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.corp.ding.task.create'
| [
"[email protected]"
]
| |
79e39282fe18e3659d7f76f56c3f2ae8ce5dc408 | d62f1c0bd9c35cd8ae681d7465e749d63bb59d4e | /Week1/Codingbat/List-1/same_first_last.py | 43d30b5ee7aa5c509d24f23881f34fe800bd4642 | []
| no_license | Yeldan/BFDjango | 0134a57ec523b08e4ca139ec11c384eeefec6caa | a390e08b8711613040a972e30a25b4035ff58e37 | refs/heads/master | 2020-03-27T15:49:53.859506 | 2018-11-25T22:33:38 | 2018-11-25T22:33:38 | 146,742,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | def same_first_last(nums):
if len(nums) >= 1 and nums[0] == nums[len(nums)-1]:
return True
return False | [
"[email protected]"
]
| |
2151cceac149e0509db788b0da44d68c4d1cd4cb | 3e24611b7315b5ad588b2128570f1341b9c968e8 | /Pseudo_Finder.py | 2d5054ccbc1b1928f339f8fd026680b8d0102af6 | [
"BSD-2-Clause"
]
| permissive | bioCKO/lpp_Script | dc327be88c7d12243e25557f7da68d963917aa90 | 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | refs/heads/master | 2022-02-27T12:35:05.979231 | 2019-08-27T05:56:33 | 2019-08-27T05:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,854 | py | #!/usr/bin/env python
#coding:utf-8
"""
Author: --<>
Purpose:
Created: 2015/10/19
"""
from lpp import *
import os
from optparse import OptionParser
def check_path( path ):
if not os.path.exists(path):
os.makedirs( path )
return os.path.abspath(path)+'/'
def GBLASTA( protein,assemblyresult,output ):
#os.system("""makeblastdb -in %s -title Assem -parse_seqids -out Assem -dbtype nucl"""%(assemblyresult))
COMMAND = open("gblasta_run.bat",'w')
RAW = fasta_check(open(protein,'rU'))
i=0
for t,s in RAW:
i+=1
COMMAND.write("""
genblast -P blast -q $input -t %s -o $output
"""%(assemblyresult))
os.system("""
Genblast_Run.py -i %s -s %s -c %s -o %s
"""%(
protein,COMMAND.name, i,output
)
)
def ParseGblasta(gbaresult,genewiseruncommand):
COMMAND = open(genewiseruncommand,'w')
cache_path = check_path("CACHE/")
i=0
data_cache_hash = {}
GBA = block_reading(open(gbaresult,'rU'), re.escape("//******************END*******************//") )
i=0
for e_b in GBA:
i+=1
k=0
gb_block = re.split("\n\n+", e_b)
if "for query:" not in e_b:
continue
proteinid = re.search("for query\:\s+(\S+)", e_b).group(1)
for align in gb_block[1:]:
if "gene cover" not in align:
continue
aligndata = re.search("cover\:\d+\((\S+)\%\)\|score:([^\|]+)", align)
perc = float(aligndata.group(1))
score = float(aligndata.group(2))
if perc >=80:
i+=1
if i not in data_cache_hash:
PRO= open(cache_path+'%s.pep'%(i),'w')
PRO.write(proteinseqHash[proteinid])
data_cache_hash[i] = [PRO.name]
k+=1
NUC = open(cache_path+'%s_%s.nuc'%(i,k),'w')
align_detail = align.split("\n")[0]
align_detail_list = align_detail.split("|")
subject_detail = align_detail_list[1]
scaffold_name = subject_detail.split(":")[0]
direct = align_detail_list[2]
scaffoldStart,scaffoldEND = subject_detail.split(":")[1].split("..")
scaffoldStart=int(scaffoldStart)
scaffoldEND = int(scaffoldEND)
if scaffoldStart<10000:
scaffoldStart = 0
else:
scaffoldStart =scaffoldStart -10000
scaffoldEND = scaffoldEND+10000
NUC.write(">"+scaffold_name+"__%s\n"%(scaffoldStart)+assemblyseqHash[scaffold_name][scaffoldStart:scaffoldEND]+'\n')
commandline = """Genewise_Psuedeo.py -p %s -n %s -o %s.result.gff"""%(PRO.name,NUC.name,i)
if direct =="-":
commandline += " -d"
COMMAND.write(commandline+'\n')
COMMAND.close()
os.system( "cat %s | parallel -j 64"%(COMMAND.name) )
os.system( "cat *.result.gff > %s"%(output) )
os.system(" rm *.result.gff")
#os.system("cat %s| parallel -j %s >genewise.out")
if __name__=='__main__':
    usage = '''usage: python2.7 %prog [options]
    Align a protein set against an assembled genome (genblast + genewise) to flag putative pseudogenes.'''
    parser = OptionParser(usage=usage)
parser.add_option("-c", "--CPU", action="store",
dest="cpu",
type='int',
default = 60,
help="CPU number for each thread")
parser.add_option("-p", "--pro", action="store",
dest="protein",
help="protein sequence!!")
parser.add_option("-a", "--assembly", action="store",
dest="assembly",
help="Assemblied Genome!!")
parser.add_option("-o", "--out", action="store",
dest="output",
default = 'genewise.out',
help="The output file you want!!")
(options, args) = parser.parse_args()
cpu = options.cpu
protein = options.protein
assembly = options.assembly
output = options.output
assemblyseqHash = {}
for t,s in fasta_check(open(assembly,'rU')):
t = t.split()[0][1:]
s = re.sub("\s+",'',s)
assemblyseqHash[t]=s
proteinseqHash = {}
for t,s in fasta_check(open(protein,'rU')):
proteinseqHash[t.split()[0][1:]] = t+s
GBLASTA(protein, assembly,"geneblasta.out")
ParseGblasta("geneblasta.out", "genewise.command")
os.remove("genewise.command")
os.system("rm CACHE -rf")
os.system("rm cache -rf")
os.system( "rm *.xml")
| [
"[email protected]"
]
| |
eabfec2e4c0257175b2f88f159573dc90713903f | faaad3f79c5409ba87c32648562097a611884800 | /app/app/migrations/0008_auto__add_field_partner_enabled.py | a3d5859a39c05f0df938dd399d231cd774ed6a0c | []
| no_license | ahguerilla/movements | d320cf4e59549f9aebb9c534ce4ae9c468189915 | a2065b65ff96391571390d4d44744566b5f298ac | refs/heads/master | 2020-12-29T02:32:05.568280 | 2018-05-11T16:22:00 | 2018-05-11T16:22:00 | 55,590,490 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,276 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Partner.enabled'
db.add_column(u'app_partner', 'enabled',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Partner.enabled'
db.delete_column(u'app_partner', 'enabled')
models = {
u'app.menuextension': {
'Meta': {'object_name': 'MenuExtension'},
'extended_object': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.Page']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_extension': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'draft_extension'", 'unique': 'True', 'null': 'True', 'to': u"orm['app.MenuExtension']"}),
'show_on_footer_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_on_top_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'app.newslettersignups': {
'Meta': {'ordering': "('-registered_date',)", 'object_name': 'NewsletterSignups'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'registered_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'app.notificationping': {
'Meta': {'object_name': 'NotificationPing'},
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'send_email_to': ('django.db.models.fields.EmailField', [], {'max_length': '75'})
},
u'app.partner': {
'Meta': {'object_name': 'Partner'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'app.safevpnlink': {
'Meta': {'object_name': 'SafeVPNLink', '_ormbases': ['cms.CMSPlugin']},
'base_url': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'link_text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['app'] | [
"[email protected]"
]
| |
0fd00087bbe6ec945db73332b6cad077f02cef83 | 2359121ebcebba9db2cee20b4e8f8261c5b5116b | /configs_pytorch/f113-f10_6_pt.py | 473e694402d9b971c6aaf8839943d7c3313f54aa | []
| no_license | EliasVansteenkiste/plnt | 79840bbc9f1518c6831705d5a363dcb3e2d2e5c2 | e15ea384fd0f798aabef04d036103fe7af3654e0 | refs/heads/master | 2021-01-20T00:34:37.275041 | 2017-07-20T18:03:08 | 2017-07-20T18:03:08 | 89,153,531 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,362 | py |
#copy of j25
import numpy as np
from collections import namedtuple
from functools import partial
from PIL import Image
import data_transforms
import data_iterators
import pathfinder
import utils
import app
import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
import torch.nn.init
import torch.nn.functional as F
import math
restart_from_save = None
rng = np.random.RandomState(42)
# transformations
p_transform = {'patch_size': (256, 256),
'channels': 4,
'n_labels': 17}
p_augmentation = {
'rot90_values': [0, 1, 2, 3],
'flip': [0, 1]
}
channel_zmuv_stats = {
'avg': [4970.55, 4245.35, 3064.64, 6360.08],
'std': [1785.79, 1576.31, 1661.19, 1841.09]}
# data preparation function
def data_prep_function_train(x, p_transform=p_transform, p_augmentation=p_augmentation, **kwargs):
x = np.array(x,dtype=np.float32)
x = data_transforms.channel_zmuv(x, img_stats=channel_zmuv_stats, no_channels=4)
x = data_transforms.random_lossless(x, p_augmentation, rng)
return x
def data_prep_function_valid(x, p_transform=p_transform, **kwargs):
x = np.array(x, dtype=np.float32)
x = data_transforms.channel_zmuv(x, img_stats=channel_zmuv_stats, no_channels=4)
return x
def label_prep_function(x):
#cut out the label
return x
# data iterators
batch_size = 32
nbatches_chunk = 1
chunk_size = batch_size * nbatches_chunk
folds = app.make_stratified_split(no_folds=10)
#for checking if folds are equal over multiple config files
for fold in folds:
print sum(fold)
train_ids = folds[1] + folds[2] + folds[3] + folds[4] + folds[5] + folds[0] + folds[7] + folds[8] + folds[9]
valid_ids = folds[6]
all_ids = folds[0] + folds[1] + folds[2] + folds[3] + folds[4] + folds[5] + folds[6] + folds[7] + folds[8] + folds[9]
bad_ids = []
train_ids = [x for x in train_ids if x not in bad_ids]
valid_ids = [x for x in valid_ids if x not in bad_ids]
test_ids = np.arange(40669)
test2_ids = np.arange(20522)
train_data_iterator = data_iterators.DataGenerator(dataset='train',
batch_size=chunk_size,
img_ids = train_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_train,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=True, random=True, infinite=True)
feat_data_iterator = data_iterators.DataGenerator(dataset='train',
batch_size=chunk_size,
img_ids = all_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
valid_data_iterator = data_iterators.DataGenerator(dataset='train',
batch_size=chunk_size,
img_ids = valid_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
test_data_iterator = data_iterators.DataGenerator(dataset='test',
batch_size=chunk_size,
img_ids = test_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
test2_data_iterator = data_iterators.DataGenerator(dataset='test2',
batch_size=chunk_size,
img_ids = test2_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
import tta
tta = tta.LosslessTTA(p_augmentation)
tta_test_data_iterator = data_iterators.TTADataGenerator(dataset='test',
tta = tta,
duplicate_label = False,
img_ids = test_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
tta_test2_data_iterator = data_iterators.TTADataGenerator(dataset='test2',
tta = tta,
duplicate_label = False,
img_ids = test2_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
tta_valid_data_iterator = data_iterators.TTADataGenerator(dataset='train',
tta = tta,
duplicate_label = True,
batch_size=chunk_size,
img_ids = valid_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
tta_train_data_iterator = data_iterators.TTADataGenerator(dataset='train',
tta = tta,
duplicate_label = True,
batch_size=chunk_size,
img_ids = train_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
tta_all_data_iterator = data_iterators.TTADataGenerator(dataset='train',
tta = tta,
duplicate_label = True,
batch_size=chunk_size,
img_ids = all_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 60
validate_every = int(0.5 * nchunks_per_epoch)
save_every = int(10 * nchunks_per_epoch)
learning_rate_schedule = {
0: 5e-2,
int(max_nchunks * 0.2): 2e-2,
int(max_nchunks * 0.4): 1e-2,
int(max_nchunks * 0.6): 3e-3,
int(max_nchunks * 0.8): 1e-3
}
# model
from collections import OrderedDict
class MyDenseNet(nn.Module):
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
super(MyDenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(4, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock
num_features = num_init_features
self.blocks = []
final_num_features = 0
for i, num_layers in enumerate(block_config):
block = torchvision.models.densenet._DenseBlock(num_layers=num_layers, num_input_features=num_features,
bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
self.features.add_module('denseblock%d' % (i + 1), block)
self.blocks.append(block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = torchvision.models.densenet._Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
self.classifier_drop = nn.Dropout(p=0.5)
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
def forward(self, x, feat=False):
features = self.features(x)
out = F.relu(features, inplace=True)
out = self.classifier_drop(out)
out = F.avg_pool2d(out, kernel_size=7).view(features.size(0), -1)
if feat:
return out
out = self.classifier(out)
return out
def my_densenet121(pretrained=False, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MyDenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16))
    if pretrained:
        # Note: conv0 above takes 4 input channels, so the 3-channel ImageNet checkpoint
        # below will not load cleanly; this config always calls my_densenet121(pretrained=False).
        model.load_state_dict(torch.utils.model_zoo.load_url(torchvision.models.densenet.model_urls['densenet121']))
return model
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.densenet = my_densenet121(pretrained=False)
self.densenet.apply(weight_init)
self.densenet.classifier = nn.Linear(self.densenet.classifier.in_features, p_transform["n_labels"])
self.densenet.classifier.weight.data.zero_()
def forward(self, x, feat=False):
if feat:
return self.densenet(x,feat)
else:
x = self.densenet(x)
return F.sigmoid(x)
def weight_init(m):
if isinstance(m,nn.Conv2d):
m.weight.data=nn.init.orthogonal(m.weight.data)
def build_model():
net = Net()
return namedtuple('Model', [ 'l_out'])( net )
# loss
class MultiLoss(torch.nn.modules.loss._Loss):
def __init__(self, weight):
super(MultiLoss, self).__init__()
self.weight = weight
def forward(self, input, target):
torch.nn.modules.loss._assert_no_grad(target)
weighted = (self.weight*target)*(input-target)**2 +(1-target)*(input-target)**2
return torch.mean(weighted)
def build_objective():
return MultiLoss(5.0)
def build_objective2():
return MultiLoss(1.0)
def score(gts, preds):
return app.f2_score_arr(gts, preds)
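# Hedged sanity check for MultiLoss (illustrative values only, never called during training;
# assumes the same old-style PyTorch API as the rest of this config, where tensors are
# wrapped in autograd Variables). The helper name is hypothetical.
def _multiloss_example():
    from torch.autograd import Variable
    loss_fn = MultiLoss(5.0)
    pred = Variable(torch.FloatTensor([[0.2, 0.8]]))
    target = Variable(torch.FloatTensor([[1.0, 0.0]]))
    # positive label: 5 * (0.2 - 1.0)**2 = 3.2 ; negative label: (0.8 - 0.0)**2 = 0.64
    # mean over both elements = 1.92, showing how positives are up-weighted
    return loss_fn(pred, target)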
# updates
def build_updates(model, learning_rate):
return optim.SGD(model.parameters(), lr=learning_rate,momentum=0.9,weight_decay=0.0002)
| [
"[email protected]"
]
| |
207c707157fd441286ecf9952084a3c11def6be1 | 9c8fdfa389eaaf2df4c8ba0e3072d94671b5a622 | /0163. Missing Ranges.py | dbf13be4a24913568795bb380bbbac50fd487f69 | []
| no_license | aidardarmesh/leetcode2 | 41b64695afa850f9cc7847158abb6f2e8dc9abcd | 4cf03307c5caeccaa87ccce249322bd02397f489 | refs/heads/master | 2023-02-27T11:22:09.803298 | 2021-02-07T06:47:35 | 2021-02-07T06:47:35 | 264,491,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from typing import *
class Solution:
def findMissingRanges(self, nums: List[int], lower: int, upper: int) -> List[str]:
res = []
nums = [lower-1] + nums + [upper+1]
for i in range(len(nums)-1):
delta = nums[i+1] - nums[i]
if delta == 2:
res.append(str(nums[i]+1))
elif delta > 2:
res.append(str(nums[i]+1) + '->' + str(nums[i+1]-1))
return res
| [
"[email protected]"
]
| |
7ff1327d876b5b1c37bba099c54717d552757bf5 | aa0270b351402e421631ebc8b51e528448302fab | /sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_cassandra_data_centers_operations.py | 038ebacf65a49f1624ecc977c3c8039ecf4bbf85 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
]
| permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 46,744 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(resource_group_name: str, cluster_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"clusterName": _SERIALIZER.url(
"cluster_name", cluster_name, "str", max_length=100, min_length=1, pattern=r"^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$"
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, cluster_name: str, data_center_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"clusterName": _SERIALIZER.url(
"cluster_name", cluster_name, "str", max_length=100, min_length=1, pattern=r"^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$"
),
"dataCenterName": _SERIALIZER.url(
"data_center_name",
data_center_name,
"str",
max_length=100,
min_length=1,
pattern=r"^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, cluster_name: str, data_center_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"clusterName": _SERIALIZER.url(
"cluster_name", cluster_name, "str", max_length=100, min_length=1, pattern=r"^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$"
),
"dataCenterName": _SERIALIZER.url(
"data_center_name",
data_center_name,
"str",
max_length=100,
min_length=1,
pattern=r"^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_update_request(
resource_group_name: str, cluster_name: str, data_center_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"clusterName": _SERIALIZER.url(
"cluster_name", cluster_name, "str", max_length=100, min_length=1, pattern=r"^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$"
),
"dataCenterName": _SERIALIZER.url(
"data_center_name",
data_center_name,
"str",
max_length=100,
min_length=1,
pattern=r"^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str, cluster_name: str, data_center_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"clusterName": _SERIALIZER.url(
"cluster_name", cluster_name, "str", max_length=100, min_length=1, pattern=r"^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$"
),
"dataCenterName": _SERIALIZER.url(
"data_center_name",
data_center_name,
"str",
max_length=100,
min_length=1,
pattern=r"^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
class CassandraDataCentersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.cosmosdb.CosmosDBManagementClient`'s
:attr:`cassandra_data_centers` attribute.
"""
models = _models
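    # Hedged usage sketch (not generated code): these operations are reached through an
    # already-built management client rather than by instantiating this class, e.g.
    #
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.cosmosdb import CosmosDBManagementClient
    #
    #     client = CosmosDBManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #     for dc in client.cassandra_data_centers.list("<resource-group>", "<cluster-name>"):
    #         print(dc.name)
    #
    # The credential type and constructor signature follow the usual azure-mgmt pattern and
    # should be treated as assumptions to confirm against this package's documentation.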
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, resource_group_name: str, cluster_name: str, **kwargs: Any
) -> Iterable["_models.DataCenterResource"]:
"""List all data centers in a particular managed Cassandra cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name. Required.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataCenterResource or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.DataCenterResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.ListDataCenters] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ListDataCenters", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters"
}
@distributed_trace
def get(
self, resource_group_name: str, cluster_name: str, data_center_name: str, **kwargs: Any
) -> _models.DataCenterResource:
"""Get the properties of a managed Cassandra data center.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name. Required.
:type cluster_name: str
:param data_center_name: Data center name in a managed Cassandra cluster. Required.
:type data_center_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCenterResource or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.DataCenterResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.DataCenterResource] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
data_center_name=data_center_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DataCenterResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}"
}
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, cluster_name: str, data_center_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
data_center_name=data_center_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}"
}
@distributed_trace
def begin_delete(
self, resource_group_name: str, cluster_name: str, data_center_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Delete a managed Cassandra data center.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name. Required.
:type cluster_name: str
:param data_center_name: Data center name in a managed Cassandra cluster. Required.
:type data_center_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
cluster_name=cluster_name,
data_center_name=data_center_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}"
}
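    # Illustrative usage sketch (editorial, not generated code): begin_delete returns an
    # LROPoller[None], so callers typically block on .result() or poll with .done().
    # Client construction and resource names as in the sketch after get() above.
    #
    #   poller = client.cassandra_data_centers.begin_delete("my-rg", "my-cluster", "dc-1")
    #   poller.result()   # waits for the long-running delete to finish; returns None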
def _create_update_initial(
self,
resource_group_name: str,
cluster_name: str,
data_center_name: str,
body: Union[_models.DataCenterResource, IO],
**kwargs: Any
) -> _models.DataCenterResource:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DataCenterResource] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
_json = self._serialize.body(body, "DataCenterResource")
request = build_create_update_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
data_center_name=data_center_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DataCenterResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DataCenterResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}"
}
@overload
def begin_create_update(
self,
resource_group_name: str,
cluster_name: str,
data_center_name: str,
body: _models.DataCenterResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.DataCenterResource]:
"""Create or update a managed Cassandra data center. When updating, overwrite all properties. To
update only some properties, use PATCH.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name. Required.
:type cluster_name: str
:param data_center_name: Data center name in a managed Cassandra cluster. Required.
:type data_center_name: str
:param body: Parameters specifying the managed Cassandra data center. Required.
:type body: ~azure.mgmt.cosmosdb.models.DataCenterResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DataCenterResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.DataCenterResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_update(
self,
resource_group_name: str,
cluster_name: str,
data_center_name: str,
body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.DataCenterResource]:
"""Create or update a managed Cassandra data center. When updating, overwrite all properties. To
update only some properties, use PATCH.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name. Required.
:type cluster_name: str
:param data_center_name: Data center name in a managed Cassandra cluster. Required.
:type data_center_name: str
:param body: Parameters specifying the managed Cassandra data center. Required.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DataCenterResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.DataCenterResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_update(
self,
resource_group_name: str,
cluster_name: str,
data_center_name: str,
body: Union[_models.DataCenterResource, IO],
**kwargs: Any
) -> LROPoller[_models.DataCenterResource]:
"""Create or update a managed Cassandra data center. When updating, overwrite all properties. To
update only some properties, use PATCH.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name. Required.
:type cluster_name: str
:param data_center_name: Data center name in a managed Cassandra cluster. Required.
:type data_center_name: str
:param body: Parameters specifying the managed Cassandra data center. Is either a
         DataCenterResource type or an IO type. Required.
:type body: ~azure.mgmt.cosmosdb.models.DataCenterResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DataCenterResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.DataCenterResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DataCenterResource] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
data_center_name=data_center_name,
body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DataCenterResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}"
}
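    # Illustrative usage sketch (editorial, not generated code). Per the docstrings above,
    # "body" may be a DataCenterResource model or a raw IO payload; the model and property
    # names below are assumed from azure.mgmt.cosmosdb.models and the values are placeholders.
    #
    #   from azure.mgmt.cosmosdb import models
    #
    #   body = models.DataCenterResource(
    #       properties=models.DataCenterResourceProperties(
    #           data_center_location="westus2",
    #           delegated_subnet_id="<subnet-resource-id>",
    #           node_count=3,
    #       )
    #   )
    #   poller = client.cassandra_data_centers.begin_create_update(
    #       "my-rg", "my-cluster", "dc-1", body
    #   )
    #   data_center = poller.result()   # DataCenterResource once the LRO completes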
def _update_initial(
self,
resource_group_name: str,
cluster_name: str,
data_center_name: str,
body: Union[_models.DataCenterResource, IO],
**kwargs: Any
) -> _models.DataCenterResource:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DataCenterResource] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
_json = self._serialize.body(body, "DataCenterResource")
request = build_update_request(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
data_center_name=data_center_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DataCenterResource", pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize("DataCenterResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}"
}
@overload
def begin_update(
self,
resource_group_name: str,
cluster_name: str,
data_center_name: str,
body: _models.DataCenterResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.DataCenterResource]:
"""Update some of the properties of a managed Cassandra data center.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name. Required.
:type cluster_name: str
:param data_center_name: Data center name in a managed Cassandra cluster. Required.
:type data_center_name: str
:param body: Parameters to provide for specifying the managed Cassandra data center. Required.
:type body: ~azure.mgmt.cosmosdb.models.DataCenterResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DataCenterResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.DataCenterResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update(
self,
resource_group_name: str,
cluster_name: str,
data_center_name: str,
body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.DataCenterResource]:
"""Update some of the properties of a managed Cassandra data center.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name. Required.
:type cluster_name: str
:param data_center_name: Data center name in a managed Cassandra cluster. Required.
:type data_center_name: str
:param body: Parameters to provide for specifying the managed Cassandra data center. Required.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DataCenterResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.DataCenterResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update(
self,
resource_group_name: str,
cluster_name: str,
data_center_name: str,
body: Union[_models.DataCenterResource, IO],
**kwargs: Any
) -> LROPoller[_models.DataCenterResource]:
"""Update some of the properties of a managed Cassandra data center.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cluster_name: Managed Cassandra cluster name. Required.
:type cluster_name: str
:param data_center_name: Data center name in a managed Cassandra cluster. Required.
:type data_center_name: str
:param body: Parameters to provide for specifying the managed Cassandra data center. Is either
         a DataCenterResource type or an IO type. Required.
:type body: ~azure.mgmt.cosmosdb.models.DataCenterResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DataCenterResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.DataCenterResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DataCenterResource] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
data_center_name=data_center_name,
body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DataCenterResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/dataCenters/{dataCenterName}"
}
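    # Editorial note (not generated code): begin_update follows the same calling pattern as
    # begin_create_update above, but per its docstring only the supplied properties change
    # (PATCH-style). A hypothetical partial update, reusing the assumptions from that sketch:
    #
    #   body = models.DataCenterResource(
    #       properties=models.DataCenterResourceProperties(node_count=5)
    #   )
    #   updated = client.cassandra_data_centers.begin_update(
    #       "my-rg", "my-cluster", "dc-1", body
    #   ).result()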
| [
"[email protected]"
]
| |
06a768b10284ec7d0ca364d50ef7abfd9a2060ff | 358aaf68f3c60ebbbd86b3bc66d4e6c098bcb39e | /fonts/wonder16_8x16.py | ff96b7c5170caead9f8c94e725a350e50d913b60 | [
"MIT"
]
| permissive | ccccmagicboy/st7735_mpy | d2de0046abd81978d5176dace45a40758377af82 | b15f1bde69fbe6e0eb4931c57e71c136d8e7f024 | refs/heads/master | 2022-08-28T23:18:04.353733 | 2020-05-28T04:19:21 | 2020-05-28T04:19:21 | 254,869,035 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,756 | py | """converted from ..\fonts\WONDER16__8x16.bin """
WIDTH = 8
HEIGHT = 16
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x3c\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x36\x36\x36\x36\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x6c\x6c\x6c\xfe\x6c\x6c\xfe\x6c\x6c\x6c\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x7c\xc6\xc0\x78\x3c\x06\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\x00\x62\x66\x0c\x18\x30\x66\xc6\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x38\x30\x76\x7e\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x0c\x0c\x0c\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x6c\x38\xfe\x38\x6c\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x0c\x0c\x18\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xd6\xd6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x18\x78\x18\x18\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x06\x0c\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xcc\xfe\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc0\xc0\xfc\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x0c\x18\x30\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\x7e\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x0c\x00\x00\x0c\x0c\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x0c\x00\x00\x0c\x0c\x0c\x18\x00\x00\x00'\
b'\x00\x00\x00\x0c\x18\x30\x60\xc0\x60\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xfe\x00\xfe\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xde\xde\xde\xdc\xc0\x7e\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x60\x60\x7c\x60\x60\x60\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x60\x60\x7c\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc0\xc0\xc0\xce\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\xd8\xd8\x70\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xcc\xd8\xf0\xf0\xd8\xcc\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xee\xee\xfe\xd6\xd6\xd6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xe6\xe6\xf6\xde\xce\xce\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x66\x7c\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xd6\xd6\x7c\x06\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x78\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc0\xc0\x70\x1c\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7e\x5a\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xd6\xd6\xd6\xfe\xee\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\x6c\x38\x38\x6c\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x66\x3c\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x86\x0c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\x60\x60\x60\x60\x60\x60\x60\x60\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x80\xc0\x60\x30\x18\x0c\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x7c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x7c\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff'\
b'\x00\x18\x18\x18\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\xdc\x76\x00\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x7c\x66\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x7c\xcc\xcc\xcc\xcc\xcc\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xfe\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x36\x30\x30\xfc\x30\x30\x30\x30\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xce\xc6\xc6\xce\x76\x06\xc6\x7c\x00\x00'\
b'\x00\x00\xe0\x60\x60\x7c\x66\x66\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x0c\x00\x1c\x0c\x0c\x0c\x0c\x0c\xcc\xcc\x78\x00\x00'\
b'\x00\x00\xe0\x60\x60\x66\x66\x6c\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x38\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x6c\xfe\xd6\xd6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x7c\x60\x60\xf0\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\x7c\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x30\x30\x30\xfc\x30\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\x6c\x38\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xce\x76\x06\xc6\x7c\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x86\x0c\x18\x30\x62\xfe\x00\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x10\x38\x38\x6c\x6c\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
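# Editorial sketch (not part of the converted font data): characters FIRST (0x20) through
# LAST (0x7f) are stored as HEIGHT consecutive bytes each, one byte per pixel row, so a
# renderer can slice the table as below. The helper name and sample character are
# illustrative only.
#
#   def glyph(ch):
#       code = min(max(ord(ch), FIRST), LAST)
#       offset = (code - FIRST) * HEIGHT
#       return FONT[offset:offset + HEIGHT]
#
#   rows = glyph('A')   # 16 bytes, one per pixel row of the glyph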
| [
"[email protected]"
]
| |
4a84f62d878637adbdc7231f34f39011cb2eb011 | 5563fc38a479bf31b158e22ad381bcc1ef6677df | /triangles.py | cac783538a7e501568406903122530725b621395 | []
| no_license | MonRes/tester_school_day5 | e6a1d84bc32342e0e03061208458581ac4357f59 | 985fdb344bf7009c4ba3cd50910ba6b9b9fa172e | refs/heads/master | 2020-03-19T05:13:38.891646 | 2018-06-03T14:30:07 | 2018-06-03T14:30:07 | 135,911,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | a = 2
b = 4
c = 4
if a > 0 and b > 0 and c > 0:
    if a + b > c and a + c > b and b + c > a:
        print("a triangle can be formed")
    else:
        print("it cannot be formed")
else:
    print("it cannot be formed")
# or the preferred version
if a <= 0 or b <= 0 or c <= 0:
    print('a triangle cannot be formed - one of the lengths is negative')
elif a + b > c and a + c > b and b + c > a:
    print('A triangle can be formed')
else:
    print('a triangle cannot be formed')
# the repeated condition can also be extracted into a variable, e.g. length_negative = a <= 0 or b <= 0 or c <= 0 | [
"[email protected]"
]
| |
38bca89d76a9af6298b42dea1ea91f8d1a32682f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_starriest.py | b761053999a8675654b8264719f4395358c732c9 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
# class header
class _STARRIEST():

    def __init__(self):
        self.name = "STARRIEST"
        self.definitions = ['starry']

        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['starry']
| [
"[email protected]"
]
| |
2a2b3521345749ce428ed48884a780c98dae6414 | eb19175c18053e5d414b4f6442bdfd0f9f97e24d | /graphene/contrib/django/fields.py | ba47047e1fdf7326bacd6da7cfc98592cf5da2b6 | [
"MIT"
]
| permissive | jhgg/graphene | 6c4c5a64b7b0f39c8f6b32d17f62e1c31ca03825 | 67904e8329de3d69fec8c82ba8c3b4fe598afa8e | refs/heads/master | 2020-12-25T21:23:22.556227 | 2015-10-15T19:56:40 | 2015-10-15T19:56:40 | 43,073,008 | 1 | 0 | null | 2015-09-24T14:47:19 | 2015-09-24T14:47:19 | null | UTF-8 | Python | false | false | 3,071 | py | from graphene.core.fields import (
ListField
)
from graphene import relay
from graphene.core.fields import Field, LazyField
from graphene.utils import cached_property, memoize, LazyMap
from graphene.relay.types import BaseNode
from django.db.models.query import QuerySet
from django.db.models.manager import Manager
@memoize
def get_type_for_model(schema, model):
schema = schema
types = schema.types.values()
for _type in types:
type_model = hasattr(_type, '_meta') and getattr(
_type._meta, 'model', None)
if model == type_model:
return _type
def lazy_map(value, func):
if isinstance(value, Manager):
value = value.get_queryset()
if isinstance(value, QuerySet):
return LazyMap(value, func)
return value
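# Editorial note (not part of the original module): lazy_map only wraps Django
# Manager/QuerySet values in a LazyMap; any other value passes through unchanged.
# The model and type names below are hypothetical.
#
#   lazy_map(SomeModel.objects.all(), SomeType)   # -> LazyMap over the queryset
#   lazy_map([1, 2, 3], SomeType)                 # -> [1, 2, 3], unchanged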
class DjangoConnectionField(relay.ConnectionField):
def wrap_resolved(self, value, instance, args, info):
schema = info.schema.graphene_schema
return lazy_map(value, self.get_object_type(schema))
class LazyListField(ListField):
def resolve(self, instance, args, info):
schema = info.schema.graphene_schema
resolved = super(LazyListField, self).resolve(instance, args, info)
return lazy_map(resolved, self.get_object_type(schema))
class ConnectionOrListField(LazyField):
@memoize
def get_field(self, schema):
model_field = self.field_type
field_object_type = model_field.get_object_type(schema)
if field_object_type and issubclass(field_object_type, BaseNode):
field = DjangoConnectionField(model_field)
else:
field = LazyListField(model_field)
field.contribute_to_class(self.object_type, self.name)
return field
class DjangoModelField(Field):
def __init__(self, model, *args, **kwargs):
super(DjangoModelField, self).__init__(None, *args, **kwargs)
self.model = model
def resolve(self, instance, args, info):
resolved = super(DjangoModelField, self).resolve(instance, args, info)
schema = info.schema.graphene_schema
_type = self.get_object_type(schema)
assert _type, ("Field %s cannot be retrieved as the "
"ObjectType is not registered by the schema" % (
self.field_name
))
return _type(resolved)
@memoize
def internal_type(self, schema):
_type = self.get_object_type(schema)
if not _type and self.object_type._meta.only_fields:
raise Exception(
"Model %r is not accessible by the schema. "
"You can either register the type manually "
"using @schema.register. "
"Or disable the field %s in %s" % (
self.model,
self.field_name,
self.object_type
)
)
return _type and _type.internal_type(schema) or Field.SKIP
def get_object_type(self, schema):
return get_type_for_model(schema, self.model)
| [
"[email protected]"
]
| |
8278b2891590710961bc86a4918e67d99a0fd397 | 7dc4413967a57c95bda3037154d151190a9309a3 | /django/mysite/mysite/ilib.py | a6d101b8121a4f3fce0e90b946e21d9a56f0aac0 | []
| no_license | connectthefuture/PythonCode | de0e74d81ef46ab34144172588455964d75d6648 | 01bb8c8052c2d89f0aed881f3ae886c8d04f1655 | refs/heads/master | 2021-05-14T23:31:26.334953 | 2016-05-21T13:04:34 | 2016-05-21T13:04:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | import re
import cStringIO
from django.http import HttpRequest
class DynamicFormException(Exception):
pass
class DynamicForm:
def __init__(self,):
self.fielddesc = []
# property, max_length, pattern, enum
def add(self, name, ** kwargs):
self.fielddesc.append((name, kwargs))
def valid(self):
for x, y in self.fielddesc:
pass
def as_table(self):
tmp = u'<tr><th><label for="id_%s">%s:</label></th><td><input id="id_%s" name="%s" type="text" /></td></tr>'
cio = cStringIO.StringIO()
for key, value in self.fielddesc:
lower_key = key.lower()
cio.write(tmp % (lower_key, key, lower_key, key))
return cio.getvalue()
def valid(self, request):
# fields = [x for x, y in self.fielddesc]
tmp = dict(self.fielddesc)
        for key, value in request.POST.items():
if key in tmp:
if 'max_length' in tmp[key] and len(value) > tmp[key]['max_length']:
raise DynamicFormException('field length too long')
                if 'pattern' in tmp[key] and not re.search(tmp[key]['pattern'], value):
                    raise DynamicFormException('value does not match the pattern')
# def NeedLogin():
# if not request.user.is_authenticated():
# return HttpResponseRedirect('/accounts/login')
if __name__ == '__main__':
df = DynamicForm()
df.add('A')
print(df.as_table())
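    # Hypothetical extension of this demo (not in the original script): a field can also
    # carry the keyword arguments that valid() checks against POST data, e.g.
    #
    #   df.add('Email', max_length=50, pattern=r'^[^@]+@[^@]+$')
    #   df.valid(request)   # raises DynamicFormException if a field fails validation
    #
    # where "request" would be a Django HttpRequest carrying the submitted form.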
| [
"[email protected]"
]
| |
3e013ccefdef52f15ef3f49e35457dfbaad52bc4 | be0898ceaee2a7758ffe0365b976f597b2ad26dd | /rls/common/recorder.py | 15420a8f27c34b97cd49f7aeb8b188faf7054628 | [
"Apache-2.0"
]
| permissive | violet712/RLs | 1edaa6427108e3e36d513cb6038be771837ecca4 | 25cc97c96cbb19fe859c9387b7547cbada2c89f2 | refs/heads/master | 2023-08-25T12:04:24.174034 | 2021-10-03T15:37:32 | 2021-10-03T15:37:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,179 | py | from abc import ABC, abstractmethod
from collections import defaultdict
from copy import deepcopy
from typing import Dict
import numpy as np
from rls.utils.np_utils import arrprint
class Recoder(ABC):
def __init__(self):
pass
@abstractmethod
def episode_reset(self):
pass
@abstractmethod
def episode_step(self, rewards, dones):
pass
@abstractmethod
def episode_end(self):
pass
class SimpleMovingAverageRecoder(Recoder):
def __init__(self,
n_copies,
agent_ids,
gamma=0.99,
verbose=False,
length=10):
super().__init__()
self.n_copies = n_copies
self.agent_ids = agent_ids
self.gamma = gamma
self.verbose = verbose
self.length = length
self.now = 0
self.r_list = []
self.max = defaultdict(int)
self.min = defaultdict(int)
self.mean = defaultdict(int)
self.total_step = 0
self.episode = 0
self.steps = None
self.total_returns = None
self.discounted_returns = None
self.already_dones = None
def episode_reset(self):
self.steps = defaultdict(lambda: np.zeros((self.n_copies,), dtype=int))
self.total_returns = defaultdict(lambda: np.zeros((self.n_copies,), dtype=float))
self.discounted_returns = defaultdict(lambda: np.zeros((self.n_copies,), dtype=float))
self.already_dones = defaultdict(lambda: np.zeros((self.n_copies,), dtype=bool))
def episode_step(self, rewards: Dict[str, np.ndarray], dones: Dict[str, np.ndarray]):
for id in self.agent_ids:
self.total_step += 1
self.discounted_returns[id] += (self.gamma ** self.steps[id]) * (1 - self.already_dones[id]) * rewards[id]
self.steps[id] += (1 - self.already_dones[id]).astype(int)
self.total_returns[id] += (1 - self.already_dones[id]) * rewards[id]
self.already_dones[id] = np.logical_or(self.already_dones[id], dones[id])
def episode_end(self):
# TODO: optimize
self.episode += 1
self.r_list.append(deepcopy(self.total_returns))
if self.now >= self.length:
r_old = self.r_list.pop(0)
for id in self.agent_ids:
self.max[id] += (self.total_returns[id].max() - r_old[id].max()) / self.length
self.min[id] += (self.total_returns[id].min() - r_old[id].min()) / self.length
self.mean[id] += (self.total_returns[id].mean() - r_old[id].mean()) / self.length
else:
self.now = min(self.now + 1, self.length)
for id in self.agent_ids:
self.max[id] += (self.total_returns[id].max() - self.max[id]) / self.now
self.min[id] += (self.total_returns[id].min() - self.min[id]) / self.now
self.mean[id] += (self.total_returns[id].mean() - self.mean[id]) / self.now
@property
def is_all_done(self): # TODO:
if len(self.agent_ids) > 1:
return np.logical_or(*self.already_dones.values()).all()
else:
return self.already_dones[self.agent_ids[0]].all()
@property
def has_done(self): # TODO:
if len(self.agent_ids) > 1:
return np.logical_or(*self.already_dones.values()).any()
else:
return self.already_dones[self.agent_ids[0]].any()
def summary_dict(self, title='Agent'):
_dicts = {}
for id in self.agent_ids:
_dicts[id] = {
f'{title}/total_rt_mean': self.total_returns[id].mean(),
f'{title}/total_rt_min': self.total_returns[id].min(),
f'{title}/total_rt_max': self.total_returns[id].max(),
f'{title}/discounted_rt_mean': self.discounted_returns[id].mean(),
f'{title}/discounted_rt_min': self.discounted_returns[id].min(),
f'{title}/discounted_rt_max': self.discounted_returns[id].max(),
f'{title}/sma_max': self.max[id],
f'{title}/sma_min': self.min[id],
f'{title}/sma_mean': self.mean[id]
}
if self.verbose:
_dicts[id].update({
f'{title}/first_done_step': self.steps[id][
self.already_dones[id] > 0].min() if self.has_done else -1,
f'{title}/last_done_step': self.steps[id][
self.already_dones[id] > 0].max() if self.has_done else -1
})
return _dicts
def __str__(self):
_str = f'Eps: {self.episode:3d}'
for id in self.agent_ids:
_str += f'\n Agent: {id.ljust(10)} | S: {self.steps[id].max():4d} | R: {arrprint(self.total_returns[id], 2)}'
if self.verbose:
first_done_step = self.steps[id][self.already_dones[id] > 0].min() if self.has_done else -1
last_done_step = self.steps[id][self.already_dones[id] > 0].max() if self.has_done else -1
_str += f' | FDS {first_done_step:4d} | LDS {last_done_step:4d}'
return _str
| [
"[email protected]"
]
| |
5932b28ef3e56a2c7b55c65e689ac09cb368b2aa | 72a03df85a6b1b06148338b9119b0b25d4fca164 | /goods/migrations/0008_auto_20191022_0228.py | 0a43eb86b65337692f50444b6527fb7210f08651 | []
| no_license | zeetec20/django-EComerce | f60bcc73ebb8d88ca06d5c8a77331681abc958ff | 5cf8e2aed3f9babe76043337a39f1dfbd0967916 | refs/heads/master | 2022-12-12T03:45:47.710718 | 2019-12-06T10:31:18 | 2019-12-06T10:31:18 | 216,199,678 | 1 | 0 | null | 2022-12-08T06:55:45 | 2019-10-19T12:02:33 | JavaScript | UTF-8 | Python | false | false | 337 | py | # Generated by Django 2.2.5 on 2019-10-22 02:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('goods', '0007_auto_20191018_2140'),
]
operations = [
migrations.RenameModel(
old_name='SemuaBarang',
new_name='SemuaBrand',
),
]
| [
"[email protected]"
]
| |
3362db548136e579197bb364e3296c92ff316937 | 7aa9f79ce2dc379e1139ee5cdf545a1d8aba8f39 | /pygame_menu/examples/other/dynamic_widget_update.py | 5f12d964b99e455d1adc88bf769d1109ae870a2e | [
"MIT"
]
| permissive | arpruss/pygame-menu | 7a755cad7bd36bda8750b6e820146a1037e5d73f | 25cefb5cfc60383544d704b83a32d43dfc621c23 | refs/heads/master | 2021-07-23T17:51:24.536494 | 2021-05-08T17:27:47 | 2021-05-08T17:27:47 | 248,988,541 | 0 | 0 | MIT | 2020-04-03T17:24:25 | 2020-03-21T14:05:48 | Python | UTF-8 | Python | false | false | 7,566 | py | """
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - DYNAMIC WIDGET UPDATE
Dynamically updates the widgets based on user events.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
__all__ = ['main']
import pygame
import pygame_menu
from pygame_menu.examples import create_example_window
import math
from typing import Dict, Any
class App(object):
"""
The following object creates the whole app.
"""
image_widget: 'pygame_menu.widgets.Image'
item_description_widget: 'pygame_menu.widgets.Label'
menu: 'pygame_menu.Menu'
modes: Dict[int, Dict[str, Any]]
quit_button: 'pygame_menu.widgets.Button'
quit_button_fake: 'pygame_menu.widgets.Button'
selector_widget: 'pygame_menu.widgets.Selector'
surface: 'pygame.Surface'
def __init__(self) -> None:
"""
Constructor.
"""
self.surface = create_example_window('Example - Dynamic Widget Update',
(640, 480), flags=pygame.NOFRAME)
# Load image
default_image = pygame_menu.BaseImage(
image_path=pygame_menu.baseimage.IMAGE_EXAMPLE_PYGAME_MENU
).scale(0.2, 0.2)
# Set theme
theme = pygame_menu.themes.THEME_DEFAULT.copy()
theme.title_bar_style = pygame_menu.widgets.MENUBAR_STYLE_UNDERLINE_TITLE
theme.title_close_button_cursor = pygame_menu.locals.CURSOR_HAND
theme.title_font_color = (35, 35, 35)
# This dict stores the values of the widgets to be changed dynamically
self.modes = {
1: {
'image': default_image.copy(),
'label': {
'color': theme.widget_font_color,
'size': theme.widget_font_size,
'text': 'The first one is very epic'
}
},
2: {
'image': default_image.copy().to_bw(),
'label': {
'color': (0, 0, 0),
'size': 20,
'text': 'This other one is also epic, but fancy'
}
},
3: {
'image': default_image.copy().flip(False, True).pick_channels('r'),
'label': {
'color': (255, 0, 0),
'size': 45,
'text': 'YOU D I E D'
}
}
}
# Create menus
self.menu = pygame_menu.Menu(
height=480,
onclose=pygame_menu.events.CLOSE,
theme=theme,
title='Everything is dynamic now',
width=640
)
self.selector_widget = self.menu.add.selector(
title='Pick one option: ',
items=[('The first', 1),
('The second', 2),
('The final mode', 3)],
onchange=self._on_selector_change
)
self.image_widget = self.menu.add.image(
image_path=self.modes[1]['image'],
padding=(25, 0, 0, 0) # top, right, bottom, left
)
self.item_description_widget = self.menu.add.label(title='')
self.quit_button = self.menu.add.button('Quit', pygame_menu.events.EXIT)
self.quit_button_fake = self.menu.add.button('You cannot quit', self.fake_quit,
font_color=(255, 255, 255))
self.quit_button_fake.add_draw_callback(self.animate_quit_button)
# Update the widgets based on selected value from selector get_value
# returns selected item tuple and index, so [0][1] means the second object
# from ('The first', 1) tuple
self._update_from_selection(int(self.selector_widget.get_value()[0][1]))
def animate_quit_button(
self,
widget: 'pygame_menu.widgets.Widget',
menu: 'pygame_menu.Menu'
) -> None:
"""
Animate widgets if the last option is selected.
:param widget: Widget to be updated
:param menu: Menu
:return: None
"""
if self.current == 3:
t = widget.get_counter_attribute('t', menu.get_clock().get_time() * 0.0075, math.pi)
widget.set_padding(10 * (1 + math.sin(t))) # Oscillating padding
widget.set_background_color((int(125 * (1 + math.sin(t))), 0, 0), None)
c = int(127 * (1 + math.cos(t)))
widget.update_font({'color': (c, c, c)}) # Widget font now is in grayscale
# widget.translate(10 * math.cos(t), 10 * math.sin(t))
widget.rotate(5 * t)
@staticmethod
def fake_quit() -> None:
"""
Function executed by fake quit button.
:return: None
"""
print('I said that you cannot quit')
def _update_from_selection(self, index: int) -> None:
"""
Change widgets depending on index.
:param index: Index
:return: None
"""
self.current = index
self.image_widget.set_image(self.modes[index]['image'])
self.item_description_widget.set_title(self.modes[index]['label']['text'])
self.item_description_widget.update_font(
{'color': self.modes[index]['label']['color'],
'size': self.modes[index]['label']['size']}
)
# Swap buttons using hide/show
if index == 3:
self.quit_button.hide()
self.quit_button_fake.show()
else:
self.quit_button.show()
self.quit_button_fake.hide()
def _on_selector_change(self, selected: Any, value: int) -> None:
"""
Function executed if selector changes.
:param selected: Selector data containing text and index
:param value: Value from the selected option
:return: None
"""
print('Selected data:', selected)
self._update_from_selection(value)
def mainloop(self, test: bool) -> None:
"""
App mainloop.
:param test: Test status
"""
self.menu.mainloop(self.surface, disable_loop=test)
def main(test: bool = False) -> 'App':
"""
Main function.
:param test: Indicate function is being tested
:return: App object
"""
app = App()
app.mainloop(test)
return app
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
89280ef30b0eb48a4d06dff7f8128783ab05c9f9 | ce15a162d71254d86207b60ec6c1c75117f4fe7c | /NiaPy/algorithms/other/ts.py | dd8050570c98318c15e3f7ab10433f47ffb341f5 | [
"MIT"
]
| permissive | sowmya-debug/NiaPy | eadfceabe939f08acdda87d0879abf72952d4cd1 | 1b8fa9949d238a01523a9822977e32dec4d86aa5 | refs/heads/master | 2022-04-18T05:20:05.140735 | 2020-04-18T16:35:30 | 2020-04-18T16:35:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,789 | py | # encoding=utf8
import logging
from numpy import random as rand
from NiaPy.algorithms.algorithm import Algorithm
logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.other')
logger.setLevel('INFO')
__all__ = ['TabuSearch']
# TODO implement algorithm
def TabuSearchF(task, SR=None, TL_size=25, rnd=rand):
if SR == None: SR = task.bRange
x = rnd.uniform(task.Lower, task.Upper)
x_f = task.eval(x)
# while not task.stopCondI():
	# Generate neighbours
	# evaluate the neighbours that are not in the tabu list
	# get the best of the evaluated neighbours
# compare new best with best
return x, x_f
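# Editorial sketch (an assumption, not NiaPy code): one way the commented outline in
# TabuSearchF could be completed, using only the task/rnd helpers already referenced in
# this module; neighbourhood generation and bound handling are deliberately naive.
#
#   tabu, best, best_f = [], x, x_f
#   while not task.stopCondI():
#       neighbours = [best + rnd.uniform(-SR, SR) for _ in range(10)]
#       fresh = [n for n in neighbours if not any((n == t).all() for t in tabu)]
#       if not fresh:
#           continue
#       evaluated = [(task.eval(n), n) for n in fresh]
#       n_f, n = min(evaluated, key=lambda e: e[0])
#       if n_f < best_f:
#           best, best_f = n, n_f
#       tabu.append(n)
#       if len(tabu) > TL_size:
#           tabu.pop(0)
#   return best, best_f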
class TabuSearch(Algorithm):
r"""Implementation of Tabu Search Algorithm.
Algorithm:
Tabu Search Algorithm
Date:
2018
Authors:
Klemen Berkovič
License:
MIT
Reference URL:
http://www.cleveralgorithms.com/nature-inspired/stochastic/tabu_search.html
Reference paper:
Attributes:
Name (List[str]): List of strings representing algorithm name.
"""
Name = ['TabuSearch', 'TS']
@staticmethod
def typeParameters(): return {
'NP': lambda x: isinstance(x, int) and x > 0
}
def setParameters(self, **ukwargs):
r"""Set the algorithm parameters/arguments."""
Algorithm.setParameters(self, **ukwargs)
def move(self): return list()
def runIteration(self, task, pop, fpop, xb, fxb, **dparams):
r"""Core function of the algorithm.
Args:
task (Task): Optimization task.
pop (numpy.ndarray): Current population.
fpop (numpy.ndarray): Individuals fitness/objective values.
xb (numpy.ndarray): Global best solution.
fxb (float): Global best solutions fitness/objective value.
**dparams (dict):
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, dict]:
"""
return pop, fpop, xb, fxb, dparams
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| [
"[email protected]"
]
| |
56085b3164c256eb63983021c193772e29f849b1 | de413f085b8c185ac4314a3c875bb2725ae1783a | /python/ThirteenTeV/Hadronizer/Hadronizer_TuneCP5_13TeV_SUSYGluGluToBBHToTauTau_M-90-amcatnlo-pythia8_cff.py | df7a7c19e1e37e8b2d8549bb7b5010c22c6b5bf8 | []
| no_license | good-soul/genproductions | 17b14eade1501207c0c4f389a2d3270239acf8a7 | 12bf6275067b332930e5fc7d65f1a05575d8d549 | refs/heads/master | 2021-04-18T18:48:18.575337 | 2018-03-24T13:29:56 | 2018-03-24T13:29:56 | 126,669,480 | 1 | 0 | null | 2018-03-25T06:04:53 | 2018-03-25T06:04:53 | null | UTF-8 | Python | false | false | 1,231 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.Pythia8aMCatNLOSettings_cfi import *
generator = cms.EDFilter("Pythia8HadronizerFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
pythia8aMCatNLOSettingsBlock,
processParameters = cms.vstring(
'TimeShower:nPartonsInBorn = 2', #number of coloured particles (before resonance decays) in born matrix element
'SLHA:useDecayTable = off',
'25:onMode = off', # turn OFF all H decays
'25:onIfAny = 15', # turn ON H->tautau
'25:m0 = 90' # mass of H
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
'pythia8aMCatNLOSettings',
'processParameters'
)
)
)
| [
"[email protected]"
]
| |
aac60b91b7a89824d2aa7fd64a3bf958d5cd6e37 | 2650ae36aca9912d3b75302a52bc91f26c8de31f | /nova/objects/instance.py | 8859dc00c185f6adacb4e98c5b2d5ad4c54d8465 | [
"Apache-2.0"
]
| permissive | wangyc666666/ussuri_nova | 52c09bd001dc1cc9e30364bd1dd916207d8ed644 | 0706b514f288216c41d64e98524ef7e517efb8d8 | refs/heads/master | 2023-02-25T10:38:00.966937 | 2021-02-02T07:51:07 | 2021-02-02T07:51:07 | 331,877,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,441 | py | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import versionutils
from sqlalchemy import or_
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from nova import availability_zones as avail_zone
from nova.compute import task_states
from nova.compute import vm_states
from nova.db import api as db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# List of fields that can be joined in DB layer.
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups',
'pci_devices', 'tags', 'services',
'fault']
# These are fields that are optional but don't translate to db columns
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['flavor', 'old_flavor',
'new_flavor', 'ec2_ids']
# These are fields that are optional and in instance_extra
_INSTANCE_EXTRA_FIELDS = ['numa_topology', 'pci_requests',
'flavor', 'vcpu_model', 'migration_context',
'keypairs', 'device_metadata', 'trusted_certs',
'resources']
# These are fields that are applied/dropped by the migration_context
_MIGRATION_CONTEXT_ATTRS = ['numa_topology', 'pci_requests',
'pci_devices', 'resources']
# These are fields that can be specified as expected_attrs
INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS +
_INSTANCE_EXTRA_FIELDS)
# These are fields that most query calls load by default
INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups']
# Maximum count of tags to one instance
MAX_TAG_COUNT = 50
def _expected_cols(expected_attrs):
"""Return expected_attrs that are columns needing joining.
NB: This function may modify expected_attrs if one
requested attribute requires another.
"""
if not expected_attrs:
return expected_attrs
simple_cols = [attr for attr in expected_attrs
if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]
complex_cols = ['extra.%s' % field
for field in _INSTANCE_EXTRA_FIELDS
if field in expected_attrs]
if complex_cols:
simple_cols.append('extra')
simple_cols = [x for x in simple_cols if x not in _INSTANCE_EXTRA_FIELDS]
expected_cols = simple_cols + complex_cols
# NOTE(pumaranikar): expected_cols list can contain duplicates since
# caller appends column attributes to expected_attr without checking if
# it is already present in the list or not. Hence, we remove duplicates
# here, if any. The resultant list is sorted based on list index to
# maintain the insertion order.
return sorted(list(set(expected_cols)), key=expected_cols.index)
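# Worked example (editorial note, not part of the original module): with the field lists
# defined above, _expected_cols(['metadata', 'numa_topology']) returns
# ['metadata', 'extra', 'extra.numa_topology']: the joined column, the 'extra' table
# itself, and the prefixed extra field, deduplicated in insertion order.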
_NO_DATA_SENTINEL = object()
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Instance(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 2.0: Initial version
# Version 2.1: Added services
# Version 2.2: Added keypairs
# Version 2.3: Added device_metadata
# Version 2.4: Added trusted_certs
# Version 2.5: Added hard_delete kwarg in destroy
# Version 2.6: Added hidden
# Version 2.7: Added resources
VERSION = '2.7'
fields = {
'id': fields.IntegerField(),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'image_ref': fields.StringField(nullable=True),
'kernel_id': fields.StringField(nullable=True),
'ramdisk_id': fields.StringField(nullable=True),
'hostname': fields.StringField(nullable=True),
'launch_index': fields.IntegerField(nullable=True),
'key_name': fields.StringField(nullable=True),
'key_data': fields.StringField(nullable=True),
'power_state': fields.IntegerField(nullable=True),
'vm_state': fields.StringField(nullable=True),
'task_state': fields.StringField(nullable=True),
'services': fields.ObjectField('ServiceList'),
'memory_mb': fields.IntegerField(nullable=True),
'vcpus': fields.IntegerField(nullable=True),
'root_gb': fields.IntegerField(nullable=True),
'ephemeral_gb': fields.IntegerField(nullable=True),
'ephemeral_key_uuid': fields.UUIDField(nullable=True),
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
'instance_type_id': fields.IntegerField(nullable=True),
'user_data': fields.StringField(nullable=True),
'reservation_id': fields.StringField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
'launched_on': fields.StringField(nullable=True),
'locked': fields.BooleanField(default=False),
'locked_by': fields.StringField(nullable=True),
'os_type': fields.StringField(nullable=True),
'architecture': fields.StringField(nullable=True),
'vm_mode': fields.StringField(nullable=True),
'uuid': fields.UUIDField(),
'root_device_name': fields.StringField(nullable=True),
'default_ephemeral_device': fields.StringField(nullable=True),
'default_swap_device': fields.StringField(nullable=True),
'config_drive': fields.StringField(nullable=True),
'access_ip_v4': fields.IPV4AddressField(nullable=True),
'access_ip_v6': fields.IPV6AddressField(nullable=True),
'auto_disk_config': fields.BooleanField(default=False),
'progress': fields.IntegerField(nullable=True),
'shutdown_terminate': fields.BooleanField(default=False),
'disable_terminate': fields.BooleanField(default=False),
# TODO(stephenfin): Remove this in version 3.0 of the object
'cell_name': fields.StringField(nullable=True),
'metadata': fields.DictOfStringsField(),
'system_metadata': fields.DictOfNullableStringsField(),
'info_cache': fields.ObjectField('InstanceInfoCache',
nullable=True),
# TODO(stephenfin): Remove this in version 3.0 of the object as it's
# related to nova-network
'security_groups': fields.ObjectField('SecurityGroupList'),
'fault': fields.ObjectField('InstanceFault', nullable=True),
'cleaned': fields.BooleanField(default=False),
'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
'numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True),
'pci_requests': fields.ObjectField('InstancePCIRequests',
nullable=True),
'device_metadata': fields.ObjectField('InstanceDeviceMetadata',
nullable=True),
'tags': fields.ObjectField('TagList'),
'flavor': fields.ObjectField('Flavor'),
'old_flavor': fields.ObjectField('Flavor', nullable=True),
'new_flavor': fields.ObjectField('Flavor', nullable=True),
'vcpu_model': fields.ObjectField('VirtCPUModel', nullable=True),
'ec2_ids': fields.ObjectField('EC2Ids'),
'migration_context': fields.ObjectField('MigrationContext',
nullable=True),
'keypairs': fields.ObjectField('KeyPairList'),
'trusted_certs': fields.ObjectField('TrustedCerts', nullable=True),
'hidden': fields.BooleanField(default=False),
'resources': fields.ObjectField('ResourceList', nullable=True),
}
obj_extra_fields = ['name']
def obj_make_compatible(self, primitive, target_version):
super(Instance, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (2, 7) and 'resources' in primitive:
del primitive['resources']
if target_version < (2, 6) and 'hidden' in primitive:
del primitive['hidden']
if target_version < (2, 4) and 'trusted_certs' in primitive:
del primitive['trusted_certs']
if target_version < (2, 3) and 'device_metadata' in primitive:
del primitive['device_metadata']
if target_version < (2, 2) and 'keypairs' in primitive:
del primitive['keypairs']
if target_version < (2, 1) and 'services' in primitive:
del primitive['services']
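        # Illustrative note (not in the original source): back-levelling a
        # version 2.7 primitive to target_version '2.3' drops 'resources',
        # 'hidden' and 'trusted_certs', while 'device_metadata' (added in 2.3)
        # is kept.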
def __init__(self, *args, **kwargs):
super(Instance, self).__init__(*args, **kwargs)
self._reset_metadata_tracking()
@property
def image_meta(self):
return objects.ImageMeta.from_instance(self)
def _reset_metadata_tracking(self, fields=None):
if fields is None or 'system_metadata' in fields:
self._orig_system_metadata = (dict(self.system_metadata) if
'system_metadata' in self else {})
if fields is None or 'metadata' in fields:
self._orig_metadata = (dict(self.metadata) if
'metadata' in self else {})
def obj_clone(self):
"""Create a copy of this instance object."""
nobj = super(Instance, self).obj_clone()
# Since the base object only does a deep copy of the defined fields,
# need to make sure to also copy the additional tracking metadata
# attributes so they don't show as changed and cause the metadata
        # to always be updated even when the information is stale.
if hasattr(self, '_orig_metadata'):
nobj._orig_metadata = dict(self._orig_metadata)
if hasattr(self, '_orig_system_metadata'):
nobj._orig_system_metadata = dict(self._orig_system_metadata)
return nobj
def obj_reset_changes(self, fields=None, recursive=False):
super(Instance, self).obj_reset_changes(fields,
recursive=recursive)
self._reset_metadata_tracking(fields=fields)
def obj_what_changed(self):
changes = super(Instance, self).obj_what_changed()
if 'metadata' in self and self.metadata != self._orig_metadata:
changes.add('metadata')
if 'system_metadata' in self and (self.system_metadata !=
self._orig_system_metadata):
changes.add('system_metadata')
return changes
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = super(Instance, cls)._obj_from_primitive(context, objver,
primitive)
self._reset_metadata_tracking()
return self
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for key in self.fields:
if key == 'name':
# NOTE(danms): prevent recursion
continue
elif not self.obj_attr_is_set(key):
# NOTE(danms): Don't trigger lazy-loads
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
except (exception.ObjectActionError,
exception.OrphanedObjectError):
# This indicates self.id was not set and/or could not be
# lazy loaded. What this means is the instance has not
# been persisted to a db yet, which should indicate it has
# not been scheduled yet. In this situation it will have a
# blank name.
if (self.vm_state == vm_states.BUILDING and
self.task_state == task_states.SCHEDULING):
base_name = ''
else:
# If the vm/task states don't indicate that it's being booted
# then we have a bug here. Log an error and attempt to return
# the uuid which is what an error above would return.
LOG.error('Could not lazy-load instance.id while '
'attempting to generate the instance name.')
base_name = self.uuid
return base_name
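        # Illustrative examples (not in the original source): with the default
        # CONF.instance_name_template of 'instance-%08x' and id=5 this returns
        # 'instance-00000005'; a template such as 'instance-%(uuid)s' hits the
        # TypeError branch above and is filled from the per-field info dict.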
def _flavor_from_db(self, db_flavor):
"""Load instance flavor information from instance_extra."""
# Before we stored flavors in instance_extra, certain fields, defined
# in nova.compute.flavors.system_metadata_flavor_props, were stored
# in the instance.system_metadata for the embedded instance.flavor.
# The "disabled" and "is_public" fields weren't one of those keys,
# however, so really old instances that had their embedded flavor
# converted to the serialized instance_extra form won't have the
# disabled attribute set and we need to default those here so callers
# don't explode trying to load instance.flavor.disabled.
def _default_flavor_values(flavor):
if 'disabled' not in flavor:
flavor.disabled = False
if 'is_public' not in flavor:
flavor.is_public = True
flavor_info = jsonutils.loads(db_flavor)
self.flavor = objects.Flavor.obj_from_primitive(flavor_info['cur'])
_default_flavor_values(self.flavor)
if flavor_info['old']:
self.old_flavor = objects.Flavor.obj_from_primitive(
flavor_info['old'])
_default_flavor_values(self.old_flavor)
else:
self.old_flavor = None
if flavor_info['new']:
self.new_flavor = objects.Flavor.obj_from_primitive(
flavor_info['new'])
_default_flavor_values(self.new_flavor)
else:
self.new_flavor = None
self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
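        # Shape of the serialized instance_extra.flavor payload handled above
        # (illustrative values, not from the original source):
        #     {"cur": {"nova_object.name": "Flavor",
        #              "nova_object.data": {"flavorid": "1", "disabled": false, ...}},
        #      "old": null,
        #      "new": null}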
@staticmethod
def _from_db_object(context, instance, db_inst, expected_attrs=None):
"""Method to help with migration to objects.
Converts a database entity to a formal object.
"""
instance._context = context
if expected_attrs is None:
expected_attrs = []
# Most of the field names match right now, so be quick
for field in instance.fields:
if field in INSTANCE_OPTIONAL_ATTRS:
continue
elif field == 'deleted':
instance.deleted = db_inst['deleted'] == db_inst['id']
elif field == 'cleaned':
instance.cleaned = db_inst['cleaned'] == 1
else:
instance[field] = db_inst[field]
if 'metadata' in expected_attrs:
instance['metadata'] = utils.instance_meta(db_inst)
if 'system_metadata' in expected_attrs:
instance['system_metadata'] = utils.instance_sys_meta(db_inst)
if 'fault' in expected_attrs:
instance['fault'] = (
objects.InstanceFault.get_latest_for_instance(
context, instance.uuid))
if 'ec2_ids' in expected_attrs:
instance._load_ec2_ids()
if 'info_cache' in expected_attrs:
if db_inst.get('info_cache') is None:
instance.info_cache = None
elif not instance.obj_attr_is_set('info_cache'):
# TODO(danms): If this ever happens on a backlevel instance
# passed to us by a backlevel service, things will break
instance.info_cache = objects.InstanceInfoCache(context)
if instance.info_cache is not None:
instance.info_cache._from_db_object(context,
instance.info_cache,
db_inst['info_cache'])
# TODO(danms): If we are updating these on a backlevel instance,
# we'll end up sending back new versions of these objects (see
        # above note for new info_caches).
if 'pci_devices' in expected_attrs:
pci_devices = base.obj_make_list(
context, objects.PciDeviceList(context),
objects.PciDevice, db_inst['pci_devices'])
instance['pci_devices'] = pci_devices
# TODO(stephenfin): Remove this as it's related to nova-network
if 'security_groups' in expected_attrs:
sec_groups = base.obj_make_list(
context, objects.SecurityGroupList(context),
objects.SecurityGroup, db_inst.get('security_groups', []))
instance['security_groups'] = sec_groups
if 'tags' in expected_attrs:
tags = base.obj_make_list(
context, objects.TagList(context),
objects.Tag, db_inst['tags'])
instance['tags'] = tags
if 'services' in expected_attrs:
services = base.obj_make_list(
context, objects.ServiceList(context),
objects.Service, db_inst['services'])
instance['services'] = services
instance._extra_attributes_from_db_object(instance, db_inst,
expected_attrs)
instance.obj_reset_changes()
return instance
@staticmethod
def _extra_attributes_from_db_object(instance, db_inst,
expected_attrs=None):
"""Method to help with migration of extra attributes to objects.
"""
if expected_attrs is None:
expected_attrs = []
# NOTE(danms): We can be called with a dict instead of a
# SQLAlchemy object, so we have to be careful here
if hasattr(db_inst, '__dict__'):
have_extra = 'extra' in db_inst.__dict__ and db_inst['extra']
else:
have_extra = 'extra' in db_inst and db_inst['extra']
if 'numa_topology' in expected_attrs:
if have_extra:
instance._load_numa_topology(
db_inst['extra'].get('numa_topology'))
else:
instance.numa_topology = None
if 'pci_requests' in expected_attrs:
if have_extra:
instance._load_pci_requests(
db_inst['extra'].get('pci_requests'))
else:
instance.pci_requests = None
if 'device_metadata' in expected_attrs:
if have_extra:
instance._load_device_metadata(
db_inst['extra'].get('device_metadata'))
else:
instance.device_metadata = None
if 'vcpu_model' in expected_attrs:
if have_extra:
instance._load_vcpu_model(
db_inst['extra'].get('vcpu_model'))
else:
instance.vcpu_model = None
if 'migration_context' in expected_attrs:
if have_extra:
instance._load_migration_context(
db_inst['extra'].get('migration_context'))
else:
instance.migration_context = None
if 'keypairs' in expected_attrs:
if have_extra:
instance._load_keypairs(db_inst['extra'].get('keypairs'))
if 'trusted_certs' in expected_attrs:
if have_extra:
instance._load_trusted_certs(
db_inst['extra'].get('trusted_certs'))
else:
instance.trusted_certs = None
if 'resources' in expected_attrs:
if have_extra:
instance._load_resources(
db_inst['extra'].get('resources'))
else:
instance.resources = None
if any([x in expected_attrs for x in ('flavor',
'old_flavor',
'new_flavor')]):
if have_extra and db_inst['extra'].get('flavor'):
instance._flavor_from_db(db_inst['extra']['flavor'])
@staticmethod
@db.select_db_reader_mode
def _db_instance_get_by_uuid(context, uuid, columns_to_join,
use_slave=False):
return db.instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join)
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = cls._db_instance_get_by_uuid(context, uuid, columns_to_join,
use_slave=use_slave)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable_classmethod
def get_by_id(cls, context, inst_id, expected_attrs=None):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get(context, inst_id,
columns_to_join=columns_to_join)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
if self.obj_attr_is_set('deleted') and self.deleted:
raise exception.ObjectActionError(action='create',
reason='already deleted')
updates = self.obj_get_changes()
# NOTE(danms): We know because of the check above that deleted
# is either unset or false. Since we need to avoid passing False
# down to the DB layer (which uses an integer), we can always
# default it to zero here.
updates['deleted'] = 0
expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
if attr in updates]
# TODO(stephenfin): Remove this as it's related to nova-network
if 'security_groups' in updates:
updates['security_groups'] = [x.name for x in
updates['security_groups']]
if 'info_cache' in updates:
updates['info_cache'] = {
'network_info': updates['info_cache'].network_info.json()
}
updates['extra'] = {}
numa_topology = updates.pop('numa_topology', None)
expected_attrs.append('numa_topology')
if numa_topology:
updates['extra']['numa_topology'] = numa_topology._to_json()
else:
updates['extra']['numa_topology'] = None
pci_requests = updates.pop('pci_requests', None)
expected_attrs.append('pci_requests')
if pci_requests:
updates['extra']['pci_requests'] = (
pci_requests.to_json())
else:
updates['extra']['pci_requests'] = None
device_metadata = updates.pop('device_metadata', None)
expected_attrs.append('device_metadata')
if device_metadata:
updates['extra']['device_metadata'] = (
device_metadata._to_json())
else:
updates['extra']['device_metadata'] = None
flavor = updates.pop('flavor', None)
if flavor:
expected_attrs.append('flavor')
old = ((self.obj_attr_is_set('old_flavor') and
self.old_flavor) and
self.old_flavor.obj_to_primitive() or None)
new = ((self.obj_attr_is_set('new_flavor') and
self.new_flavor) and
self.new_flavor.obj_to_primitive() or None)
flavor_info = {
'cur': self.flavor.obj_to_primitive(),
'old': old,
'new': new,
}
self._nullify_flavor_description(flavor_info)
updates['extra']['flavor'] = jsonutils.dumps(flavor_info)
keypairs = updates.pop('keypairs', None)
if keypairs is not None:
expected_attrs.append('keypairs')
updates['extra']['keypairs'] = jsonutils.dumps(
keypairs.obj_to_primitive())
vcpu_model = updates.pop('vcpu_model', None)
expected_attrs.append('vcpu_model')
if vcpu_model:
updates['extra']['vcpu_model'] = (
jsonutils.dumps(vcpu_model.obj_to_primitive()))
else:
updates['extra']['vcpu_model'] = None
trusted_certs = updates.pop('trusted_certs', None)
expected_attrs.append('trusted_certs')
if trusted_certs:
updates['extra']['trusted_certs'] = jsonutils.dumps(
trusted_certs.obj_to_primitive())
else:
updates['extra']['trusted_certs'] = None
resources = updates.pop('resources', None)
expected_attrs.append('resources')
if resources:
updates['extra']['resources'] = jsonutils.dumps(
resources.obj_to_primitive())
else:
updates['extra']['resources'] = None
db_inst = db.instance_create(self._context, updates)
self._from_db_object(self._context, self, db_inst, expected_attrs)
# NOTE(danms): The EC2 ids are created on their first load. In order
# to avoid them being missing and having to be loaded later, we
# load them once here on create now that the instance record is
# created.
self._load_ec2_ids()
self.obj_reset_changes(['ec2_ids'])
@base.remotable
def destroy(self, hard_delete=False):
if not self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='destroy',
reason='already destroyed')
if not self.obj_attr_is_set('uuid'):
raise exception.ObjectActionError(action='destroy',
reason='no uuid')
if not self.obj_attr_is_set('host') or not self.host:
# NOTE(danms): If our host is not set, avoid a race
constraint = db.constraint(host=db.equal_any(None))
else:
constraint = None
try:
db_inst = db.instance_destroy(self._context, self.uuid,
constraint=constraint,
hard_delete=hard_delete)
self._from_db_object(self._context, self, db_inst)
except exception.ConstraintNotMet:
raise exception.ObjectActionError(action='destroy',
reason='host changed')
delattr(self, base.get_attrname('id'))
def _save_info_cache(self, context):
if self.info_cache:
with self.info_cache.obj_alternate_context(context):
self.info_cache.save()
# TODO(stephenfin): Remove this as it's related to nova-network
def _save_security_groups(self, context):
security_groups = self.security_groups or []
for secgroup in security_groups:
with secgroup.obj_alternate_context(context):
secgroup.save()
self.security_groups.obj_reset_changes()
def _save_fault(self, context):
# NOTE(danms): I don't think we need to worry about this, do we?
pass
def _save_pci_requests(self, context):
# TODO(danms): Unfortunately, extra.pci_requests is not a serialized
# PciRequests object (!), so we have to handle it specially here.
# That should definitely be fixed!
self._extra_values_to_save['pci_requests'] = (
self.pci_requests.to_json())
def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices are held by the PCI tracker, and only the
        # PCI tracker is permitted to update the DB. Any change to devices made
        # from here will be dropped.
pass
def _save_tags(self, context):
# NOTE(gibi): tags are not saved through the instance
pass
def _save_services(self, context):
# NOTE(mriedem): services are not saved through the instance
pass
@staticmethod
def _nullify_flavor_description(flavor_info):
"""Helper method to nullify descriptions from a set of primitive
flavors.
Note that we don't remove the flavor description since that would
make the versioned notification FlavorPayload have to handle the field
not being set on the embedded instance.flavor.
        :param flavor_info: dict of primitive flavor objects where the values
            are the flavors which get persisted in the instance_extra.flavor
            table.
"""
for flavor in flavor_info.values():
if flavor and 'description' in flavor['nova_object.data']:
flavor['nova_object.data']['description'] = None
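        # Illustrative effect (made-up values): given
        #     flavor_info = {'cur': {'nova_object.data': {'description': 'gold', ...}},
        #                    'old': None, 'new': None}
        # the 'description' key is kept but reset, i.e. afterwards
        #     flavor_info['cur']['nova_object.data']['description'] is None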
def _save_flavor(self, context):
if not any([x in self.obj_what_changed() for x in
('flavor', 'old_flavor', 'new_flavor')]):
return
flavor_info = {
'cur': self.flavor.obj_to_primitive(),
'old': (self.old_flavor and
self.old_flavor.obj_to_primitive() or None),
'new': (self.new_flavor and
self.new_flavor.obj_to_primitive() or None),
}
self._nullify_flavor_description(flavor_info)
self._extra_values_to_save['flavor'] = jsonutils.dumps(flavor_info)
self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
def _save_old_flavor(self, context):
if 'old_flavor' in self.obj_what_changed():
self._save_flavor(context)
def _save_new_flavor(self, context):
if 'new_flavor' in self.obj_what_changed():
self._save_flavor(context)
def _save_ec2_ids(self, context):
# NOTE(hanlind): Read-only so no need to save this.
pass
def _save_keypairs(self, context):
if 'keypairs' in self.obj_what_changed():
self._save_extra_generic('keypairs')
self.obj_reset_changes(['keypairs'], recursive=True)
def _save_extra_generic(self, field):
if field in self.obj_what_changed():
obj = getattr(self, field)
value = None
if obj is not None:
value = jsonutils.dumps(obj.obj_to_primitive())
self._extra_values_to_save[field] = value
# TODO(stephenfin): Remove the 'admin_state_reset' field in version 3.0 of
# the object
@base.remotable
def save(self, expected_vm_state=None,
expected_task_state=None, admin_state_reset=False):
"""Save updates to this instance
Column-wise updates will be made based on the result of
self.obj_what_changed(). If expected_task_state is provided,
it will be checked against the in-database copy of the
instance before updates are made.
:param expected_vm_state: Optional tuple of valid vm states
for the instance to be in
:param expected_task_state: Optional tuple of valid task states
for the instance to be in
:param admin_state_reset: True if admin API is forcing setting
of task_state/vm_state
"""
context = self._context
self._extra_values_to_save = {}
updates = {}
changes = self.obj_what_changed()
for field in self.fields:
# NOTE(danms): For object fields, we construct and call a
# helper method like self._save_$attrname()
if (self.obj_attr_is_set(field) and
isinstance(self.fields[field], fields.ObjectField)):
try:
getattr(self, '_save_%s' % field)(context)
except AttributeError:
if field in _INSTANCE_EXTRA_FIELDS:
self._save_extra_generic(field)
continue
LOG.exception('No save handler for %s', field,
instance=self)
except db_exc.DBReferenceError as exp:
if exp.key != 'instance_uuid':
raise
# NOTE(melwitt): This will happen if we instance.save()
# before an instance.create() and FK constraint fails.
# In practice, this occurs in cells during a delete of
# an unscheduled instance. Otherwise, it could happen
                    # as a result of a bug.
raise exception.InstanceNotFound(instance_id=self.uuid)
elif field in changes:
updates[field] = self[field]
if self._extra_values_to_save:
db.instance_extra_update_by_uuid(context, self.uuid,
self._extra_values_to_save)
if not updates:
return
# Cleaned needs to be turned back into an int here
if 'cleaned' in updates:
if updates['cleaned']:
updates['cleaned'] = 1
else:
updates['cleaned'] = 0
if expected_task_state is not None:
updates['expected_task_state'] = expected_task_state
if expected_vm_state is not None:
updates['expected_vm_state'] = expected_vm_state
expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
if self.obj_attr_is_set(attr)]
if 'pci_devices' in expected_attrs:
# NOTE(danms): We don't refresh pci_devices on save right now
expected_attrs.remove('pci_devices')
# NOTE(alaski): We need to pull system_metadata for the
# notification.send_update() below. If we don't there's a KeyError
# when it tries to extract the flavor.
if 'system_metadata' not in expected_attrs:
expected_attrs.append('system_metadata')
old_ref, inst_ref = db.instance_update_and_get_original(
context, self.uuid, updates,
columns_to_join=_expected_cols(expected_attrs))
self._from_db_object(context, self, inst_ref,
expected_attrs=expected_attrs)
# NOTE(danms): We have to be super careful here not to trigger
# any lazy-loads that will unmigrate or unbackport something. So,
# make a copy of the instance for notifications first.
new_ref = self.obj_clone()
notifications.send_update(context, old_ref, new_ref)
self.obj_reset_changes()
@base.remotable
def refresh(self, use_slave=False):
extra = [field for field in INSTANCE_OPTIONAL_ATTRS
if self.obj_attr_is_set(field)]
current = self.__class__.get_by_uuid(self._context, uuid=self.uuid,
expected_attrs=extra,
use_slave=use_slave)
# NOTE(danms): We orphan the instance copy so we do not unexpectedly
# trigger a lazy-load (which would mean we failed to calculate the
# expected_attrs properly)
current._context = None
for field in self.fields:
if field not in self:
continue
if field not in current:
# If the field isn't in current we should not
# touch it, triggering a likely-recursive lazy load.
# Log it so we can see it happening though, as it
# probably isn't expected in most cases.
LOG.debug('Field %s is set but not in refreshed '
'instance, skipping', field)
continue
if field == 'info_cache':
self.info_cache.refresh()
elif self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
def _load_generic(self, attrname):
instance = self.__class__.get_by_uuid(self._context,
uuid=self.uuid,
expected_attrs=[attrname])
if attrname not in instance:
# NOTE(danms): Never allow us to recursively-load
raise exception.ObjectActionError(
action='obj_load_attr',
reason=_('loading %s requires recursion') % attrname)
# NOTE(danms): load anything we don't already have from the
# instance we got from the database to make the most of the
# performance hit.
for field in self.fields:
if field in instance and field not in self:
setattr(self, field, getattr(instance, field))
def _load_fault(self):
self.fault = objects.InstanceFault.get_latest_for_instance(
self._context, self.uuid)
def _load_numa_topology(self, db_topology=_NO_DATA_SENTINEL):
if db_topology is None:
self.numa_topology = None
elif db_topology is not _NO_DATA_SENTINEL:
self.numa_topology = \
objects.InstanceNUMATopology.obj_from_db_obj(self.uuid,
db_topology)
else:
try:
self.numa_topology = \
objects.InstanceNUMATopology.get_by_instance_uuid(
self._context, self.uuid)
except exception.NumaTopologyNotFound:
self.numa_topology = None
def _load_pci_requests(self, db_requests=_NO_DATA_SENTINEL):
if db_requests is not _NO_DATA_SENTINEL:
self.pci_requests = objects.InstancePCIRequests.obj_from_db(
self._context, self.uuid, db_requests)
else:
self.pci_requests = \
objects.InstancePCIRequests.get_by_instance_uuid(
self._context, self.uuid)
def _load_device_metadata(self, db_dev_meta=_NO_DATA_SENTINEL):
if db_dev_meta is None:
self.device_metadata = None
elif db_dev_meta is not _NO_DATA_SENTINEL:
self.device_metadata = \
objects.InstanceDeviceMetadata.obj_from_db(
self._context, db_dev_meta)
else:
self.device_metadata = \
objects.InstanceDeviceMetadata.get_by_instance_uuid(
self._context, self.uuid)
def _load_flavor(self):
instance = self.__class__.get_by_uuid(
self._context, uuid=self.uuid,
expected_attrs=['flavor'])
# NOTE(danms): Orphan the instance to make sure we don't lazy-load
# anything below
instance._context = None
self.flavor = instance.flavor
self.old_flavor = instance.old_flavor
self.new_flavor = instance.new_flavor
def _load_vcpu_model(self, db_vcpu_model=_NO_DATA_SENTINEL):
if db_vcpu_model is None:
self.vcpu_model = None
elif db_vcpu_model is _NO_DATA_SENTINEL:
self.vcpu_model = objects.VirtCPUModel.get_by_instance_uuid(
self._context, self.uuid)
else:
db_vcpu_model = jsonutils.loads(db_vcpu_model)
self.vcpu_model = objects.VirtCPUModel.obj_from_primitive(
db_vcpu_model)
def _load_ec2_ids(self):
self.ec2_ids = objects.EC2Ids.get_by_instance(self._context, self)
# TODO(stephenfin): Remove this as it's related to nova-network
def _load_security_groups(self):
self.security_groups = objects.SecurityGroupList.get_by_instance(
self._context, self)
def _load_pci_devices(self):
self.pci_devices = objects.PciDeviceList.get_by_instance_uuid(
self._context, self.uuid)
def _load_migration_context(self, db_context=_NO_DATA_SENTINEL):
if db_context is _NO_DATA_SENTINEL:
try:
self.migration_context = (
objects.MigrationContext.get_by_instance_uuid(
self._context, self.uuid))
except exception.MigrationContextNotFound:
self.migration_context = None
elif db_context is None:
self.migration_context = None
else:
self.migration_context = objects.MigrationContext.obj_from_db_obj(
db_context)
def _load_keypairs(self, db_keypairs=_NO_DATA_SENTINEL):
if db_keypairs is _NO_DATA_SENTINEL:
inst = objects.Instance.get_by_uuid(self._context, self.uuid,
expected_attrs=['keypairs'])
if 'keypairs' in inst:
self.keypairs = inst.keypairs
self.keypairs.obj_reset_changes(recursive=True)
self.obj_reset_changes(['keypairs'])
else:
self.keypairs = objects.KeyPairList(objects=[])
# NOTE(danms): We leave the keypairs attribute dirty in hopes
# someone else will save it for us
elif db_keypairs:
self.keypairs = objects.KeyPairList.obj_from_primitive(
jsonutils.loads(db_keypairs))
self.obj_reset_changes(['keypairs'])
def _load_tags(self):
self.tags = objects.TagList.get_by_resource_id(
self._context, self.uuid)
def _load_trusted_certs(self, db_trusted_certs=_NO_DATA_SENTINEL):
if db_trusted_certs is None:
self.trusted_certs = None
elif db_trusted_certs is _NO_DATA_SENTINEL:
self.trusted_certs = objects.TrustedCerts.get_by_instance_uuid(
self._context, self.uuid)
else:
self.trusted_certs = objects.TrustedCerts.obj_from_primitive(
jsonutils.loads(db_trusted_certs))
def _load_resources(self, db_resources=_NO_DATA_SENTINEL):
if db_resources is None:
self.resources = None
elif db_resources is _NO_DATA_SENTINEL:
self.resources = objects.ResourceList.get_by_instance_uuid(
self._context, self.uuid)
else:
self.resources = objects.ResourceList.obj_from_primitive(
jsonutils.loads(db_resources))
def apply_migration_context(self):
if self.migration_context:
self._set_migration_context_to_instance(prefix='new_')
else:
LOG.debug("Trying to apply a migration context that does not "
"seem to be set for this instance", instance=self)
def revert_migration_context(self):
if self.migration_context:
self._set_migration_context_to_instance(prefix='old_')
else:
LOG.debug("Trying to revert a migration context that does not "
"seem to be set for this instance", instance=self)
def _set_migration_context_to_instance(self, prefix):
for inst_attr_name in _MIGRATION_CONTEXT_ATTRS:
setattr(self, inst_attr_name, None)
attr_name = prefix + inst_attr_name
if attr_name in self.migration_context:
attr_value = getattr(
self.migration_context, attr_name)
setattr(self, inst_attr_name, attr_value)
@contextlib.contextmanager
def mutated_migration_context(self):
"""Context manager to temporarily apply the migration context.
Calling .save() from within the context manager means that the mutated
context will be saved which can cause incorrect resource tracking, and
should be avoided.
"""
# First check to see if we even have a migration context set and if not
# we can exit early without lazy-loading other attributes.
if 'migration_context' in self and self.migration_context is None:
yield
return
current_values = {}
for attr_name in _MIGRATION_CONTEXT_ATTRS:
current_values[attr_name] = getattr(self, attr_name)
self.apply_migration_context()
try:
yield
finally:
for attr_name in _MIGRATION_CONTEXT_ATTRS:
setattr(self, attr_name, current_values[attr_name])
@base.remotable
def drop_migration_context(self):
if self.migration_context:
db.instance_extra_update_by_uuid(self._context, self.uuid,
{'migration_context': None})
self.migration_context = None
def clear_numa_topology(self):
numa_topology = self.numa_topology
if numa_topology is not None:
self.numa_topology = numa_topology.clear_host_pinning()
def obj_load_attr(self, attrname):
# NOTE(danms): We can't lazy-load anything without a context and a uuid
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
if 'uuid' not in self:
raise exception.ObjectActionError(
action='obj_load_attr',
reason=_('attribute %s not lazy-loadable') % attrname)
LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s",
{'attr': attrname,
'name': self.obj_name(),
'uuid': self.uuid,
})
with utils.temporary_mutation(self._context, read_deleted='yes'):
self._obj_load_attr(attrname)
def _obj_load_attr(self, attrname):
"""Internal method for loading attributes from instances.
NOTE: Do not use this directly.
This method contains the implementation of lazy-loading attributes
from Instance object, minus some massaging of the context and
error-checking. This should always be called with the object-local
context set for reading deleted instances and with uuid set. All
of the code below depends on those two things. Thus, this should
only be called from obj_load_attr() itself.
:param attrname: The name of the attribute to be loaded
"""
# NOTE(danms): We handle some fields differently here so that we
# can be more efficient
if attrname == 'fault':
self._load_fault()
elif attrname == 'numa_topology':
self._load_numa_topology()
elif attrname == 'device_metadata':
self._load_device_metadata()
elif attrname == 'pci_requests':
self._load_pci_requests()
elif attrname == 'vcpu_model':
self._load_vcpu_model()
elif attrname == 'ec2_ids':
self._load_ec2_ids()
elif attrname == 'migration_context':
self._load_migration_context()
elif attrname == 'keypairs':
# NOTE(danms): Let keypairs control its own destiny for
# resetting changes.
return self._load_keypairs()
elif attrname == 'trusted_certs':
return self._load_trusted_certs()
elif attrname == 'resources':
return self._load_resources()
elif attrname == 'security_groups':
self._load_security_groups()
elif attrname == 'pci_devices':
self._load_pci_devices()
elif 'flavor' in attrname:
self._load_flavor()
elif attrname == 'services' and self.deleted:
# NOTE(mriedem): The join in the data model for instances.services
# filters on instances.deleted == 0, so if the instance is deleted
# don't attempt to even load services since we'll fail.
self.services = objects.ServiceList(self._context)
elif attrname == 'tags':
if self.deleted:
# NOTE(mriedem): Same story as services, the DB API query
# in instance_tag_get_by_instance_uuid will fail if the
# instance has been deleted so just return an empty tag list.
self.tags = objects.TagList(self._context)
else:
self._load_tags()
elif attrname in self.fields and attrname != 'id':
# NOTE(danms): We've never let 'id' be lazy-loaded, and use its
# absence as a sentinel that it hasn't been created in the database
# yet, so refuse to do so here.
self._load_generic(attrname)
else:
# NOTE(danms): This is historically what we did for
# something not in a field that was force-loaded. So, just
# do this for consistency.
raise exception.ObjectActionError(
action='obj_load_attr',
reason=_('attribute %s not lazy-loadable') % attrname)
self.obj_reset_changes([attrname])
def get_flavor(self, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
attr = '%sflavor' % prefix
try:
return getattr(self, attr)
except exception.FlavorNotFound:
# NOTE(danms): This only happens in the case where we don't
# have flavor information in instance_extra, and doing
# this triggers a lookup based on our instance_type_id for
# (very) legacy instances. That legacy code expects a None here,
# so emulate it for this helper, even though the actual attribute
# is not nullable.
return None
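    # Illustrative calls (not in the original source): get_flavor() returns
    # self.flavor, while get_flavor('old') and get_flavor('new') return
    # self.old_flavor and self.new_flavor respectively.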
@base.remotable
def delete_metadata_key(self, key):
"""Optimized metadata delete method.
This provides a more efficient way to delete a single metadata
key, instead of just calling instance.save(). This should be called
with the key still present in self.metadata, which it will update
after completion.
"""
db.instance_metadata_delete(self._context, self.uuid, key)
md_was_changed = 'metadata' in self.obj_what_changed()
del self.metadata[key]
self._orig_metadata.pop(key, None)
notifications.send_update(self._context, self, self)
if not md_was_changed:
self.obj_reset_changes(['metadata'])
def get_network_info(self):
if self.info_cache is None:
return network_model.NetworkInfo.hydrate([])
return self.info_cache.network_info
def get_bdms(self):
return objects.BlockDeviceMappingList.get_by_instance_uuid(
self._context, self.uuid)
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
get_fault = expected_attrs and 'fault' in expected_attrs
inst_faults = {}
if get_fault:
# Build an instance_uuid:latest-fault mapping
expected_attrs.remove('fault')
instance_uuids = [inst['uuid'] for inst in db_inst_list]
faults = objects.InstanceFaultList.get_by_instance_uuids(
context, instance_uuids)
for fault in faults:
if fault.instance_uuid not in inst_faults:
inst_faults[fault.instance_uuid] = fault
inst_cls = objects.Instance
inst_list.objects = []
for db_inst in db_inst_list:
inst_obj = inst_cls._from_db_object(
context, inst_cls(context), db_inst,
expected_attrs=expected_attrs)
if get_fault:
inst_obj.fault = inst_faults.get(inst_obj.uuid, None)
inst_list.objects.append(inst_obj)
inst_list.obj_reset_changes()
return inst_list
@db_api.pick_context_manager_writer
def populate_missing_availability_zones(context, count):
# instances without host have no reasonable AZ to set
not_empty_host = models.Instance.host != None # noqa E711
instances = (context.session.query(models.Instance).
filter(not_empty_host).
filter_by(availability_zone=None).limit(count).all())
count_all = len(instances)
count_hit = 0
for instance in instances:
az = avail_zone.get_instance_availability_zone(context, instance)
instance.availability_zone = az
instance.save(context.session)
count_hit += 1
return count_all, count_hit
@base.NovaObjectRegistry.register
class InstanceList(base.ObjectListBase, base.NovaObject):
# Version 2.0: Initial Version
# Version 2.1: Add get_uuids_by_host()
# Version 2.2: Pagination for get_active_by_window_joined()
# Version 2.3: Add get_count_by_vm_state()
# Version 2.4: Add get_counts()
# Version 2.5: Add get_uuids_by_host_and_node()
# Version 2.6: Add get_uuids_by_hosts()
VERSION = '2.6'
fields = {
'objects': fields.ListOfObjectsField('Instance'),
}
@classmethod
@db.select_db_reader_mode
def _get_by_filters_impl(cls, context, filters,
sort_key='created_at', sort_dir='desc', limit=None,
marker=None, expected_attrs=None, use_slave=False,
sort_keys=None, sort_dirs=None):
if sort_keys or sort_dirs:
db_inst_list = db.instance_get_all_by_filters_sort(
context, filters, limit=limit, marker=marker,
columns_to_join=_expected_cols(expected_attrs),
sort_keys=sort_keys, sort_dirs=sort_dirs)
else:
db_inst_list = db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, limit=limit,
marker=marker, columns_to_join=_expected_cols(expected_attrs))
return db_inst_list
@base.remotable_classmethod
def get_by_filters(cls, context, filters,
sort_key='created_at', sort_dir='desc', limit=None,
marker=None, expected_attrs=None, use_slave=False,
sort_keys=None, sort_dirs=None):
db_inst_list = cls._get_by_filters_impl(
context, filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=expected_attrs,
use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs)
# NOTE(melwitt): _make_instance_list could result in joined objects'
# (from expected_attrs) _from_db_object methods being called during
# Instance._from_db_object, each of which might choose to perform
# database writes. So, we call this outside of _get_by_filters_impl to
# avoid being nested inside a 'reader' database transaction context.
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@staticmethod
@db.select_db_reader_mode
def _db_instance_get_all_by_host(context, host, columns_to_join,
use_slave=False):
return db.instance_get_all_by_host(context, host,
columns_to_join=columns_to_join)
@base.remotable_classmethod
def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
db_inst_list = cls._db_instance_get_all_by_host(
context, host, columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_node(cls, context, host, node, expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_node(
context, host, node,
columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@staticmethod
@db_api.pick_context_manager_reader
def _get_uuids_by_host_and_node(context, host, node):
return context.session.query(
models.Instance.uuid).filter_by(
host=host).filter_by(node=node).filter_by(deleted=0).all()
@base.remotable_classmethod
def get_uuids_by_host_and_node(cls, context, host, node):
"""Return non-deleted instance UUIDs for the given host and node.
:param context: nova auth request context
:param host: Filter instances on this host.
:param node: Filter instances on this node.
:returns: list of non-deleted instance UUIDs on the given host and node
"""
return cls._get_uuids_by_host_and_node(context, host, node)
@base.remotable_classmethod
def get_by_host_and_not_type(cls, context, host, type_id=None,
expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_not_type(
context, host, type_id=type_id)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_all(cls, context, expected_attrs=None):
"""Returns all instances on all nodes."""
db_instances = db.instance_get_all(
context, columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_instances,
expected_attrs)
@base.remotable_classmethod
def get_hung_in_rebooting(cls, context, reboot_window,
expected_attrs=None):
db_inst_list = db.instance_get_all_hung_in_rebooting(context,
reboot_window)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@staticmethod
@db.select_db_reader_mode
def _db_instance_get_active_by_window_joined(
context, begin, end, project_id, host, columns_to_join,
use_slave=False, limit=None, marker=None):
return db.instance_get_active_by_window_joined(
context, begin, end, project_id, host,
columns_to_join=columns_to_join, limit=limit, marker=marker)
@base.remotable_classmethod
def _get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
expected_attrs=None, use_slave=False,
limit=None, marker=None):
# NOTE(mriedem): We need to convert the begin/end timestamp strings
# to timezone-aware datetime objects for the DB API call.
begin = timeutils.parse_isotime(begin)
end = timeutils.parse_isotime(end) if end else None
db_inst_list = cls._db_instance_get_active_by_window_joined(
context, begin, end, project_id, host,
columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave, limit=limit, marker=marker)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@classmethod
def get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
expected_attrs=None, use_slave=False,
limit=None, marker=None):
"""Get instances and joins active during a certain time window.
        :param context: nova request context
        :param begin: datetime for the start of the time window
        :param end: datetime for the end of the time window
        :param project_id: used to filter instances by project
        :param host: used to filter instances on a given compute host
        :param expected_attrs: list of related fields that can be joined
            in the database layer when querying for instances
        :param use_slave: if True, ship this query off to a DB slave
:param limit: maximum number of instances to return per page
:param marker: last instance uuid from the previous page
:returns: InstanceList
"""
# NOTE(mriedem): We have to convert the datetime objects to string
# primitives for the remote call.
begin = utils.isotime(begin)
end = utils.isotime(end) if end else None
return cls._get_active_by_window_joined(context, begin, end,
project_id, host,
expected_attrs,
use_slave=use_slave,
limit=limit, marker=marker)
# TODO(stephenfin): Remove this as it's related to nova-network
@base.remotable_classmethod
def get_by_security_group_id(cls, context, security_group_id):
db_secgroup = db.security_group_get(
context, security_group_id,
columns_to_join=['instances.info_cache',
'instances.system_metadata'])
return _make_instance_list(context, cls(), db_secgroup['instances'],
['info_cache', 'system_metadata'])
# TODO(stephenfin): Remove this as it's related to nova-network
@classmethod
def get_by_security_group(cls, context, security_group):
return cls.get_by_security_group_id(context, security_group.id)
# TODO(stephenfin): Remove this as it's related to nova-network
@base.remotable_classmethod
def get_by_grantee_security_group_ids(cls, context, security_group_ids):
raise NotImplementedError()
def fill_faults(self):
"""Batch query the database for our instances' faults.
:returns: A list of instance uuids for which faults were found.
"""
uuids = [inst.uuid for inst in self]
faults = objects.InstanceFaultList.get_latest_by_instance_uuids(
self._context, uuids)
faults_by_uuid = {}
for fault in faults:
faults_by_uuid[fault.instance_uuid] = fault
for instance in self:
if instance.uuid in faults_by_uuid:
instance.fault = faults_by_uuid[instance.uuid]
else:
# NOTE(danms): Otherwise the caller will cause a lazy-load
# when checking it, and we know there are none
instance.fault = None
instance.obj_reset_changes(['fault'])
return faults_by_uuid.keys()
@base.remotable_classmethod
def get_uuids_by_host(cls, context, host):
return db.instance_get_all_uuids_by_hosts(context, [host])[host]
@base.remotable_classmethod
def get_uuids_by_hosts(cls, context, hosts):
"""Returns a dict, keyed by hypervisor hostname, of a list of instance
UUIDs associated with that compute node.
"""
return db.instance_get_all_uuids_by_hosts(context, hosts)
@staticmethod
@db_api.pick_context_manager_reader
def _get_count_by_vm_state_in_db(context, project_id, user_id, vm_state):
return context.session.query(models.Instance.id).\
filter_by(deleted=0).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
filter_by(vm_state=vm_state).\
count()
@base.remotable_classmethod
def get_count_by_vm_state(cls, context, project_id, user_id, vm_state):
return cls._get_count_by_vm_state_in_db(context, project_id, user_id,
vm_state)
@staticmethod
@db_api.pick_context_manager_reader
def _get_counts_in_db(context, project_id, user_id=None):
# NOTE(melwitt): Copied from nova/db/sqlalchemy/api.py:
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == null()
)
project_query = context.session.query(
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb)).\
filter_by(deleted=0).\
filter(not_soft_deleted).\
filter_by(project_id=project_id)
# NOTE(mriedem): Filter out hidden instances since there should be a
# non-hidden version of the instance in another cell database and the
# API will only show one of them, so we don't count the hidden copy.
project_query = project_query.filter(
or_(models.Instance.hidden == false(),
models.Instance.hidden == null()))
project_result = project_query.first()
fields = ('instances', 'cores', 'ram')
project_counts = {field: int(project_result[idx] or 0)
for idx, field in enumerate(fields)}
counts = {'project': project_counts}
if user_id:
user_result = project_query.filter_by(user_id=user_id).first()
user_counts = {field: int(user_result[idx] or 0)
for idx, field in enumerate(fields)}
counts['user'] = user_counts
return counts
@base.remotable_classmethod
def get_counts(cls, context, project_id, user_id=None):
"""Get the counts of Instance objects in the database.
:param context: The request context for database access
:param project_id: The project_id to count across
:param user_id: The user_id to count across
:returns: A dict containing the project-scoped counts and user-scoped
counts if user_id is specified. For example:
{'project': {'instances': <count across project>,
'cores': <count across project>,
                                 'ram': <count across project>},
'user': {'instances': <count across user>,
'cores': <count across user>,
'ram': <count across user>}}
"""
return cls._get_counts_in_db(context, project_id, user_id=user_id)
@staticmethod
@db_api.pick_context_manager_reader
def _get_count_by_hosts(context, hosts):
return context.session.query(models.Instance).\
filter_by(deleted=0).\
filter(models.Instance.host.in_(hosts)).count()
@classmethod
def get_count_by_hosts(cls, context, hosts):
return cls._get_count_by_hosts(context, hosts)
| ["[email protected]"] | |
f7fff0ac356294d06420d93470eddbf0fdae1747 | b78721fca486f8cc5e486c50c98218fef5453215 | /ders_06_fonksiyonlar_01/parametresiz_fonk_01.py | 5292c7d4f57255af1049aoc... | [] | no_license | lakadirgeldi57/Bilgisayar-Bilimi-Python-Dersleri | bc8a37fc2a15d3bec2538a2c999434d53876b9bd | 253e2c1498ff86d9a3c13cc3d2bdc74278c41938 | refs/heads/master | 2020-03-12T00:58:00.020432 | 2018-04-02T08:48:32 | 2018-04-02T08:48:32 | 130,363,017 | 0 | 0 | null | 2018-04-20T13:04:19 | 2018-04-20T13:04:18 | null | UTF-8 | Python | false | false | 109 | py |
from random import random
random()            # called with no arguments; returns a float in [0.0, 1.0)
print(random()*10)  # scale the random value up to the range [0.0, 10.0)
#random(20)  # the random() function works without parameters (so this call would fail)
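# Illustrative addition (not part of the original lesson file): calling
# random() with an argument raises TypeError, which demonstrates the point
# of the comment above.
try:
    random(20)
except TypeError as err:
    print(err)  # e.g. "random() takes no arguments (1 given)"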
"[email protected]"
]
| |
b406e80149521103532381d0ae26b036733a82c3 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/unfett.py | 08fb69846efbb7ebacd195fa5a2e1e3f7091d4d0 | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 314 | py | ii = [('CookGHP3.py', 1), ('MarrFDI.py', 1), ('WilbRLW4.py', 1), ('CookGHP.py', 1), ('WilkJMC3.py', 1), ('AubePRP.py', 1), ('ChalTPW2.py', 3), ('ClarGE2.py', 1), ('GilmCRS.py', 2), ('DibdTRL2.py', 1), ('WadeJEB.py', 1), ('NewmJLP.py', 2), ('BabbCRD.py', 1), ('ClarGE3.py', 1), ('ChalTPW.py', 1), ('KeigTSS.py', 1)] | [
"[email protected]"
]
| |
b76ff91f6f8b759a8badf1e850fa18b4717619a1 | 7d122748fb075ffe16e82e3616cf5e5b60dee5bb | /custom/plm_date_bom-11.0.1.1/plm_date_bom/extended_class/mrp_bom_extension.py | ca28d1564b38428586c80c1d1071c319df543794 | []
| no_license | kulius/odoo11_uw | 95cd3b9cfdb18676e61d3565901f8ded0ee537d3 | a6f950a4c05c90ac5f53c1602ac2cda33faf41ee | refs/heads/master | 2021-08-07T07:53:15.585825 | 2018-07-23T03:33:20 | 2018-07-23T03:33:20 | 131,130,935 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 7,842 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OmniaSolutions, Your own solutions
# Copyright (C) 2010 OmniaSolutions (<http://omniasolutions.eu>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
'''
Created on 18 Jul 2016
@author: Daniel Smerghetto
'''
import logging
from odoo import models
from odoo import fields
from odoo import api
from odoo import _
from odoo.exceptions import UserError
class mrp_bom_extension_data(models.Model):
_name = 'mrp.bom'
_inherit = 'mrp.bom'
@api.multi
def _obsolete_compute(self):
'''
Verify if obsolete lines are present in current bom
'''
for bomObj in self:
obsoleteFlag = False
for bomLine in bomObj.bom_line_ids:
if bomLine.product_id.state == 'obsoleted':
obsoleteFlag = True
break
bomObj.sudo().obsolete_presents = obsoleteFlag
            bomObj.sudo().write({'obsolete_presents': obsoleteFlag})  # do not remove this forced write: without it the stored value is not refreshed when the form is opened
    # If store=True is set you must also provide @api.depends, because Odoo has to know when to recompute the field.
    # If the field is computed on the fly (no store), it must always be present in the view or it will never be computed.
obsolete_presents_computed = fields.Boolean(string=_("Obsolete presents computed"), compute='_obsolete_compute')
obsolete_presents = fields.Boolean(_("Obsolete presents"))
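    # A stored variant of the computed field above would look roughly like the
    # following (hypothetical sketch, not part of this module):
    #     obsolete_presents_computed = fields.Boolean(
    #         string=_("Obsolete presents computed"),
    #         compute='_obsolete_compute',
    #         store=True)
    # together with @api.depends('bom_line_ids.product_id.state') on
    # _obsolete_compute, so that Odoo knows when to recompute the stored value.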
@api.onchange('bom_line_ids')
def onchangeBomLine(self):
self._obsolete_compute()
@api.multi
def action_wizard_compute_bom(self):
return {
'domain': [],
'name': _('Bom Computation Type'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'plm.temporary_date_compute',
'type': 'ir.actions.act_window',
'target': 'new',
}
@api.multi
def showAllBomsToCompute(self):
outLines = []
def recursion(bomBrwsList):
for bomBrws in bomBrwsList:
for bomLineBrws in bomBrws.bom_line_ids:
templateBrws = bomLineBrws.product_id.product_tmpl_id
bomIds = self.getBomFromTemplate(templateBrws, 'normal')
recursion(bomIds)
if not templateBrws:
logging.warning('Product %s is not related to a product template.' % (bomLineBrws.product_id.id))
continue
if templateBrws.state == 'obsoleted':
outLines.append(bomBrws.id)
recursion(self)
outLines = list(set(outLines))
return {
'type': 'ir.actions.act_window',
'name': _('Product Engineering'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'mrp.bom',
'domain': [('id', 'in', outLines)],
}
def getBomFromTemplate(self, prodTmplBrws, bomType):
'''
Return bom object from product template and bom type
'''
return self.search([('product_tmpl_id', '=', prodTmplBrws.id), ('type', '=', bomType)])
class mrp_bom_data_compute(models.Model):
_name = 'plm.temporary_date_compute'
compute_type = fields.Selection([
('update', _('Update Bom replacing obsoleted bom lines with components at the latest revision.')),
('new_bom', _('Create new bom using last revision of all components.'))
],
_('Compute Type'),
required=True)
@api.multi
def action_compute_bom(self):
'''
            Branch on the chosen compute operation
'''
        bomIds = self.env.context.get('active_ids', [])  # only one record at a time arrives here because the call comes from the XML action window
if self.compute_type == 'update':
self.updateObsoleteBom(bomIds)
elif self.compute_type == 'new_bom':
self.copyObsoleteBom(bomIds)
else:
            raise UserError(_('You must select at least one option!'))
def updateObsoleteBom(self, bomIds=[], recursive=False):
'''
Update all obsoleted bom lines with last released product
'''
bomObj = self.env['mrp.bom']
prodProdObj = self.env['product.product']
for bomBrws in bomObj.browse(bomIds):
if bomBrws.type != 'normal':
                raise UserError(_('This functionality is available only for normal BoMs.'))
for bomLineBrws in bomBrws.bom_line_ids:
templateBrws = bomLineBrws.product_id.product_tmpl_id
if recursive:
bomIds = bomObj.getBomFromTemplate(templateBrws, 'normal').ids
self.updateObsoleteBom(bomIds)
if not templateBrws:
logging.warning('Product %s is not related to a product template.' % (bomLineBrws.product_id.id))
continue
if templateBrws.state == 'obsoleted':
eng_code = templateBrws.engineering_code
prodProdBrws = prodProdObj.search([('engineering_code', '=', eng_code)], order='engineering_revision DESC', limit=1)
for prodBrws in prodProdBrws:
bomLineBrws.product_id = prodBrws
if recursive:
                        # Check whether the newly added product has BoMs of its own
self.updateObsoleteBom(prodBrws.product_tmpl_id.bom_ids.ids)
bomBrws._obsolete_compute()
return {
'type': 'ir.actions.act_window',
'name': _('Product Engineering'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mrp.bom',
'domain': [('id', 'in', bomIds)],
}
def copyObsoleteBom(self, bomIds=[]):
'''
Copy current bom containing obsoleted components and update the copy with the last product revisions
'''
bomObject = self.env['mrp.bom']
for bomId in bomIds:
newBomBrws = bomObject.browse(bomId).copy()
self.updateObsoleteBom(newBomBrws.ids)
bomObject.browse(bomIds).write({'active': False})
return {
'type': 'ir.actions.act_window',
'name': _('Product Engineering'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mrp.bom',
            'domain': [('id', 'in', newBomBrws.ids)],
}
class bom_line_obsoleted_extension(models.Model):
_name = 'mrp.bom.line'
_inherit = 'mrp.bom.line'
@api.onchange('state')
def onchange_line_state(self):
'''
Force update flag every time bom line state changes
'''
for bomLineObj in self:
bomBrws = bomLineObj.bom_id
bomBrws._obsolete_compute()
| [
"[email protected]"
]
| |
f35f28d00f30430c9bed83d19a5e8c63c8ceee27 | fab14fae2b494068aa793901d76464afb965df7e | /benchmarks/ltl_maxplus/f3/maxplus_20_96.py | 2290b13126da73730e698908abfb46342a0f1f39 | [
"MIT"
]
| permissive | teodorov/F3 | 673f6f9ccc25acdfdecbfc180f439253474ba250 | c863215c318d7d5f258eb9be38c6962cf6863b52 | refs/heads/master | 2023-08-04T17:37:38.771863 | 2021-09-16T07:38:28 | 2021-09-16T07:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43,733 | py |
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_true, msat_make_false
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type
from mathsat import msat_make_and as _msat_make_and
from mathsat import msat_make_or as _msat_make_or
from mathsat import msat_make_not
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
def msat_make_and(menv: msat_env, *args):
if len(args) == 0:
return msat_make_true(menv)
if len(args) == 1:
return args[0]
res = _msat_make_and(menv, args[0], args[1])
for arg in args[2:]:
res = _msat_make_and(menv, res, arg)
return res
def msat_make_or(menv: msat_env, *args):
if len(args) == 0:
return msat_make_false(menv)
if len(args) == 1:
return args[0]
res = _msat_make_or(menv, args[0], args[1])
for arg in args[2:]:
res = _msat_make_or(menv, res, arg)
return res
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_m1 = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, n_m1)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
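# The helpers above derive the connectives not exposed directly by the MathSAT calls
# imported here: a - b is encoded as a + (-1) * b, a < b as not (a >= b), a >= b as
# b <= a, a > b as not (a <= b), and a -> b as (not a) or b.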
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
real_type = msat_get_rational_type(menv)
names = ["x_0", "x_1", "x_2", "x_3", "x_4", "x_5", "x_6", "x_7", "x_8", "x_9", "x_10", "x_11", "x_12", "x_13", "x_14", "x_15", "x_16", "x_17", "x_18", "x_19"]
xs = [msat_declare_function(menv, name, real_type)
for name in names]
xs = [msat_make_constant(menv, x) for x in xs]
x_xs = [msat_declare_function(menv, name_next(name), real_type)
for name in names]
x_xs = [msat_make_constant(menv, x_x) for x_x in x_xs]
curr2next = {x: x_x for x, x_x in zip(xs, x_xs)}
n_10_0 = msat_make_number(menv, "10.0")
n_11_0 = msat_make_number(menv, "11.0")
n_12_0 = msat_make_number(menv, "12.0")
n_13_0 = msat_make_number(menv, "13.0")
n_14_0 = msat_make_number(menv, "14.0")
n_15_0 = msat_make_number(menv, "15.0")
n_16_0 = msat_make_number(menv, "16.0")
n_17_0 = msat_make_number(menv, "17.0")
n_18_0 = msat_make_number(menv, "18.0")
n_19_0 = msat_make_number(menv, "19.0")
n_1_0 = msat_make_number(menv, "1.0")
n_20_0 = msat_make_number(menv, "20.0")
n_2_0 = msat_make_number(menv, "2.0")
n_3_0 = msat_make_number(menv, "3.0")
n_4_0 = msat_make_number(menv, "4.0")
n_5_0 = msat_make_number(menv, "5.0")
n_6_0 = msat_make_number(menv, "6.0")
n_7_0 = msat_make_number(menv, "7.0")
n_8_0 = msat_make_number(menv, "8.0")
n_9_0 = msat_make_number(menv, "9.0")
init = msat_make_true(menv)
trans = msat_make_true(menv)
# transitions
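    # Each block below encodes one max-plus update x_i' = max_j (x_j + c_ij) over a fixed
    # set of terms: the conjunction of >= constraints makes x_i' an upper bound on every
    # x_j + c_ij, and the disjunction of equalities forces it to coincide with one of them.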
expr0 = msat_make_plus(menv, xs[0], n_2_0)
expr1 = msat_make_plus(menv, xs[1], n_4_0)
expr2 = msat_make_plus(menv, xs[3], n_3_0)
expr3 = msat_make_plus(menv, xs[8], n_8_0)
expr4 = msat_make_plus(menv, xs[9], n_14_0)
expr5 = msat_make_plus(menv, xs[10], n_15_0)
expr6 = msat_make_plus(menv, xs[15], n_2_0)
expr7 = msat_make_plus(menv, xs[16], n_3_0)
expr8 = msat_make_plus(menv, xs[17], n_17_0)
expr9 = msat_make_plus(menv, xs[18], n_1_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[0], expr0),
msat_make_geq(menv, x_xs[0], expr1),
msat_make_geq(menv, x_xs[0], expr2),
msat_make_geq(menv, x_xs[0], expr3),
msat_make_geq(menv, x_xs[0], expr4),
msat_make_geq(menv, x_xs[0], expr5),
msat_make_geq(menv, x_xs[0], expr6),
msat_make_geq(menv, x_xs[0], expr7),
msat_make_geq(menv, x_xs[0], expr8),
msat_make_geq(menv, x_xs[0], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[0], expr0),
msat_make_equal(menv, x_xs[0], expr1),
msat_make_equal(menv, x_xs[0], expr2),
msat_make_equal(menv, x_xs[0], expr3),
msat_make_equal(menv, x_xs[0], expr4),
msat_make_equal(menv, x_xs[0], expr5),
msat_make_equal(menv, x_xs[0], expr6),
msat_make_equal(menv, x_xs[0], expr7),
msat_make_equal(menv, x_xs[0], expr8),
msat_make_equal(menv, x_xs[0], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_19_0)
expr1 = msat_make_plus(menv, xs[2], n_18_0)
expr2 = msat_make_plus(menv, xs[3], n_13_0)
expr3 = msat_make_plus(menv, xs[8], n_10_0)
expr4 = msat_make_plus(menv, xs[9], n_16_0)
expr5 = msat_make_plus(menv, xs[11], n_16_0)
expr6 = msat_make_plus(menv, xs[12], n_10_0)
expr7 = msat_make_plus(menv, xs[13], n_10_0)
expr8 = msat_make_plus(menv, xs[16], n_7_0)
expr9 = msat_make_plus(menv, xs[19], n_10_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[1], expr0),
msat_make_geq(menv, x_xs[1], expr1),
msat_make_geq(menv, x_xs[1], expr2),
msat_make_geq(menv, x_xs[1], expr3),
msat_make_geq(menv, x_xs[1], expr4),
msat_make_geq(menv, x_xs[1], expr5),
msat_make_geq(menv, x_xs[1], expr6),
msat_make_geq(menv, x_xs[1], expr7),
msat_make_geq(menv, x_xs[1], expr8),
msat_make_geq(menv, x_xs[1], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[1], expr0),
msat_make_equal(menv, x_xs[1], expr1),
msat_make_equal(menv, x_xs[1], expr2),
msat_make_equal(menv, x_xs[1], expr3),
msat_make_equal(menv, x_xs[1], expr4),
msat_make_equal(menv, x_xs[1], expr5),
msat_make_equal(menv, x_xs[1], expr6),
msat_make_equal(menv, x_xs[1], expr7),
msat_make_equal(menv, x_xs[1], expr8),
msat_make_equal(menv, x_xs[1], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_18_0)
expr1 = msat_make_plus(menv, xs[1], n_17_0)
expr2 = msat_make_plus(menv, xs[5], n_9_0)
expr3 = msat_make_plus(menv, xs[6], n_10_0)
expr4 = msat_make_plus(menv, xs[7], n_7_0)
expr5 = msat_make_plus(menv, xs[9], n_15_0)
expr6 = msat_make_plus(menv, xs[11], n_9_0)
expr7 = msat_make_plus(menv, xs[12], n_1_0)
expr8 = msat_make_plus(menv, xs[13], n_5_0)
expr9 = msat_make_plus(menv, xs[14], n_15_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[2], expr0),
msat_make_geq(menv, x_xs[2], expr1),
msat_make_geq(menv, x_xs[2], expr2),
msat_make_geq(menv, x_xs[2], expr3),
msat_make_geq(menv, x_xs[2], expr4),
msat_make_geq(menv, x_xs[2], expr5),
msat_make_geq(menv, x_xs[2], expr6),
msat_make_geq(menv, x_xs[2], expr7),
msat_make_geq(menv, x_xs[2], expr8),
msat_make_geq(menv, x_xs[2], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[2], expr0),
msat_make_equal(menv, x_xs[2], expr1),
msat_make_equal(menv, x_xs[2], expr2),
msat_make_equal(menv, x_xs[2], expr3),
msat_make_equal(menv, x_xs[2], expr4),
msat_make_equal(menv, x_xs[2], expr5),
msat_make_equal(menv, x_xs[2], expr6),
msat_make_equal(menv, x_xs[2], expr7),
msat_make_equal(menv, x_xs[2], expr8),
msat_make_equal(menv, x_xs[2], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_19_0)
expr1 = msat_make_plus(menv, xs[1], n_6_0)
expr2 = msat_make_plus(menv, xs[2], n_11_0)
expr3 = msat_make_plus(menv, xs[6], n_4_0)
expr4 = msat_make_plus(menv, xs[7], n_9_0)
expr5 = msat_make_plus(menv, xs[8], n_3_0)
expr6 = msat_make_plus(menv, xs[12], n_6_0)
expr7 = msat_make_plus(menv, xs[16], n_6_0)
expr8 = msat_make_plus(menv, xs[17], n_4_0)
expr9 = msat_make_plus(menv, xs[19], n_18_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[3], expr0),
msat_make_geq(menv, x_xs[3], expr1),
msat_make_geq(menv, x_xs[3], expr2),
msat_make_geq(menv, x_xs[3], expr3),
msat_make_geq(menv, x_xs[3], expr4),
msat_make_geq(menv, x_xs[3], expr5),
msat_make_geq(menv, x_xs[3], expr6),
msat_make_geq(menv, x_xs[3], expr7),
msat_make_geq(menv, x_xs[3], expr8),
msat_make_geq(menv, x_xs[3], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[3], expr0),
msat_make_equal(menv, x_xs[3], expr1),
msat_make_equal(menv, x_xs[3], expr2),
msat_make_equal(menv, x_xs[3], expr3),
msat_make_equal(menv, x_xs[3], expr4),
msat_make_equal(menv, x_xs[3], expr5),
msat_make_equal(menv, x_xs[3], expr6),
msat_make_equal(menv, x_xs[3], expr7),
msat_make_equal(menv, x_xs[3], expr8),
msat_make_equal(menv, x_xs[3], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_16_0)
expr1 = msat_make_plus(menv, xs[5], n_9_0)
expr2 = msat_make_plus(menv, xs[7], n_19_0)
expr3 = msat_make_plus(menv, xs[8], n_10_0)
expr4 = msat_make_plus(menv, xs[10], n_16_0)
expr5 = msat_make_plus(menv, xs[11], n_11_0)
expr6 = msat_make_plus(menv, xs[12], n_17_0)
expr7 = msat_make_plus(menv, xs[13], n_10_0)
expr8 = msat_make_plus(menv, xs[14], n_6_0)
expr9 = msat_make_plus(menv, xs[15], n_18_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[4], expr0),
msat_make_geq(menv, x_xs[4], expr1),
msat_make_geq(menv, x_xs[4], expr2),
msat_make_geq(menv, x_xs[4], expr3),
msat_make_geq(menv, x_xs[4], expr4),
msat_make_geq(menv, x_xs[4], expr5),
msat_make_geq(menv, x_xs[4], expr6),
msat_make_geq(menv, x_xs[4], expr7),
msat_make_geq(menv, x_xs[4], expr8),
msat_make_geq(menv, x_xs[4], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[4], expr0),
msat_make_equal(menv, x_xs[4], expr1),
msat_make_equal(menv, x_xs[4], expr2),
msat_make_equal(menv, x_xs[4], expr3),
msat_make_equal(menv, x_xs[4], expr4),
msat_make_equal(menv, x_xs[4], expr5),
msat_make_equal(menv, x_xs[4], expr6),
msat_make_equal(menv, x_xs[4], expr7),
msat_make_equal(menv, x_xs[4], expr8),
msat_make_equal(menv, x_xs[4], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_20_0)
expr1 = msat_make_plus(menv, xs[1], n_2_0)
expr2 = msat_make_plus(menv, xs[2], n_16_0)
expr3 = msat_make_plus(menv, xs[6], n_12_0)
expr4 = msat_make_plus(menv, xs[11], n_10_0)
expr5 = msat_make_plus(menv, xs[13], n_20_0)
expr6 = msat_make_plus(menv, xs[14], n_11_0)
expr7 = msat_make_plus(menv, xs[15], n_18_0)
expr8 = msat_make_plus(menv, xs[17], n_13_0)
expr9 = msat_make_plus(menv, xs[19], n_6_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[5], expr0),
msat_make_geq(menv, x_xs[5], expr1),
msat_make_geq(menv, x_xs[5], expr2),
msat_make_geq(menv, x_xs[5], expr3),
msat_make_geq(menv, x_xs[5], expr4),
msat_make_geq(menv, x_xs[5], expr5),
msat_make_geq(menv, x_xs[5], expr6),
msat_make_geq(menv, x_xs[5], expr7),
msat_make_geq(menv, x_xs[5], expr8),
msat_make_geq(menv, x_xs[5], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[5], expr0),
msat_make_equal(menv, x_xs[5], expr1),
msat_make_equal(menv, x_xs[5], expr2),
msat_make_equal(menv, x_xs[5], expr3),
msat_make_equal(menv, x_xs[5], expr4),
msat_make_equal(menv, x_xs[5], expr5),
msat_make_equal(menv, x_xs[5], expr6),
msat_make_equal(menv, x_xs[5], expr7),
msat_make_equal(menv, x_xs[5], expr8),
msat_make_equal(menv, x_xs[5], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_13_0)
expr1 = msat_make_plus(menv, xs[2], n_11_0)
expr2 = msat_make_plus(menv, xs[5], n_8_0)
expr3 = msat_make_plus(menv, xs[7], n_11_0)
expr4 = msat_make_plus(menv, xs[10], n_12_0)
expr5 = msat_make_plus(menv, xs[11], n_14_0)
expr6 = msat_make_plus(menv, xs[13], n_16_0)
expr7 = msat_make_plus(menv, xs[14], n_12_0)
expr8 = msat_make_plus(menv, xs[15], n_8_0)
expr9 = msat_make_plus(menv, xs[18], n_15_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[6], expr0),
msat_make_geq(menv, x_xs[6], expr1),
msat_make_geq(menv, x_xs[6], expr2),
msat_make_geq(menv, x_xs[6], expr3),
msat_make_geq(menv, x_xs[6], expr4),
msat_make_geq(menv, x_xs[6], expr5),
msat_make_geq(menv, x_xs[6], expr6),
msat_make_geq(menv, x_xs[6], expr7),
msat_make_geq(menv, x_xs[6], expr8),
msat_make_geq(menv, x_xs[6], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[6], expr0),
msat_make_equal(menv, x_xs[6], expr1),
msat_make_equal(menv, x_xs[6], expr2),
msat_make_equal(menv, x_xs[6], expr3),
msat_make_equal(menv, x_xs[6], expr4),
msat_make_equal(menv, x_xs[6], expr5),
msat_make_equal(menv, x_xs[6], expr6),
msat_make_equal(menv, x_xs[6], expr7),
msat_make_equal(menv, x_xs[6], expr8),
msat_make_equal(menv, x_xs[6], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_4_0)
expr1 = msat_make_plus(menv, xs[3], n_3_0)
expr2 = msat_make_plus(menv, xs[8], n_13_0)
expr3 = msat_make_plus(menv, xs[9], n_18_0)
expr4 = msat_make_plus(menv, xs[12], n_13_0)
expr5 = msat_make_plus(menv, xs[13], n_17_0)
expr6 = msat_make_plus(menv, xs[14], n_16_0)
expr7 = msat_make_plus(menv, xs[16], n_9_0)
expr8 = msat_make_plus(menv, xs[17], n_2_0)
expr9 = msat_make_plus(menv, xs[18], n_13_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[7], expr0),
msat_make_geq(menv, x_xs[7], expr1),
msat_make_geq(menv, x_xs[7], expr2),
msat_make_geq(menv, x_xs[7], expr3),
msat_make_geq(menv, x_xs[7], expr4),
msat_make_geq(menv, x_xs[7], expr5),
msat_make_geq(menv, x_xs[7], expr6),
msat_make_geq(menv, x_xs[7], expr7),
msat_make_geq(menv, x_xs[7], expr8),
msat_make_geq(menv, x_xs[7], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[7], expr0),
msat_make_equal(menv, x_xs[7], expr1),
msat_make_equal(menv, x_xs[7], expr2),
msat_make_equal(menv, x_xs[7], expr3),
msat_make_equal(menv, x_xs[7], expr4),
msat_make_equal(menv, x_xs[7], expr5),
msat_make_equal(menv, x_xs[7], expr6),
msat_make_equal(menv, x_xs[7], expr7),
msat_make_equal(menv, x_xs[7], expr8),
msat_make_equal(menv, x_xs[7], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_5_0)
expr1 = msat_make_plus(menv, xs[3], n_4_0)
expr2 = msat_make_plus(menv, xs[5], n_14_0)
expr3 = msat_make_plus(menv, xs[7], n_13_0)
expr4 = msat_make_plus(menv, xs[10], n_2_0)
expr5 = msat_make_plus(menv, xs[11], n_20_0)
expr6 = msat_make_plus(menv, xs[12], n_17_0)
expr7 = msat_make_plus(menv, xs[15], n_17_0)
expr8 = msat_make_plus(menv, xs[16], n_16_0)
expr9 = msat_make_plus(menv, xs[19], n_16_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[8], expr0),
msat_make_geq(menv, x_xs[8], expr1),
msat_make_geq(menv, x_xs[8], expr2),
msat_make_geq(menv, x_xs[8], expr3),
msat_make_geq(menv, x_xs[8], expr4),
msat_make_geq(menv, x_xs[8], expr5),
msat_make_geq(menv, x_xs[8], expr6),
msat_make_geq(menv, x_xs[8], expr7),
msat_make_geq(menv, x_xs[8], expr8),
msat_make_geq(menv, x_xs[8], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[8], expr0),
msat_make_equal(menv, x_xs[8], expr1),
msat_make_equal(menv, x_xs[8], expr2),
msat_make_equal(menv, x_xs[8], expr3),
msat_make_equal(menv, x_xs[8], expr4),
msat_make_equal(menv, x_xs[8], expr5),
msat_make_equal(menv, x_xs[8], expr6),
msat_make_equal(menv, x_xs[8], expr7),
msat_make_equal(menv, x_xs[8], expr8),
msat_make_equal(menv, x_xs[8], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_7_0)
expr1 = msat_make_plus(menv, xs[3], n_16_0)
expr2 = msat_make_plus(menv, xs[4], n_4_0)
expr3 = msat_make_plus(menv, xs[5], n_16_0)
expr4 = msat_make_plus(menv, xs[9], n_15_0)
expr5 = msat_make_plus(menv, xs[10], n_13_0)
expr6 = msat_make_plus(menv, xs[11], n_12_0)
expr7 = msat_make_plus(menv, xs[14], n_15_0)
expr8 = msat_make_plus(menv, xs[16], n_13_0)
expr9 = msat_make_plus(menv, xs[19], n_9_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[9], expr0),
msat_make_geq(menv, x_xs[9], expr1),
msat_make_geq(menv, x_xs[9], expr2),
msat_make_geq(menv, x_xs[9], expr3),
msat_make_geq(menv, x_xs[9], expr4),
msat_make_geq(menv, x_xs[9], expr5),
msat_make_geq(menv, x_xs[9], expr6),
msat_make_geq(menv, x_xs[9], expr7),
msat_make_geq(menv, x_xs[9], expr8),
msat_make_geq(menv, x_xs[9], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[9], expr0),
msat_make_equal(menv, x_xs[9], expr1),
msat_make_equal(menv, x_xs[9], expr2),
msat_make_equal(menv, x_xs[9], expr3),
msat_make_equal(menv, x_xs[9], expr4),
msat_make_equal(menv, x_xs[9], expr5),
msat_make_equal(menv, x_xs[9], expr6),
msat_make_equal(menv, x_xs[9], expr7),
msat_make_equal(menv, x_xs[9], expr8),
msat_make_equal(menv, x_xs[9], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_7_0)
expr1 = msat_make_plus(menv, xs[3], n_4_0)
expr2 = msat_make_plus(menv, xs[5], n_11_0)
expr3 = msat_make_plus(menv, xs[6], n_13_0)
expr4 = msat_make_plus(menv, xs[7], n_2_0)
expr5 = msat_make_plus(menv, xs[10], n_19_0)
expr6 = msat_make_plus(menv, xs[13], n_19_0)
expr7 = msat_make_plus(menv, xs[16], n_19_0)
expr8 = msat_make_plus(menv, xs[17], n_4_0)
expr9 = msat_make_plus(menv, xs[19], n_1_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[10], expr0),
msat_make_geq(menv, x_xs[10], expr1),
msat_make_geq(menv, x_xs[10], expr2),
msat_make_geq(menv, x_xs[10], expr3),
msat_make_geq(menv, x_xs[10], expr4),
msat_make_geq(menv, x_xs[10], expr5),
msat_make_geq(menv, x_xs[10], expr6),
msat_make_geq(menv, x_xs[10], expr7),
msat_make_geq(menv, x_xs[10], expr8),
msat_make_geq(menv, x_xs[10], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[10], expr0),
msat_make_equal(menv, x_xs[10], expr1),
msat_make_equal(menv, x_xs[10], expr2),
msat_make_equal(menv, x_xs[10], expr3),
msat_make_equal(menv, x_xs[10], expr4),
msat_make_equal(menv, x_xs[10], expr5),
msat_make_equal(menv, x_xs[10], expr6),
msat_make_equal(menv, x_xs[10], expr7),
msat_make_equal(menv, x_xs[10], expr8),
msat_make_equal(menv, x_xs[10], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_6_0)
expr1 = msat_make_plus(menv, xs[4], n_1_0)
expr2 = msat_make_plus(menv, xs[6], n_6_0)
expr3 = msat_make_plus(menv, xs[10], n_20_0)
expr4 = msat_make_plus(menv, xs[11], n_4_0)
expr5 = msat_make_plus(menv, xs[12], n_13_0)
expr6 = msat_make_plus(menv, xs[13], n_6_0)
expr7 = msat_make_plus(menv, xs[14], n_4_0)
expr8 = msat_make_plus(menv, xs[15], n_16_0)
expr9 = msat_make_plus(menv, xs[19], n_7_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[11], expr0),
msat_make_geq(menv, x_xs[11], expr1),
msat_make_geq(menv, x_xs[11], expr2),
msat_make_geq(menv, x_xs[11], expr3),
msat_make_geq(menv, x_xs[11], expr4),
msat_make_geq(menv, x_xs[11], expr5),
msat_make_geq(menv, x_xs[11], expr6),
msat_make_geq(menv, x_xs[11], expr7),
msat_make_geq(menv, x_xs[11], expr8),
msat_make_geq(menv, x_xs[11], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[11], expr0),
msat_make_equal(menv, x_xs[11], expr1),
msat_make_equal(menv, x_xs[11], expr2),
msat_make_equal(menv, x_xs[11], expr3),
msat_make_equal(menv, x_xs[11], expr4),
msat_make_equal(menv, x_xs[11], expr5),
msat_make_equal(menv, x_xs[11], expr6),
msat_make_equal(menv, x_xs[11], expr7),
msat_make_equal(menv, x_xs[11], expr8),
msat_make_equal(menv, x_xs[11], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_8_0)
expr1 = msat_make_plus(menv, xs[4], n_18_0)
expr2 = msat_make_plus(menv, xs[5], n_14_0)
expr3 = msat_make_plus(menv, xs[8], n_15_0)
expr4 = msat_make_plus(menv, xs[13], n_9_0)
expr5 = msat_make_plus(menv, xs[14], n_7_0)
expr6 = msat_make_plus(menv, xs[15], n_12_0)
expr7 = msat_make_plus(menv, xs[17], n_1_0)
expr8 = msat_make_plus(menv, xs[18], n_1_0)
expr9 = msat_make_plus(menv, xs[19], n_17_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[12], expr0),
msat_make_geq(menv, x_xs[12], expr1),
msat_make_geq(menv, x_xs[12], expr2),
msat_make_geq(menv, x_xs[12], expr3),
msat_make_geq(menv, x_xs[12], expr4),
msat_make_geq(menv, x_xs[12], expr5),
msat_make_geq(menv, x_xs[12], expr6),
msat_make_geq(menv, x_xs[12], expr7),
msat_make_geq(menv, x_xs[12], expr8),
msat_make_geq(menv, x_xs[12], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[12], expr0),
msat_make_equal(menv, x_xs[12], expr1),
msat_make_equal(menv, x_xs[12], expr2),
msat_make_equal(menv, x_xs[12], expr3),
msat_make_equal(menv, x_xs[12], expr4),
msat_make_equal(menv, x_xs[12], expr5),
msat_make_equal(menv, x_xs[12], expr6),
msat_make_equal(menv, x_xs[12], expr7),
msat_make_equal(menv, x_xs[12], expr8),
msat_make_equal(menv, x_xs[12], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_6_0)
expr1 = msat_make_plus(menv, xs[2], n_10_0)
expr2 = msat_make_plus(menv, xs[3], n_4_0)
expr3 = msat_make_plus(menv, xs[6], n_2_0)
expr4 = msat_make_plus(menv, xs[9], n_7_0)
expr5 = msat_make_plus(menv, xs[10], n_8_0)
expr6 = msat_make_plus(menv, xs[12], n_5_0)
expr7 = msat_make_plus(menv, xs[13], n_17_0)
expr8 = msat_make_plus(menv, xs[14], n_17_0)
expr9 = msat_make_plus(menv, xs[15], n_10_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[13], expr0),
msat_make_geq(menv, x_xs[13], expr1),
msat_make_geq(menv, x_xs[13], expr2),
msat_make_geq(menv, x_xs[13], expr3),
msat_make_geq(menv, x_xs[13], expr4),
msat_make_geq(menv, x_xs[13], expr5),
msat_make_geq(menv, x_xs[13], expr6),
msat_make_geq(menv, x_xs[13], expr7),
msat_make_geq(menv, x_xs[13], expr8),
msat_make_geq(menv, x_xs[13], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[13], expr0),
msat_make_equal(menv, x_xs[13], expr1),
msat_make_equal(menv, x_xs[13], expr2),
msat_make_equal(menv, x_xs[13], expr3),
msat_make_equal(menv, x_xs[13], expr4),
msat_make_equal(menv, x_xs[13], expr5),
msat_make_equal(menv, x_xs[13], expr6),
msat_make_equal(menv, x_xs[13], expr7),
msat_make_equal(menv, x_xs[13], expr8),
msat_make_equal(menv, x_xs[13], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_2_0)
expr1 = msat_make_plus(menv, xs[4], n_19_0)
expr2 = msat_make_plus(menv, xs[5], n_14_0)
expr3 = msat_make_plus(menv, xs[6], n_17_0)
expr4 = msat_make_plus(menv, xs[8], n_14_0)
expr5 = msat_make_plus(menv, xs[9], n_1_0)
expr6 = msat_make_plus(menv, xs[10], n_18_0)
expr7 = msat_make_plus(menv, xs[16], n_14_0)
expr8 = msat_make_plus(menv, xs[18], n_20_0)
expr9 = msat_make_plus(menv, xs[19], n_5_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[14], expr0),
msat_make_geq(menv, x_xs[14], expr1),
msat_make_geq(menv, x_xs[14], expr2),
msat_make_geq(menv, x_xs[14], expr3),
msat_make_geq(menv, x_xs[14], expr4),
msat_make_geq(menv, x_xs[14], expr5),
msat_make_geq(menv, x_xs[14], expr6),
msat_make_geq(menv, x_xs[14], expr7),
msat_make_geq(menv, x_xs[14], expr8),
msat_make_geq(menv, x_xs[14], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[14], expr0),
msat_make_equal(menv, x_xs[14], expr1),
msat_make_equal(menv, x_xs[14], expr2),
msat_make_equal(menv, x_xs[14], expr3),
msat_make_equal(menv, x_xs[14], expr4),
msat_make_equal(menv, x_xs[14], expr5),
msat_make_equal(menv, x_xs[14], expr6),
msat_make_equal(menv, x_xs[14], expr7),
msat_make_equal(menv, x_xs[14], expr8),
msat_make_equal(menv, x_xs[14], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_12_0)
expr1 = msat_make_plus(menv, xs[1], n_2_0)
expr2 = msat_make_plus(menv, xs[4], n_3_0)
expr3 = msat_make_plus(menv, xs[5], n_12_0)
expr4 = msat_make_plus(menv, xs[7], n_6_0)
expr5 = msat_make_plus(menv, xs[8], n_9_0)
expr6 = msat_make_plus(menv, xs[9], n_11_0)
expr7 = msat_make_plus(menv, xs[10], n_8_0)
expr8 = msat_make_plus(menv, xs[15], n_16_0)
expr9 = msat_make_plus(menv, xs[18], n_11_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[15], expr0),
msat_make_geq(menv, x_xs[15], expr1),
msat_make_geq(menv, x_xs[15], expr2),
msat_make_geq(menv, x_xs[15], expr3),
msat_make_geq(menv, x_xs[15], expr4),
msat_make_geq(menv, x_xs[15], expr5),
msat_make_geq(menv, x_xs[15], expr6),
msat_make_geq(menv, x_xs[15], expr7),
msat_make_geq(menv, x_xs[15], expr8),
msat_make_geq(menv, x_xs[15], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[15], expr0),
msat_make_equal(menv, x_xs[15], expr1),
msat_make_equal(menv, x_xs[15], expr2),
msat_make_equal(menv, x_xs[15], expr3),
msat_make_equal(menv, x_xs[15], expr4),
msat_make_equal(menv, x_xs[15], expr5),
msat_make_equal(menv, x_xs[15], expr6),
msat_make_equal(menv, x_xs[15], expr7),
msat_make_equal(menv, x_xs[15], expr8),
msat_make_equal(menv, x_xs[15], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_7_0)
expr1 = msat_make_plus(menv, xs[2], n_16_0)
expr2 = msat_make_plus(menv, xs[3], n_17_0)
expr3 = msat_make_plus(menv, xs[5], n_9_0)
expr4 = msat_make_plus(menv, xs[7], n_8_0)
expr5 = msat_make_plus(menv, xs[9], n_19_0)
expr6 = msat_make_plus(menv, xs[12], n_15_0)
expr7 = msat_make_plus(menv, xs[15], n_18_0)
expr8 = msat_make_plus(menv, xs[17], n_1_0)
expr9 = msat_make_plus(menv, xs[18], n_11_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[16], expr0),
msat_make_geq(menv, x_xs[16], expr1),
msat_make_geq(menv, x_xs[16], expr2),
msat_make_geq(menv, x_xs[16], expr3),
msat_make_geq(menv, x_xs[16], expr4),
msat_make_geq(menv, x_xs[16], expr5),
msat_make_geq(menv, x_xs[16], expr6),
msat_make_geq(menv, x_xs[16], expr7),
msat_make_geq(menv, x_xs[16], expr8),
msat_make_geq(menv, x_xs[16], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[16], expr0),
msat_make_equal(menv, x_xs[16], expr1),
msat_make_equal(menv, x_xs[16], expr2),
msat_make_equal(menv, x_xs[16], expr3),
msat_make_equal(menv, x_xs[16], expr4),
msat_make_equal(menv, x_xs[16], expr5),
msat_make_equal(menv, x_xs[16], expr6),
msat_make_equal(menv, x_xs[16], expr7),
msat_make_equal(menv, x_xs[16], expr8),
msat_make_equal(menv, x_xs[16], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_15_0)
expr1 = msat_make_plus(menv, xs[3], n_7_0)
expr2 = msat_make_plus(menv, xs[4], n_1_0)
expr3 = msat_make_plus(menv, xs[5], n_5_0)
expr4 = msat_make_plus(menv, xs[6], n_6_0)
expr5 = msat_make_plus(menv, xs[8], n_4_0)
expr6 = msat_make_plus(menv, xs[10], n_15_0)
expr7 = msat_make_plus(menv, xs[12], n_4_0)
expr8 = msat_make_plus(menv, xs[15], n_18_0)
expr9 = msat_make_plus(menv, xs[18], n_18_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[17], expr0),
msat_make_geq(menv, x_xs[17], expr1),
msat_make_geq(menv, x_xs[17], expr2),
msat_make_geq(menv, x_xs[17], expr3),
msat_make_geq(menv, x_xs[17], expr4),
msat_make_geq(menv, x_xs[17], expr5),
msat_make_geq(menv, x_xs[17], expr6),
msat_make_geq(menv, x_xs[17], expr7),
msat_make_geq(menv, x_xs[17], expr8),
msat_make_geq(menv, x_xs[17], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[17], expr0),
msat_make_equal(menv, x_xs[17], expr1),
msat_make_equal(menv, x_xs[17], expr2),
msat_make_equal(menv, x_xs[17], expr3),
msat_make_equal(menv, x_xs[17], expr4),
msat_make_equal(menv, x_xs[17], expr5),
msat_make_equal(menv, x_xs[17], expr6),
msat_make_equal(menv, x_xs[17], expr7),
msat_make_equal(menv, x_xs[17], expr8),
msat_make_equal(menv, x_xs[17], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_20_0)
expr1 = msat_make_plus(menv, xs[1], n_19_0)
expr2 = msat_make_plus(menv, xs[2], n_19_0)
expr3 = msat_make_plus(menv, xs[5], n_19_0)
expr4 = msat_make_plus(menv, xs[6], n_3_0)
expr5 = msat_make_plus(menv, xs[15], n_15_0)
expr6 = msat_make_plus(menv, xs[16], n_4_0)
expr7 = msat_make_plus(menv, xs[17], n_20_0)
expr8 = msat_make_plus(menv, xs[18], n_11_0)
expr9 = msat_make_plus(menv, xs[19], n_8_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[18], expr0),
msat_make_geq(menv, x_xs[18], expr1),
msat_make_geq(menv, x_xs[18], expr2),
msat_make_geq(menv, x_xs[18], expr3),
msat_make_geq(menv, x_xs[18], expr4),
msat_make_geq(menv, x_xs[18], expr5),
msat_make_geq(menv, x_xs[18], expr6),
msat_make_geq(menv, x_xs[18], expr7),
msat_make_geq(menv, x_xs[18], expr8),
msat_make_geq(menv, x_xs[18], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[18], expr0),
msat_make_equal(menv, x_xs[18], expr1),
msat_make_equal(menv, x_xs[18], expr2),
msat_make_equal(menv, x_xs[18], expr3),
msat_make_equal(menv, x_xs[18], expr4),
msat_make_equal(menv, x_xs[18], expr5),
msat_make_equal(menv, x_xs[18], expr6),
msat_make_equal(menv, x_xs[18], expr7),
msat_make_equal(menv, x_xs[18], expr8),
msat_make_equal(menv, x_xs[18], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_14_0)
expr1 = msat_make_plus(menv, xs[3], n_7_0)
expr2 = msat_make_plus(menv, xs[6], n_20_0)
expr3 = msat_make_plus(menv, xs[7], n_18_0)
expr4 = msat_make_plus(menv, xs[8], n_19_0)
expr5 = msat_make_plus(menv, xs[9], n_5_0)
expr6 = msat_make_plus(menv, xs[10], n_4_0)
expr7 = msat_make_plus(menv, xs[12], n_4_0)
expr8 = msat_make_plus(menv, xs[15], n_6_0)
expr9 = msat_make_plus(menv, xs[19], n_13_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[19], expr0),
msat_make_geq(menv, x_xs[19], expr1),
msat_make_geq(menv, x_xs[19], expr2),
msat_make_geq(menv, x_xs[19], expr3),
msat_make_geq(menv, x_xs[19], expr4),
msat_make_geq(menv, x_xs[19], expr5),
msat_make_geq(menv, x_xs[19], expr6),
msat_make_geq(menv, x_xs[19], expr7),
msat_make_geq(menv, x_xs[19], expr8),
msat_make_geq(menv, x_xs[19], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[19], expr0),
msat_make_equal(menv, x_xs[19], expr1),
msat_make_equal(menv, x_xs[19], expr2),
msat_make_equal(menv, x_xs[19], expr3),
msat_make_equal(menv, x_xs[19], expr4),
msat_make_equal(menv, x_xs[19], expr5),
msat_make_equal(menv, x_xs[19], expr6),
msat_make_equal(menv, x_xs[19], expr7),
msat_make_equal(menv, x_xs[19], expr8),
msat_make_equal(menv, x_xs[19], expr9),))
trans = msat_make_and(menv, trans, _t)
# ltl property: (X (F (G (X (x_12 - x_13 > 1)))))
ltl = enc.make_X(enc.make_F(enc.make_G(enc.make_X(msat_make_gt(menv, msat_make_minus(menv, xs[12], xs[13]), msat_make_number(menv, "1"))))))
return TermMap(curr2next), init, trans, ltl
| ["[email protected]"] | |
bc6317ffce733d59f4cb9a2013922e8ab494fce7 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/battle/BattleBase.py | 4534121d532aca205c36f1cedce7650e1355db65 | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 13,125 | py |
from pandac.PandaModules import *
from toontown.toonbase.ToontownBattleGlobals import *
from direct.task.Timer import *
import math
from direct.directnotify import DirectNotifyGlobal
from toontown.toon import NPCToons
from toontown.toonbase import TTLocalizer
# locations of the various types of data within the toonAttacks list
# used when calculating attack damage, accuracy bonus, and damage bonus
#
TOON_ID_COL = 0
TOON_TRACK_COL = 1
TOON_LVL_COL = 2
TOON_TGT_COL = 3
TOON_HP_COL = 4
TOON_ACCBONUS_COL = 5
TOON_HPBONUS_COL = 6
TOON_KBBONUS_COL = 7
SUIT_DIED_COL = 8
SUIT_REVIVE_COL = 9
# locations of the various types of data within the suitAttacks list
# used when calculating toon attack type, target, and attack damage
#
SUIT_ID_COL = 0
SUIT_ATK_COL = 1
SUIT_TGT_COL = 2
SUIT_HP_COL = 3
TOON_DIED_COL = 4
SUIT_BEFORE_TOONS_COL = 5
SUIT_TAUNT_COL = 6
# Toon actions and attacks
#
NO_ID = -1
NO_ATTACK = -1
UN_ATTACK = -2
PASS_ATTACK = -3 # used so we can display pass indicator
NO_TRAP = -1
LURE_SUCCEEDED = -1
PASS = 98
SOS = 99
NPCSOS = 97
PETSOS = 96
FIRE = 100
# Defined in ToontownBattleGlobals.py
HEAL = HEAL_TRACK
TRAP = TRAP_TRACK
LURE = LURE_TRACK
SOUND = SOUND_TRACK
THROW = THROW_TRACK
SQUIRT = SQUIRT_TRACK
DROP = DROP_TRACK
# For reference, in ToontownBattleGlobals
# NPC_RESTOCK_GAGS = 7
# NPC_TOONS_HIT = 8
# NPC_COGS_MISS = 9
# Attack times
#
TOON_ATTACK_TIME = 12.0
SUIT_ATTACK_TIME = 12.0
TOON_TRAP_DELAY = 0.8
TOON_SOUND_DELAY = 1.0
TOON_THROW_DELAY = 0.5
TOON_THROW_SUIT_DELAY = 1.0
TOON_SQUIRT_DELAY = 0.5
TOON_SQUIRT_SUIT_DELAY = 1.0
TOON_DROP_DELAY = 0.8
TOON_DROP_SUIT_DELAY = 1.0
TOON_RUN_T = 3.3
TIMEOUT_PER_USER = 5
TOON_FIRE_DELAY = 0.5
TOON_FIRE_SUIT_DELAY = 1.0
# Reward times
#
REWARD_TIMEOUT = 120
FLOOR_REWARD_TIMEOUT = 4
BUILDING_REWARD_TIMEOUT = 300
try:
# debugBattles = base.config.GetBool('debug-battles', 0)
CLIENT_INPUT_TIMEOUT = base.config.GetFloat('battle-input-timeout', TTLocalizer.BBbattleInputTimeout)
except:
# debugBattles = simbase.config.GetBool('debug-battles', 0)
CLIENT_INPUT_TIMEOUT = simbase.config.GetFloat('battle-input-timeout', TTLocalizer.BBbattleInputTimeout)
def levelAffectsGroup(track, level):
#return (level % 2)
return attackAffectsGroup(track, level) #UBER
def attackAffectsGroup(track, level, type=None):
#if (track == HEAL and (level % 2)):
# return 1
#elif (track == LURE and (level % 2)):
# return 1
#elif (track == SOUND):
# return 1
#elif (track == NPCSOS or type == NPCSOS or track == PETSOS or type == PETSOS):
# return 1
#else:
# return 0
if (track == NPCSOS or type == NPCSOS or track == PETSOS or type == PETSOS):
return 1
elif (track >= 0) and (track <= DROP_TRACK):
return AvPropTargetCat[AvPropTarget[track]][level]
else:
return 0
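# Note: SOS calls (NPCSOS, PETSOS) always count as group attacks; for the regular gag
# tracks the answer comes from the AvPropTargetCat/AvPropTarget tables in
# ToontownBattleGlobals, indexed by track and gag level.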
def getToonAttack(id, track=NO_ATTACK, level=-1, target=-1):
""" getToonAttack(id, track, level, target)
"""
return [id, track, level, target, [], 0, 0, [], 0, 0]
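# The returned list lines up with the TOON_*_COL indices defined above:
# [id, track, level, target, hp-list, acc-bonus, hp-bonus, kb-bonus-list, suit-died, suit-revive]
# e.g. getToonAttack(toonId, THROW, 2, 0)[TOON_TRACK_COL] == THROW.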
def getDefaultSuitAttacks():
""" getDefaultSuitAttacks()
"""
suitAttacks = [[NO_ID, NO_ATTACK, -1, [], 0, 0, 0],
[NO_ID, NO_ATTACK, -1, [], 0, 0, 0],
[NO_ID, NO_ATTACK, -1, [], 0, 0, 0],
[NO_ID, NO_ATTACK, -1, [], 0, 0, 0]]
return suitAttacks
def getDefaultSuitAttack():
""" getDefaultSuitAttack()
"""
return [NO_ID, NO_ATTACK, -1, [], 0, 0, 0]
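# Each suit attack row follows the SUIT_*_COL layout defined above:
# [suit id, attack id, target, hp-list, toon-died flag, before-toons flag, taunt index].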
def findToonAttack(toons, attacks, track):
""" findToonAttack(toons, attacks, track)
Return all attacks of the specified track sorted by increasing level
"""
foundAttacks = []
for t in toons:
if (attacks.has_key(t)):
attack = attacks[t]
local_track = attack[TOON_TRACK_COL]
# If it's an NPC, convert to the appropriate track
if (track != NPCSOS and attack[TOON_TRACK_COL] == NPCSOS):
local_track = NPCToons.getNPCTrack(attack[TOON_TGT_COL])
if (local_track == track):
if local_track == FIRE:
canFire = 1
for attackCheck in foundAttacks:
if attackCheck[TOON_TGT_COL] == attack[TOON_TGT_COL]:
canFire = 0
else:
pass
if canFire:
assert(t == attack[TOON_ID_COL])
foundAttacks.append(attack)
else:
assert(t == attack[TOON_ID_COL])
foundAttacks.append(attack)
def compFunc(a, b):
if (a[TOON_LVL_COL] > b[TOON_LVL_COL]):
return 1
elif (a[TOON_LVL_COL] < b[TOON_LVL_COL]):
return -1
return 0
foundAttacks.sort(compFunc)
return foundAttacks
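# Illustrative call (names assumed, not from this file): with toonAttacks a dict mapping
# toon id to its chosen attack row, findToonAttack(activeToonIds, toonAttacks, SOUND)
# returns the sound attacks picked this round, ordered from lowest to highest gag level.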
# A little pad time added to server time calculations, to allow for
# slow or out-of-sync clients. In general, the AI server will give
# each client the expected time to complete its movie, plus
# SERVER_BUFFER_TIME, and then will ask all the clients to move on
# with or without the slow one(s).
SERVER_BUFFER_TIME = 2.0
#CLIENT_INPUT_TIMEOUT = TTLocalizer.BBbattleInputTimeout
SERVER_INPUT_TIMEOUT = CLIENT_INPUT_TIMEOUT + SERVER_BUFFER_TIME
# The maximum time we expect a suit to take walk to its position in
# battle.
MAX_JOIN_T = TTLocalizer.BBbattleInputTimeout
# The length of time for a faceoff taunt.
FACEOFF_TAUNT_T = 3.5
# length of time we look at the interactive prop helping toons
FACEOFF_LOOK_AT_PROP_T = 6
# The amount of time it takes to open up the elevator doors and walk
# out.
ELEVATOR_T = 4.0
BATTLE_SMALL_VALUE = 0.0000001
# This is the furthest we expect to have to walk from the face-off to
# get the battle. If we are further away than this, we suspect we are
# victims of clock skew.
MAX_EXPECTED_DISTANCE_FROM_BATTLE = 50.0
class BattleBase:
notify = DirectNotifyGlobal.directNotify.newCategory('BattleBase')
# This defines the points where the suits will stand in battle.
# For each number of suits in the battle (1, 2, 3, or 4), the
# corresponding element of suitPoints is a list of n (pos, heading)
# pairs for each of the n suits to stand.
suitPoints = (
((Point3(0, 5, 0), 179),
),
((Point3(2, 5.3, 0), 170),
(Point3(-2, 5.3, 0), 180),
),
((Point3(4, 5.2, 0), 170),
(Point3(0, 6, 0), 179),
(Point3(-4, 5.2, 0), 190),
),
((Point3(6, 4.4, 0), 160),
(Point3(2, 6.3, 0), 170),
(Point3(-2, 6.3, 0), 190),
(Point3(-6, 4.4, 0), 200),
))
# And this defines the single set of points for suits who are
# "pending": they have joined the battle, but are waiting for the
# next round to begin before they take their place.
suitPendingPoints = (
(Point3(-4, 8.2, 0), 190),
(Point3(0, 9, 0), 179),
(Point3(4, 8.2, 0), 170),
(Point3(8, 3.2, 0), 160),
)
# This is similar to the above, but for toons instead of suits.
toonPoints = (
((Point3(0, -6, 0), 0),
),
((Point3(1.5, -6.5, 0), 5),
(Point3(-1.5, -6.5, 0), -5),
),
((Point3(3, -6.75, 0), 5),
(Point3(0, -7, 0), 0),
(Point3(-3, -6.75, 0), -5),
),
((Point3(4.5, -7, 0), 10),
(Point3(1.5, -7.5, 0), 5),
(Point3(-1.5, -7.5, 0), -5),
(Point3(-4.5, -7, 0), -10),
))
toonPendingPoints = (
(Point3(-3, -8, 0), -5),
(Point3(0, -9, 0), 0),
(Point3(3, -8, 0), 5),
(Point3(5.5, -5.5, 0), 20),
)
# These define the points on the perimeter of the battle circle
# for suits and toons who are "joining"; this allows the avatar to
# walk a circle around the battle to get to its pending point,
# defined above.
posA = Point3(0, 10, 0)
posB = Point3(-7.071, 7.071, 0)
posC = Point3(-10, 0, 0)
posD = Point3(-7.071, -7.071, 0)
posE = Point3(0, -10, 0)
posF = Point3(7.071, -7.071, 0)
posG = Point3(10, 0, 0)
posH = Point3(7.071, 7.071, 0)
allPoints = (posA, posB, posC, posD, posE, posF, posG, posH)
toonCwise = [posA, posB, posC, posD, posE]
toonCCwise = [posH, posG, posF, posE]
suitCwise = [posE, posF, posG, posH, posA]
suitCCwise = [posD, posC, posB, posA]
suitSpeed = 4.8
toonSpeed = 8.0
def __init__(self):
""" __init__()
"""
self.pos = Point3(0, 0, 0)
self.initialSuitPos = Point3(0, 1, 0)
self.timer = Timer()
self.resetLists()
def resetLists(self):
""" resetLists()
"""
self.suits = []
self.pendingSuits = []
self.joiningSuits = []
self.activeSuits = []
self.luredSuits = []
self.suitGone = 0
self.toons = []
self.joiningToons = []
self.pendingToons = []
self.activeToons = []
self.runningToons = []
self.toonGone = 0
# keep track of toons who helped, so we know which toons just passed all the time
self.helpfulToons = []
def calcFaceoffTime(self, centerpos, suitpos):
""" calcFaceoffTime(centerpos, suitpos)
"""
facing = Vec3(centerpos - suitpos)
facing.normalize()
suitdest = Point3(centerpos - Point3(facing * 6.0))
dist = Vec3(suitdest - suitpos).length()
return (dist / BattleBase.suitSpeed)
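        # e.g. a suit 12 units from the battle center walks to the face-off ring 6 units
        # out, so dist = 6.0 and the face-off walk takes 6.0 / 4.8 = 1.25 seconds.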
def calcSuitMoveTime(self, pos0, pos1):
""" calcSuitMoveTime(pos0, pos1)
"""
dist = Vec3(pos0 - pos1).length()
return (dist / BattleBase.suitSpeed)
def calcToonMoveTime(self, pos0, pos1):
""" calcToonMoveTime(pos0, pos1)
"""
dist = Vec3(pos0 - pos1).length()
return (dist / BattleBase.toonSpeed)
def buildJoinPointList(self, avPos, destPos, toon=0):
""" buildJoinPointList(avPos, destPos, toon)
This function is called when suits or toons ask to join the
battle and need to figure out how to walk to their selected
pending point (destPos). It builds a list of points the
avatar should walk through in order to get there. If the list
is empty, the avatar will walk straight there.
"""
# In the default case, avatars walk around the perimeter of
# the battle cell to get to their target point. Figure out
# the shortest path around the circle.
# First, find the closest battle join point
minDist = 999999.0
nearestP = None
for p in BattleBase.allPoints:
dist = Vec3(avPos - p).length()
if (dist < minDist):
nearestP = p
minDist = dist
assert(nearestP != None)
self.notify.debug('buildJoinPointList() - avp: %s nearp: %s' % \
(avPos, nearestP))
# See if destPos is the closest point
dist = Vec3(avPos - destPos).length()
if (dist < minDist):
self.notify.debug('buildJoinPointList() - destPos is nearest')
return []
if (toon == 1):
if (nearestP == BattleBase.posE):
self.notify.debug('buildJoinPointList() - posE')
plist = [BattleBase.posE]
elif (BattleBase.toonCwise.count(nearestP) == 1):
self.notify.debug('buildJoinPointList() - clockwise')
index = BattleBase.toonCwise.index(nearestP)
plist = BattleBase.toonCwise[index+1:]
else:
self.notify.debug('buildJoinPointList() - counter-clockwise')
assert(BattleBase.toonCCwise.count(nearestP) == 1)
index = BattleBase.toonCCwise.index(nearestP)
plist = BattleBase.toonCCwise[index+1:]
else:
if (nearestP == BattleBase.posA):
self.notify.debug('buildJoinPointList() - posA')
plist = [BattleBase.posA]
elif (BattleBase.suitCwise.count(nearestP) == 1):
self.notify.debug('buildJoinPointList() - clockwise')
index = BattleBase.suitCwise.index(nearestP)
plist = BattleBase.suitCwise[index+1:]
else:
self.notify.debug('buildJoinPointList() - counter-clockwise')
assert(BattleBase.suitCCwise.count(nearestP) == 1)
index = BattleBase.suitCCwise.index(nearestP)
plist = BattleBase.suitCCwise[index+1:]
self.notify.debug('buildJoinPointList() - plist: %s' % plist)
return plist
def addHelpfulToon(self, toonId):
"""Add toonId to our helpful toons, make sure it's in the list at most once."""
if toonId not in self.helpfulToons:
self.helpfulToons.append(toonId)
| ["[email protected]"] | |
2d84981defdffa31bab43937d8c437d0f446791f | cb4d5f3b8e4bd0a35acd3b61152f78ed7098ddb6 | /baby/urls.py | 127336d28e6864648102c2c9db0effde63b74ca7 | [] | no_license | apatten001/Shower | 8b3c5a94d11c2299774c6839a956e59e30e6b8cc | b4fe99ffbe992f6b01c5b86d9331771a0554cb11 | refs/heads/master | 2021-04-07T02:35:51.035339 | 2018-03-15T21:44:36 | 2018-03-15T21:44:36 | 125,427,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py |
"""baby URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from shower.views import HomeView
urlpatterns = [
path('admin/', admin.site.urls),
path('', HomeView.as_view(), name='home'),
path('', include('shower.urls', namespace='shower')),
]
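# Illustrative note: with the 'shower' namespace above, templates and views would reverse
# routes as 'shower:<route-name>' (e.g. {% url 'shower:<name>' %}); the concrete route
# names live in shower/urls.py, which is not part of this file.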
| ["[email protected]"] |